File: /schedule_convert-1.0.0-py3-none-any.whl/schedule_convert/exporters/xcal.py (package: schedule-convert)
import datetime
try:
    from lxml import etree
    HAVE_LXML = True
except ImportError:
    import xml.etree.ElementTree as etree
    HAVE_LXML = False
XCAL_URN = 'urn:ietf:params:xml:ns:xcal'
PENTABARF_URN = 'http://pentabarf.org'
PENTABARF_NS = '{' + PENTABARF_URN + '}'
NSMAP = {
None: XCAL_URN,
'pentabarf': PENTABARF_URN
}
DATE_FORMAT = '%Y%m%dT%H%M%S'
class XCalExporter:
extension = 'xcal'
def write(self, fileobj, conf):
        if HAVE_LXML:
            root = etree.Element('iCalendar', nsmap=NSMAP)
        else:
            # The stdlib ElementTree Element() has no nsmap argument (the dict
            # would be serialized as a broken attribute); register the
            # pentabarf prefix so the Clark-notation tags below keep it.
            # The default xCal namespace is only emitted under lxml.
            etree.register_namespace('pentabarf', PENTABARF_URN)
            root = etree.Element('iCalendar')
vc = etree.SubElement(root, 'vcalendar')
etree.SubElement(vc, 'version').text = '2.0'
etree.SubElement(vc, 'prodid').text = '-//Pentabarf//Schedule//EN'
etree.SubElement(vc, 'x-wr-caldesc').text = conf.title
etree.SubElement(vc, 'x-wr-calname').text = conf.title
domain = conf.get_domain()
for event in sorted(conf.events):
if not event.active or not event.room:
continue
xevent = etree.SubElement(vc, 'vevent')
etree.SubElement(xevent, 'method').text = 'PUBLISH'
etree.SubElement(xevent, 'uid').text = '{}@{}@{}'.format(
conf.slug, event.guid, domain)
etree.SubElement(xevent, PENTABARF_NS + 'event-id').text = str(event.guid)
etree.SubElement(xevent, PENTABARF_NS + 'event-slug').text = event.slug
etree.SubElement(xevent, PENTABARF_NS + 'title').text = event.title
etree.SubElement(xevent, PENTABARF_NS + 'subtitle').text = event.subtitle
etree.SubElement(xevent, PENTABARF_NS + 'language').text = event.language
etree.SubElement(xevent, PENTABARF_NS + 'language-code').text = event.language
duration = datetime.timedelta(minutes=event.duration)
etree.SubElement(xevent, 'dtstart').text = event.start.strftime(DATE_FORMAT)
etree.SubElement(xevent, 'dtend').text = (event.start + duration).strftime(DATE_FORMAT)
etree.SubElement(xevent, 'duration').text = str(event.duration / 60.0)
etree.SubElement(xevent, 'summary').text = event.title
etree.SubElement(xevent, 'description').text = event.abstract or event.description or ''
etree.SubElement(xevent, 'class').text = 'PUBLIC'
etree.SubElement(xevent, 'status').text = 'CONFIRMED'
etree.SubElement(xevent, 'category').text = 'Talk'
etree.SubElement(xevent, 'url').text = event.url or ''
etree.SubElement(xevent, 'location').text = event.room.name
for sp in event.speakers:
etree.SubElement(xevent, 'attendee').text = sp.name
try:
result = etree.tostring(root, encoding='unicode', pretty_print=True)
except TypeError:
# built-in ElementTree doesn't do pretty_print
result = etree.tostring(root, encoding='unicode')
fileobj.write("<?xml version='1.0' encoding='utf-8'?>\n")
fileobj.write(result)
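A quick smoke-test sketch for the exporter above. The Conference/Event/Room stand-ins here are hypothetical `SimpleNamespace` objects carrying just the attributes `write()` reads; the real classes live in schedule_convert's model module (see schedule.py below).

```python
import io
import datetime
from types import SimpleNamespace

# Hypothetical stand-ins: attribute names are taken from write() above.
room = SimpleNamespace(name='Main Hall')
event = SimpleNamespace(
    active=True, room=room, guid='1234', slug='opening', title='Opening',
    subtitle='', language='en', duration=30,
    start=datetime.datetime(2024, 6, 1, 10, 0),
    abstract='Welcome and logistics', description='', url=None, speakers=[],
)
conf = SimpleNamespace(
    title='DemoConf 2024', slug='democonf24', events=[event],
    get_domain=lambda: 'example.com',
)

buf = io.StringIO()
XCalExporter().write(buf, conf)
print(buf.getvalue().splitlines()[0])  # the XML declaration line
```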
File: /schedule_convert-1.0.0-py3-none-any.whl/schedule_convert/exporters/frab_xml.py (package: schedule-convert)
from datetime import timedelta
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
DATE_FORMAT = '%Y-%m-%d'
DATE_ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
def to_minutes(duration):
hours = duration // 60
minutes = duration % 60
return '{:02d}:{:02d}'.format(hours, minutes)
def to_timestamp(date):
if date.tzinfo is None:
return date.strftime(DATE_ISO_FORMAT)
return date.strftime(DATE_ISO_FORMAT) + date.tzinfo.tzname(date)
class FrabXmlExporter:
extension = 'xml'
def write(self, fileobj, conf):
root = etree.Element('schedule')
etree.SubElement(root, 'version').text = '0.2'
xconf = etree.SubElement(root, 'conference')
etree.SubElement(xconf, 'acronym').text = conf.slug
etree.SubElement(xconf, 'title').text = conf.title
etree.SubElement(xconf, 'start').text = min(conf.days).strftime(DATE_FORMAT)
etree.SubElement(xconf, 'end').text = max(conf.days).strftime(DATE_FORMAT)
etree.SubElement(xconf, 'days').text = str(len(conf.days))
etree.SubElement(xconf, 'timeslot_duration').text = to_minutes(conf.timeslot)
etree.SubElement(xconf, 'base_url').text = conf.url or ''
for i, day in enumerate(sorted(conf.days)):
all_talks = list(conf.filter_events(day=day))
if not all_talks:
continue
xday = etree.SubElement(root, 'day')
xday.set('index', str(i+1))
xday.set('date', day.strftime(DATE_FORMAT))
xday.set('start', to_timestamp(all_talks[0].start))
xday.set('end', to_timestamp(all_talks[-1].start + timedelta(
minutes=all_talks[-1].duration)))
for room in sorted(conf.rooms):
xroom = None
for talk in conf.filter_events(day=day, room=room):
if xroom is None:
xroom = etree.SubElement(xday, 'room')
xroom.set('name', room.name)
xtalk = etree.SubElement(xroom, 'event')
xtalk.set('guid', str(talk.guid))
xtalk.set('id', str(talk.id))
etree.SubElement(xtalk, 'date').text = to_timestamp(talk.start)
etree.SubElement(xtalk, 'start').text = talk.start.strftime('%H:%M')
etree.SubElement(xtalk, 'duration').text = to_minutes(talk.duration)
etree.SubElement(xtalk, 'room').text = room.name
etree.SubElement(xtalk, 'slug').text = talk.slug
etree.SubElement(xtalk, 'url').text = talk.url or ''
rec = etree.SubElement(xtalk, 'recording')
etree.SubElement(rec, 'license').text = ''
etree.SubElement(rec, 'optout').text = 'false' if talk.can_record else 'true'
etree.SubElement(xtalk, 'title').text = talk.title
etree.SubElement(xtalk, 'subtitle').text = talk.subtitle or ''
etree.SubElement(xtalk, 'track').text = talk.track or ''
etree.SubElement(xtalk, 'type').text = 'Talk'
etree.SubElement(xtalk, 'language').text = talk.language
etree.SubElement(xtalk, 'abstract').text = talk.abstract or ''
etree.SubElement(xtalk, 'description').text = talk.description or ''
etree.SubElement(xtalk, 'logo').text = talk.logo or ''
persons = etree.SubElement(xtalk, 'persons')
for speaker in talk.speakers:
xsp = etree.SubElement(persons, 'person')
xsp.set('id', str(speaker.id))
xsp.text = speaker.name
etree.SubElement(xtalk, 'links')
etree.SubElement(xtalk, 'attachments')
try:
result = etree.tostring(root, encoding='unicode', pretty_print=True)
except TypeError:
# built-in ElementTree doesn't do pretty_print
result = etree.tostring(root, encoding='unicode')
fileobj.write("<?xml version='1.0' encoding='utf-8'?>\n")
fileobj.write(result)
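Two quick checks of the helpers above, with outputs derived from their definitions: `to_minutes` renders a minute count as HH:MM, and `to_timestamp` appends the timezone *name* (not a numeric offset) only for aware datetimes.

```python
from datetime import datetime, timezone

print(to_minutes(90))   # '01:30'
print(to_minutes(5))    # '00:05'
print(to_timestamp(datetime(2024, 6, 1, 10, 30)))  # '2024-06-01T10:30:00'
print(to_timestamp(datetime(2024, 6, 1, 10, 30, tzinfo=timezone.utc)))
# '2024-06-01T10:30:00UTC'
```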
File: /schedule_convert-1.0.0-py3-none-any.whl/schedule_convert/model/schedule.py (package: schedule-convert)
import uuid
import re
import math
from slugify import slugify
UUID_NAMESPACE = uuid.UUID('6ba7b838-9dad-11d1-80b4-00c04fd430c8')
class Conference:
def __init__(self, title):
self.title = title
self.slug = None
self.url = None
self.timeslot = 5
self.days = set()
self.rooms = set()
self.speakers = set()
self.events = []
self.timezone = None
self.default_track = None
self.needs_data = title is None
def __len__(self):
return len(self.events)
def is_empty(self):
return len([e for e in self.events if e.active and e.start and e.room]) == 0
def get_domain(self):
if not self.url:
return 'conference.com'
m = re.search(r'://([^/]+)', self.url)
        # Fall back to the placeholder host when the URL has no scheme part
        return m.group(1) if m else 'conference.com'
def make_guid(self, event):
if self.slug is None or event.slug is None:
return None
if event.room is not None:
room = event.room.name
else:
room = ''
return uuid.uuid5(UUID_NAMESPACE, self.slug + event.slug +
room + event.start.strftime('%Y-%m-%d'))
def filter_events(self, day=None, room=None):
for event in sorted(self.events):
if not event.start or not event.room or not event.active:
continue
if room and event.room != room:
continue
if day and event.start.date() != day:
continue
yield event
def prepare(self):
if self.slug is None and self.title is not None:
self.slug = slugify(self.title)
guids = set()
timeslot = None
event_ids = set()
next_event_id = 1
for event in self.events:
if event.start.tzinfo is None and self.timezone is not None:
event.start = event.start.replace(tzinfo=self.timezone)
self.days.add(event.start.date())
if event.id is None or str(event.id) in event_ids:
while str(next_event_id) in event_ids:
next_event_id += 1
event.id = next_event_id
event_ids.add(str(event.id))
if event.room:
self.rooms.add(event.room)
self.speakers.update(event.speakers)
if event.slug is None:
event.slug = slugify(event.title)
if event.track is None:
event.track = self.default_track
if event.guid is None:
event.guid = self.make_guid(event)
if event.guid is not None:
if event.guid in guids:
                    raise Exception('Duplicate guid {}'.format(event.guid))
guids.add(event.guid)
if event.duration:
if not timeslot:
timeslot = event.duration
else:
timeslot = math.gcd(timeslot, event.duration)
# Deduplicate speaker ids
speaker_ids = set()
next_speaker_id = 1
for sp in self.speakers:
if sp.id is None or str(sp.id) in speaker_ids:
while str(next_speaker_id) in speaker_ids:
next_speaker_id += 1
sp.id = next_speaker_id
speaker_ids.add(str(sp.id))
if timeslot is not None:
self.timeslot = timeslot
def merge(self, other):
if self.timeslot is None or other.timeslot < self.timeslot:
self.timeslot = other.timeslot
speaker_ids = set([str(sp.id) for sp in self.speakers])
next_speaker_id = 1
for sp in other.speakers:
if sp.id is None or str(sp.id) in speaker_ids:
while str(next_speaker_id) in speaker_ids:
next_speaker_id += 1
sp.id = next_speaker_id
speaker_ids.add(str(sp.id))
self.speakers.add(sp)
for event in other.events:
self.events.append(event)
self.prepare()
class Room:
def __init__(self, name, sortId=None):
self.name = name
self.sortId = sortId
def __lt__(self, other):
if self.sortId is not None and other.sortId is not None:
return self.sortId < other.sortId
return self.name < other.name
def __eq__(self, other):
if not isinstance(other, Room):
return False
return self.sortId == other.sortId and self.name == other.name
def __hash__(self):
return hash(self.name)
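A small illustration of two behaviors defined above, with made-up values: `get_domain()` extracts the host from the conference URL, and `Room` ordering prefers `sortId` over the name when both rooms have one.

```python
conf = Conference('Demo Conf')
conf.url = 'https://talks.example.org/2024/schedule'
print(conf.get_domain())  # 'talks.example.org'

# Both rooms carry a sortId, so ordering ignores the names...
rooms = sorted([Room('Aula', sortId=2), Room('Zelt', sortId=1)])
print([r.name for r in rooms])  # ['Zelt', 'Aula']
# ...without sortIds, rooms fall back to alphabetical order.
rooms = sorted([Room('Zelt'), Room('Aula')])
print([r.name for r in rooms])  # ['Aula', 'Zelt']
```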
File: /schedule_cronjob-1.3.1-py3-none-any.whl/schedule/crontab.py (package: schedule-cronjob)
from typing import Any, Optional
import numbers
import re
from bisect import bisect, bisect_left
from collections.abc import Iterable
from datetime import datetime, timedelta, date
from calendar import monthrange
DAYNAMES = "sun", "mon", "tue", "wed", "thu", "fri", "sat"
WEEKDAYS = dict(zip(DAYNAMES, range(7)))
CRON_PATTERN_INVALID = """\
Invalid crontab pattern. Valid range is {min}-{max}. \
'{value}' was found.\
"""
CRON_INVALID_TYPE = """\
Argument cronspec needs to be of any of the following types: \
int, str, or an iterable type. {type!r} was given.\
"""
CRON_REPR = """\
<crontab: {0._orig_minute} {0._orig_hour} {0._orig_day_of_week} \
{0._orig_day_of_month} {0._orig_month_of_year} (m/h/d/dM/MY)>\
"""
class AttributeDict(dict):
"""Dict subclass with attribute access."""
def __getattr__(self, k):
# type: (str) -> Any
"""`d.key -> d[key]`."""
try:
return self[k]
except KeyError:
raise AttributeError(
f"{type(self).__name__!r} object has no attribute {k!r}"
)
def __setattr__(self, key, value):
# type: (str, Any) -> None
"""`d[key] = value -> d.key = value`."""
self[key] = value
def cronfield(s):
return "*" if s is None else s
def dictfilter(d=None, **kw):
"""Remove all keys from dict ``d`` whose value is :const:`None`."""
d = kw if d is None else (dict(d, **kw) if kw else d)
return {k: v for k, v in d.items() if v is not None}
class ParseException(Exception):
"""Raised by :class:`crontab_parser` when the input can't be parsed."""
def weekday(name):
"""Return the position of a weekday: 0 - 7, where 0 is Sunday.
Example:
>>> weekday('sunday'), weekday('sun'), weekday('mon')
(0, 0, 1)
"""
abbreviation = name[0:3].lower()
try:
return WEEKDAYS[abbreviation]
except KeyError:
# Show original day name in exception, instead of abbr.
raise KeyError(name)
class CrontabParser:
"""Parser for Crontab expressions.
Any expression of the form 'groups'
(see BNF grammar below) is accepted and expanded to a set of numbers.
These numbers represent the units of time that the Crontab needs to
run on:
.. code-block:: bnf
digit :: '0'..'9'
dow :: 'a'..'z'
number :: digit+ | dow+
steps :: number
range :: number ( '-' number ) ?
numspec :: '*' | range
expr :: numspec ( '/' steps ) ?
groups :: expr ( ',' expr ) *
The parser is a general purpose one, useful for parsing hours, minutes and
day of week expressions. Example usage:
.. code-block:: pycon
        >>> sorted(CrontabParser(60).parse('*/15'))
        [0, 15, 30, 45]
        >>> sorted(CrontabParser(24).parse('*/4'))
        [0, 4, 8, 12, 16, 20]
        >>> sorted(CrontabParser(7).parse('*'))
        [0, 1, 2, 3, 4, 5, 6]
    It can also parse day of month and month of year expressions if initialized
    with a minimum of 1. Example usage:
    .. code-block:: pycon
        >>> sorted(CrontabParser(31, 1).parse('*/3'))
        [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
        >>> sorted(CrontabParser(12, 1).parse('*/2'))
        [1, 3, 5, 7, 9, 11]
        >>> sorted(CrontabParser(12, 1).parse('2-12/2'))
        [2, 4, 6, 8, 10, 12]
The maximum possible expanded value returned is found by the formula:
:math:`max_ + min_ - 1`
"""
ParseException = ParseException
_range = r"(\w+?)-(\w+)"
_steps = r"/(\w+)?"
_star = r"\*"
def __init__(self, max_=60, min_=0):
self.max_ = max_
self.min_ = min_
self.pats = (
(re.compile(self._range + self._steps), self._range_steps),
(re.compile(self._range), self._expand_range),
(re.compile(self._star + self._steps), self._star_steps),
(re.compile("^" + self._star + "$"), self._expand_star),
)
def parse(self, spec):
acc = set()
for part in spec.split(","):
if not part:
raise self.ParseException("empty part")
acc |= set(self._parse_part(part))
return acc
def _parse_part(self, part):
for regex, handler in self.pats:
m = regex.match(part)
if m:
return handler(m.groups())
return self._expand_range((part,))
def _expand_range(self, toks):
fr = self._expand_number(toks[0])
if len(toks) > 1:
to = self._expand_number(toks[1])
if to < fr: # Wrap around max_ if necessary
return list(range(fr, self.min_ + self.max_)) + list(
range(self.min_, to + 1)
)
return list(range(fr, to + 1))
return [fr]
def _range_steps(self, toks):
if len(toks) != 3 or not toks[2]:
raise self.ParseException("empty filter")
return self._expand_range(toks[:2])[:: int(toks[2])]
def _star_steps(self, toks):
if not toks or not toks[0]:
raise self.ParseException("empty filter")
return self._expand_star()[:: int(toks[0])]
def _expand_star(self, *args):
return list(range(self.min_, self.max_ + self.min_))
def _expand_number(self, s):
if isinstance(s, str) and s[0] == "-":
raise self.ParseException("negative numbers not supported")
try:
i = int(s)
except ValueError:
try:
i = weekday(s)
except KeyError:
raise ValueError(f"Invalid weekday literal {s!r}.")
max_val = self.min_ + self.max_ - 1
if i > max_val:
raise ValueError(f"Invalid end range: {i} > {max_val}.")
if i < self.min_:
raise ValueError(f"Invalid beginning range: {i} < {self.min_}.")
return i
class Crontab:
"""Crontab schedule.
Like a :manpage:`cron(5)`-job, you can specify units of time of when
you'd like the job to execute. It's a reasonably complete
    implementation of :command:`cron`'s features, so it should cover a fair
    range of scheduling needs.
You can specify a minute, an hour, a day of the week, a day of the
month, and/or a month in the year in any of the following formats:
.. attribute:: minute
- A (list of) integers from 0-59 that represent the minutes of
an hour of when execution should occur; or
- A string representing a Crontab pattern. This may get pretty
advanced, like ``minute='*/15'`` (for every quarter) or
``minute='1,13,30-45,50-59/2'``.
.. attribute:: hour
- A (list of) integers from 0-23 that represent the hours of
a day of when execution should occur; or
- A string representing a Crontab pattern. This may get pretty
advanced, like ``hour='*/3'`` (for every three hours) or
``hour='0,8-17/2'`` (at midnight, and every two hours during
office hours).
.. attribute:: day_of_week
- A (list of) integers from 0-6, where Sunday = 0 and Saturday =
6, that represent the days of a week that execution should
occur.
- A string representing a Crontab pattern. This may get pretty
advanced, like ``day_of_week='mon-fri'`` (for weekdays only).
(Beware that ``day_of_week='*/2'`` does not literally mean
'every two days', but 'every day that is divisible by two'!)
.. attribute:: day_of_month
- A (list of) integers from 1-31 that represents the days of the
month that execution should occur.
- A string representing a Crontab pattern. This may get pretty
advanced, such as ``day_of_month='2-30/2'`` (for every even
numbered day) or ``day_of_month='1-7,15-21'`` (for the first and
third weeks of the month).
.. attribute:: month_of_year
- A (list of) integers from 1-12 that represents the months of
the year during which execution can occur.
- A string representing a Crontab pattern. This may get pretty
advanced, such as ``month_of_year='*/3'`` (for the first month
of every quarter) or ``month_of_year='2-12/2'`` (for every even
numbered month).
It's important to realize that any day on which execution should
occur must be represented by entries in all three of the day and
month attributes. For example, if ``day_of_week`` is 0 and
``day_of_month`` is every seventh day, only months that begin
on Sunday and are also in the ``month_of_year`` attribute will have
execution events. Or, ``day_of_week`` is 1 and ``day_of_month``
is '1-7,15-21' means every first and third Monday of every month
present in ``month_of_year``.
"""
def __init__(
self,
minute="*",
hour="*",
day_of_week="*",
day_of_month="*",
month_of_year="*",
tz: Optional[str] = None,
**kwargs,
):
self._orig_minute = cronfield(minute)
self._orig_hour = cronfield(hour)
self._orig_day_of_week = cronfield(day_of_week)
self._orig_day_of_month = cronfield(day_of_month)
self._orig_month_of_year = cronfield(month_of_year)
self._orig_kwargs = kwargs
self.hour = self._expand_cronspec(hour, 24)
self.minute = self._expand_cronspec(minute, 60)
self.day_of_week = self._expand_cronspec(day_of_week, 7)
self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
self.tz = None
if tz is not None:
import pytz
if isinstance(tz, str):
self.tz = pytz.timezone(tz) # type: ignore
elif isinstance(tz, pytz.BaseTzInfo):
self.tz = tz
else:
raise ValueError(
"Timezone must be string or pytz.timezone object"
)
@classmethod
def from_expression(
cls, crontab_expression: str, tz: Optional[str] = None
) -> "Crontab":
items = crontab_expression.split(" ")
if len(items) != 5:
raise ValueError(
"Invalid number of components in crontab expression"
)
return cls(
minute=items[0],
hour=items[1],
day_of_week=items[2],
day_of_month=items[3],
month_of_year=items[4],
tz=tz,
)
@staticmethod
def _expand_cronspec(cronspec, max_, min_=0):
"""Expand cron specification.
Takes the given cronspec argument in one of the forms:
.. code-block:: text
int (like 7)
str (like '3-5,*/15', '*', or 'monday')
            set (like {0, 15, 30, 45})
            list (like [8, 17])
        and converts it to an (expanded) set representing all time unit
values on which the Crontab triggers. Only in case of the base
type being :class:`str`, parsing occurs. (It's fast and
happens only once for each Crontab instance, so there's no
significant performance overhead involved.)
For the other base types, merely Python type conversions happen.
The argument ``max_`` is needed to determine the expansion of
``*`` and ranges. The argument ``min_`` is needed to determine
the expansion of ``*`` and ranges for 1-based cronspecs, such as
day of month or month of year. The default is sufficient for minute,
hour, and day of week.
"""
if isinstance(cronspec, numbers.Integral):
result = {cronspec}
elif isinstance(cronspec, str):
result = CrontabParser(max_, min_).parse(cronspec)
elif isinstance(cronspec, set):
result = cronspec
elif isinstance(cronspec, Iterable):
result = set(cronspec)
else:
raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec)))
        # ensure the result does not precede the min or exceed the max
for number in result:
if number >= max_ + min_ or number < min_:
raise ValueError(
CRON_PATTERN_INVALID.format(
min=min_, max=max_ - 1 + min_, value=number
)
)
return result
def _delta_to_next(self, last_run_at, next_hour, next_minute):
"""Find next delta.
Takes a :class:`~datetime.datetime` of last run, next minute and hour,
        and returns a :class:`Ffwd` for the next
scheduled day and time.
Only called when ``day_of_month`` and/or ``month_of_year``
cronspec is specified to further limit scheduled job execution.
"""
datedata = AttributeDict(year=last_run_at.year)
days_of_month = sorted(self.day_of_month)
months_of_year = sorted(self.month_of_year)
def day_out_of_range(year, month, day):
try:
datetime(year=year, month=month, day=day)
except ValueError:
return True
return False
def is_before_last_run(year, month, day):
return (
self._check_awareness(datetime(year, month, day)) < last_run_at
)
def roll_over():
for _ in range(2000):
flag = (
datedata.dom == len(days_of_month)
or day_out_of_range(
datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom],
)
or (
is_before_last_run(
datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom],
)
)
)
if flag:
datedata.dom = 0
datedata.moy += 1
if datedata.moy == len(months_of_year):
datedata.moy = 0
datedata.year += 1
else:
break
else:
# Tried 2000 times, we're most likely in an infinite loop
raise RuntimeError(
"unable to rollover, "
"time specification is probably invalid"
)
if last_run_at.month in self.month_of_year:
datedata.dom = bisect(days_of_month, last_run_at.day)
datedata.moy = bisect_left(months_of_year, last_run_at.month)
else:
datedata.dom = 0
datedata.moy = bisect(months_of_year, last_run_at.month)
if datedata.moy == len(months_of_year):
datedata.moy = 0
roll_over()
while 1:
th = datetime(
year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom],
)
if th.isoweekday() % 7 in self.day_of_week:
break
datedata.dom += 1
roll_over()
return Ffwd(
year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom],
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0,
)
def __repr__(self):
return CRON_REPR.format(self)
def __reduce__(self):
return (
self.__class__,
(
self._orig_minute,
self._orig_hour,
self._orig_day_of_week,
self._orig_day_of_month,
self._orig_month_of_year,
),
self._orig_kwargs,
)
def __setstate__(self, state):
# Calling super's init because the kwargs aren't necessarily passed in
# the same form as they are stored by the superclass
super().__init__(**state)
def now(self) -> datetime:
if self.tz is None:
return datetime.now()
import pytz
utcnow = datetime.now(pytz.UTC)
return utcnow.astimezone(self.tz)
def _check_awareness(self, dt: datetime) -> datetime:
is_naive = dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
if is_naive:
if self.tz is not None:
                raise ValueError(
                    "You cannot use naive datetime if the crontab is defined with a timezone"
                )
else:
if self.tz is None:
                raise ValueError(
                    "You cannot use localized datetime if the crontab is defined without a timezone"
                )
else:
dt = dt.astimezone(self.tz)
return dt
def next_run_time(self, last_run_at: Optional[datetime] = None):
last_run_at = self._check_awareness(last_run_at or self.now())
now = self._check_awareness(self.now())
dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7
execute_this_date = (
last_run_at.month in self.month_of_year
and last_run_at.day in self.day_of_month
and dow_num in self.day_of_week
)
execute_this_hour = (
execute_this_date
and last_run_at.day == now.day
and last_run_at.month == now.month
and last_run_at.year == now.year
and last_run_at.hour in self.hour
and last_run_at.minute < max(self.minute)
)
if execute_this_hour:
next_minute = min(
minute for minute in self.minute if minute > last_run_at.minute
)
delta = Ffwd(minute=next_minute, second=0, microsecond=0)
else:
next_minute = min(self.minute)
execute_today = execute_this_date and last_run_at.hour < max(
self.hour
)
if execute_today:
next_hour = min(
hour for hour in self.hour if hour > last_run_at.hour
)
delta = Ffwd(
hour=next_hour, minute=next_minute, second=0, microsecond=0
)
else:
next_hour = min(self.hour)
all_dom_moy = (
self._orig_day_of_month == "*"
and self._orig_month_of_year == "*"
)
if all_dom_moy:
next_day = min(
[day for day in self.day_of_week if day > dow_num]
or self.day_of_week
)
add_week = next_day == dow_num
delta = Ffwd(
weeks=add_week and 1 or 0,
weekday=(next_day - 1) % 7,
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0,
)
else:
delta = self._delta_to_next(
last_run_at, next_hour, next_minute
)
next_run_at = now + delta
if self.tz:
next_run_at = self.tz.normalize(next_run_at)
return next_run_at
def __eq__(self, other):
if isinstance(other, Crontab):
return (
other.month_of_year == self.month_of_year
and other.day_of_month == self.day_of_month
and other.day_of_week == self.day_of_week
and other.hour == self.hour
                and other.minute == self.minute
)
return NotImplemented
def __ne__(self, other):
res = self.__eq__(other)
if res is NotImplemented:
return True
return not res
class Ffwd:
"""Version of ``dateutil.relativedelta`` that only supports addition."""
def __init__(
self,
year=None,
month=None,
weeks=0,
weekday=None,
day=None,
hour=None,
minute=None,
second=None,
microsecond=None,
**kwargs,
):
# pylint: disable=redefined-outer-name
# weekday is also a function in outer scope.
self.year = year
self.month = month
self.weeks = weeks
self.weekday = weekday
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
self.days = weeks * 7
self._has_time = self.hour is not None or self.minute is not None
def __radd__(self, other):
if not isinstance(other, date):
return NotImplemented
year = self.year or other.year
month = self.month or other.month
day = min(monthrange(year, month)[1], self.day or other.day)
ret = other.replace(
**dict(dictfilter(self._fields()), year=year, month=month, day=day)
)
if self.weekday is not None:
ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7)
return ret + timedelta(days=self.days)
def _fields(self, **extra):
return dictfilter(
{
"year": self.year,
"month": self.month,
"day": self.day,
"hour": self.hour,
"minute": self.minute,
"second": self.second,
"microsecond": self.microsecond,
},
**extra,
)
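A few usage sketches for the classes above. Note the field order `from_expression` expects: minute, hour, day_of_week, day_of_month, month_of_year, with day-of-week third, unlike a classic crontab line. The parser outputs below follow from its expansion rules; `next_run_time()` depends on the wall clock, so its result is not fixed.

```python
print(sorted(CrontabParser(60).parse('*/15')))      # [0, 15, 30, 45]
print(sorted(CrontabParser(24).parse('0,8-17/2')))  # [0, 8, 10, 12, 14, 16]
print(sorted(CrontabParser(7).parse('mon-fri')))    # [1, 2, 3, 4, 5]

cron = Crontab.from_expression('30 7 mon-fri * *')  # 07:30 on weekdays
print(sorted(cron.day_of_week))                     # [1, 2, 3, 4, 5]
print(cron.next_run_time())  # next weekday at 07:30 after "now"
```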
File: /schedule_cronjob-1.3.1-py3-none-any.whl/schedule_cronjob-1.3.1.dist-info/AUTHORS.rst (package: schedule-cronjob)
Thanks to all the wonderful folks who have contributed to schedule over the years:
- mattss <https://github.com/mattss>
- mrhwick <https://github.com/mrhwick>
- cfrco <https://github.com/cfrco>
- matrixise <https://github.com/matrixise>
- abultman <https://github.com/abultman>
- mplewis <https://github.com/mplewis>
- WoLfulus <https://github.com/WoLfulus>
- dylwhich <https://github.com/dylwhich>
- fkromer <https://github.com/fkromer>
- alaingilbert <https://github.com/alaingilbert>
- Zerrossetto <https://github.com/Zerrossetto>
- yetingsky <https://github.com/yetingsky>
- schnepp <https://github.com/schnepp> <https://bitbucket.org/saschaschnepp>
- grampajoe <https://github.com/grampajoe>
- gilbsgilbs <https://github.com/gilbsgilbs>
- Nathan Wailes <https://github.com/NathanWailes>
- Connor Skees <https://github.com/ConnorSkees>
- qmorek <https://github.com/qmorek>
- aisk <https://github.com/aisk>
- MichaelCorleoneLi <https://github.com/MichaelCorleoneLi>
- sijmenhuizenga <https://github.com/SijmenHuizenga>
- eladbi <https://github.com/eladbi>
- chankeypathak <https://github.com/chankeypathak>
- vubon <https://github.com/vubon>
- gaguirregabiria <https://github.com/gaguirregabiria>
- rhagenaars <https://github.com/RHagenaars>
- Skenvy <https://github.com/skenvy>
- zcking <https://github.com/zcking>
- Martin Thoma <https://github.com/MartinThoma>
- ebllg <https://github.com/ebllg>
- fredthomsen <https://github.com/fredthomsen>
- biggerfisch <https://github.com/biggerfisch>
- sosolidkk <https://github.com/sosolidkk>
- rudSarkar <https://github.com/rudSarkar>
- chrimaho <https://github.com/chrimaho>
- jweijers <https://github.com/jweijers>
- Akuli <https://github.com/Akuli>
- NaelsonDouglas <https://github.com/NaelsonDouglas>
- SergBobrovsky <https://github.com/SergBobrovsky>
- CPickens42 <https://github.com/CPickens42>
- emollier <https://github.com/emollier>
- sunpro108 <https://github.com/sunpro108>
- lerome <https://github.com/lerome>
File: /schedule_filter-0.2.1.tar.gz/schedule_filter-0.2.1/README.md (package: schedule-filter)
# schedule_filter
This package enables advanced job scheduling on top of [dbader/schedule](https://github.com/dbader/schedule)
## Features
* running a job on the:
* nth day of the year
* nth month of the year
* nth day of a month
* nth week of a month
## Examples
* basic:
* monthly:
    * doing a job on every second Sunday of a month ```schedule.every().sunday.do(nth_week_monthly(2, job_func))```
    * doing a job on every last Monday of a month ```schedule.every().monday.do(nth_week_monthly(-1, job_func))```
    * doing a job on every last day of a month ```schedule.every().day.do(nth_day_monthly(-1, job_func))```
    * doing a job on every 15th of a month ```schedule.every().day.do(nth_day_monthly(15, job_func))```
* yearly:
* doing a job on every first day of a year ```schedule.every().day.do(nth_day_yearly(1, job_func))```
* doing a job on every New Year's Eve ```schedule.every().day.do(nth_day_yearly(-1, job_func))```
    * doing a job every day in April ```schedule.every().day.do(nth_month_yearly(4, job_func))```
* advanced:
* doing a job on every Christmas Eve ```schedule.every().day.do(nth_month_yearly(12, nth_day_monthly(24, job_func)))```
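A runnable sketch wiring one of the examples above into schedule's polling loop; the import path `schedule_filter` is an assumption based on the package name.

```python
import time
import schedule
from schedule_filter import nth_week_monthly  # assumed import path

def job_func():
    print("second Sunday of the month!")

# Every second Sunday of a month, as documented above.
schedule.every().sunday.do(nth_week_monthly(2, job_func))

while True:
    schedule.run_pending()
    time.sleep(3600)
```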
|
schedule-filter
|
/schedule_filter-0.2.1.tar.gz/schedule_filter-0.2.1/README.md
|
README.md
|
# schedule_filter
This repository enables advanced job scheduling on top of [dbader/schedule](https://github.com/dbader/schedule)
## Features
* doing a job at/in the:
* nth day of the year
* nth month of the year
* nth day of a month
* nth week of a month
## Examples
* basic:
* monthly:
* doing a job on every second sunday of a month ```schedule.every().sunday.do(nth_week_monthly(2, job_func))```
* doing a job on every last monday of a month ```schedule.every().monday.do(nth_week_monthly(-1, job_func))```
* doing a job on every last day of a month ```schedule.every().day.do(nth_day_monthly(-1, job_func))```
* doing a job on every 15th of a month ```schedule.every().day.do(nth_day_monthly(15, job_func))```
* yearly:
* doing a job on every first day of a year ```schedule.every().day.do(nth_day_yearly(1, job_func))```
* doing a job on every New Year's Eve ```schedule.every().day.do(nth_day_yearly(-1, job_func))```
* doing a job every day in April ```schedule.every().day.do(nth_month_yearly(4, job_func))```
* advanced:
* doing a job on every Christmas Eve ```schedule.every().day.do(nth_month_yearly(12, nth_day_monthly(24, job_func)))```
| 0.827759 | 0.989541 |
import datetime
import calendar
import functools
def nth_day_yearly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().day.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the day of the year, where the function works
example:
schedule.every().day.do(nth_day_yearly(1, job_func)) # do job_func() on first day of the year
schedule.every().day.do(nth_day_yearly(-1, job_func)) # do job_func() on last day of the year
:param n: day of the year, can be 1 to 365 (366 in a leap year) or
-1 to -365 (-366 in a leap year)
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
year = datetime.datetime.today().year
days_of_year = 366 if calendar.isleap(year) else 365
assert n != 0, "The nth day cannot be 0 (Zero)"
assert n <= days_of_year, "The nth day cannot be bigger than 365 (366 in a leap year)"
assert n >= -days_of_year, "The nth day cannot be smaller than -365 (-366 in a leap year)"
day_of_year = int(datetime.datetime.today().strftime("%j"))  # %j = day number of year, 001-366
if (n > 0 and n == day_of_year) or \
        (n < 0 and days_of_year + n + 1 == day_of_year):
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_month_yearly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().day.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the month, where the function works
example:
schedule.every().monday.do(nth_month_yearly(1, job_func)) # do job_func() on every monday of the month=1 (january)
schedule.every().day.do(nth_month_yearly(-1, job_func)) # do job_func() on every day of the month=12 (december)
:param n: month number, can be 1 to 12 or
-1 to -12 (counting back from December)
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
assert n != 0, "The nth month cannot be 0 (Zero)"
assert n <= 12, "The nth month cannot be bigger than 12"
assert n >= -12, "The nth month cannot be smaller than -12"
month = datetime.datetime.today().month
if (n > 0 and n == month) or \
        (n < 0 and 13 + n == month):
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_day_monthly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().day.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the day of the month, where the function works
example:
schedule.every().day.do(nth_day_monthly(1, job_func)) # do job_func() on first day of the month
schedule.every().day.do(nth_day_monthly(-1, job_func)) # do job_func() on last day of the month
:param n: day of the month, can be 1 to 28 or
-1 to -28
(capped at 28 because February, the shortest month, has 28 days)
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
_, num_days_of_month = calendar.monthrange(datetime.datetime.today().year,
datetime.datetime.today().month)
assert n != 0, "The nth day cannot be 0 (Zero)"
assert n <= 28, "The nth day cannot be bigger than 28"
assert n >= -28, "The nth day cannot be smaller than -28"
day_of_month = datetime.datetime.today().day
if (n > 0 and day_of_month == n) or \
        (n < 0 and day_of_month == num_days_of_month + n + 1):
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_week_monthly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().weekday.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the number of the week, where the function works
example:
schedule.every().monday.do(nth_week_monthly(1, job_func)) # do job_func() on first monday of the month
schedule.every().sunday.do(nth_week_monthly(-1, job_func)) # do job_func() on last sunday of the month
:param n: number of week, can be 1 to 4 or
-1 to -4
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
assert n != 0, "The nth week cannot be 0 (Zero)"
assert n <= 4, "The nth week cannot be bigger than 4"
assert n >= -4, "The nth week cannot be smaller than -4"
day_of_month = datetime.datetime.today().day
if n > 0:
week_n = lambda n: 7 * n
if week_n(n - 1) < day_of_month <= week_n(n):
return _execute(job_func, args, kwargs)
else:
return # wrong week
elif n < 0:
_, num_days_of_month = calendar.monthrange(datetime.datetime.today().year,
datetime.datetime.today().month)
reverse_week_n = lambda n: num_days_of_month + (n * 7)
"""
reverse week subtracts n weeks from the numbers of days of a month
reverse_week_n(0) == num_days_of_month (31, for example)
reverse_week_n(-1) == num_days_of_month - 7
reverse_week_n(-2) == num_days_of_month - 14
"""
if reverse_week_n(n) < day_of_month <= reverse_week_n(n + 1):
return _execute(job_func, args, kwargs)
else:
return # wrong week
def nth_year_ever(n, job_func, *args, **kwargs):
if datetime.datetime.today().year == n:
return _execute(job_func, args, kwargs)
def _execute(job_func, args, kwargs):
s_job_func = functools.partial(job_func, *args, **kwargs)
try:
functools.update_wrapper(s_job_func, job_func)
except AttributeError:
# job_funcs already wrapped by functools.partial won't have
# __name__, __module__ or __doc__ and the update_wrapper()
# call will fail.
pass
return s_job_func()
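# Minimal self-test sketch (illustration only; `job` is a placeholder):
# run the module directly to check whether today matches a couple of filters.
if __name__ == "__main__":
    def job():
        print("filter matched today")

    # fires only when today is the last day of the current month
    nth_day_monthly(-1, job)
    # fires only when today falls within the first week of the month
    nth_week_monthly(1, job)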
|
schedule-filter
|
/schedule_filter-0.2.1.tar.gz/schedule_filter-0.2.1/schedule_filter/__init__.py
|
__init__.py
|
import datetime
import calendar
import functools
def nth_day_yearly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().day.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the day of the year, where the function works
example:
schedule.every().day.do(nth_day_yearly(1, job_func)) # do job_func() on first day of the year
schedule.every().day.do(nth_day_yearly(-1, job_func)) # do job_func() on last day of the year
:param n: day of the year, can be 1 to 365 (366 in a leap year) or
-1 to -365 (-366 in a leap year)
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
year = datetime.datetime.today().year
days_of_year = 366 if calendar.isleap(year) else 365
assert n != 0, "The nth day cannot be 0 (Zero)"
assert n <= days_of_year, "The nth day cannot be bigger than 365 (366 in a leap year)"
assert n >= -days_of_year, "The nth day cannot be smaller than -365 (-366 in a leap year)"
day_of_year = int(datetime.datetime.today().strftime("%j"))  # %j = day number of year, 001-366
if (n > 0 and n == day_of_year) or \
        (n < 0 and days_of_year + n + 1 == day_of_year):
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_month_yearly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().day.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the month, where the function works
example:
schedule.every().monday.do(nth_month_yearly(1, job_func)) # do job_func() on every monday of the month=1 (january)
schedule.every().day.do(nth_month_yearly(-1, job_func)) # do job_func() on every day of the month=12 (december)
:param n: month number, can be 1 to 12 or
-1 to -12 (counting back from December)
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
assert n != 0, "The nth month cannot be 0 (Zero)"
assert n <= 12, "The nth month cannot be bigger than 12"
assert n >= -12, "The nth month cannot be smaller than -12"
month = datetime.datetime.today().month
if (n > 0 and n == month) or \
        (n < 0 and 13 + n == month):
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_day_monthly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().day.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the day of the month, where the function works
example:
schedule.every().day.do(nth_day_monthly(1, job_func)) # do job_func() on first day of the month
schedule.every().day.do(nth_day_monthly(-1, job_func)) # do job_func() on last day of the month
:param n: day of the month, can be 1 to 28 or
-1 to -28
(capped at 28 because February, the shortest month, has 28 days)
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
_, num_days_of_month = calendar.monthrange(datetime.datetime.today().year,
datetime.datetime.today().month)
assert n != 0, "The nth day cannot be 0 (Zero)"
assert n <= 28, "The nth day cannot be bigger than 28"
assert n >= -28, "The nth day cannot be smaller than -28"
day_of_month = datetime.datetime.today().day
if (n > 0 and day_of_month == n) or \
        (n < 0 and day_of_month == num_days_of_month + n + 1):
return _execute(job_func, args, kwargs)
else:
return # wrong day
def nth_week_monthly(n, job_func, *args, **kwargs):
"""
addition to schedule.every().weekday.do() or
schedule.every().day.at(time).do()
with this function, its possible to define
the number of the week, where the function works
example:
schedule.every().monday.do(nth_week_monthly(1, job_func)) # do job_func() on first monday of the month
schedule.every().sunday.do(nth_week_monthly(-1, job_func)) # do job_func() on last sunday of the month
:param n: number of week, can be 1 to 4 or
-1 to -4
:param job_func: function
:param args: list of positional arguments
:param kwargs: dict of keyworded arguments
:return: result of job_func(*args, **kwargs)
"""
assert n != 0, "The nth week cannot be 0 (Zero)"
assert n <= 4, "The nth week cannot be bigger than 4"
assert n >= -4, "The nth week cannot be smaller than -4"
day_of_month = datetime.datetime.today().day
if n > 0:
week_n = lambda n: 7 * n
if week_n(n - 1) < day_of_month <= week_n(n):
return _execute(job_func, args, kwargs)
else:
return # wrong week
elif n < 0:
_, num_days_of_month = calendar.monthrange(datetime.datetime.today().year,
datetime.datetime.today().month)
reverse_week_n = lambda n: num_days_of_month + (n * 7)
"""
reverse week subtracts n weeks from the numbers of days of a month
reverse_week_n(0) == num_days_of_month (31, for example)
reverse_week_n(-1) == num_days_of_month - 7
reverse_week_n(-2) == num_days_of_month - 14
"""
if reverse_week_n(n) < day_of_month <= reverse_week_n(n + 1):
return _execute(job_func, args, kwargs)
else:
return # wrong week
def nth_year_ever(n, job_func, *args, **kwargs):
if datetime.datetime.today().year == n:
return _execute(job_func, args, kwargs)
def _execute(job_func, args, kwargs):
s_job_func = functools.partial(job_func, *args, **kwargs)
try:
functools.update_wrapper(s_job_func, job_func)
except AttributeError:
# job_funcs already wrapped by functools.partial won't have
# __name__, __module__ or __doc__ and the update_wrapper()
# call will fail.
pass
return s_job_func()
| 0.603581 | 0.632304 |
from multiprocessing import Process, Queue as PQueue, Value
from schedule_jobs.core.helper import underline2hump
from pprint import pprint
from typing import Union
import schedule
import logging
import time
import os
class App(object):
"""
Scheduled task runner.
:ivar _logger
:ivar _worker_num
:ivar _jobs_module
"""
logger: Union[Value, None] = None
jobs_module: Union[Value, None] = None
def __init__(self, log_level: int = logging.DEBUG, log_file_path: Union[None, str] = None, worker_num: int = 4,
jobs_module: str = 'jobs', app_name: Union[None, str] = None):
handlers = []
console = logging.StreamHandler()
console.setLevel(log_level)
handlers.append(console)
if log_file_path is not None:
file_handler = logging.FileHandler(log_file_path)
file_handler.setLevel(log_level)
handlers.append(file_handler)
logging.basicConfig(level=log_level, handlers=handlers)
self._logger = logging.getLogger(app_name if app_name is not None else __name__)
self._worker_num = worker_num
self._jobs_module = jobs_module
@staticmethod
def _worker_main(worker_no: int, jobs_module_name: str, queue: PQueue, logger: logging.Logger):
"""
:param jobs_module_name:
:param queue:
:param logger:
:return:
"""
print('worker no:', worker_no, ' starting...')
jobs_module = __import__(jobs_module_name)
module_path = getattr(jobs_module, '__path__')[0]
module_names = [v.replace('.py', '') for v in os.listdir(module_path) if 'job.py' in v]
jobs_map = {}
for v in module_names:
try:
job_module = __import__(jobs_module_name + '.' + v)
cls_name = underline2hump(v)
jobs_map[v] = (cls_name, job_module)
except (ImportError, AttributeError) as e:
print(e.__repr__())
except Exception as e:
print(e.__repr__())
while 1:
job_name = queue.get()
job = getattr(getattr(jobs_map[job_name][1], job_name), jobs_map[job_name][0])
job_obj = job(logger)
job_obj.run(worker_no)
def schedule(self, daemon: bool = False):
"""
Discover jobs, register their schedules, and run the worker pool.
:return:
"""
jobs_module = __import__(self._jobs_module)
module_path = getattr(jobs_module, '__path__')[0]
module_names = [v.replace('.py', '') for v in os.listdir(module_path) if 'job.py' in v]
pprint(module_names)
job_queue = PQueue(20)
print("daemon: %d" % daemon)
for v in module_names:
try:
job_module = __import__(self._jobs_module + '.' + v)
cls_name = underline2hump(v)
job = getattr(getattr(job_module, v), cls_name)
job_obj = job(self._logger)
schedule_obj = job_obj.schedule()
if schedule_obj is not None:
schedule_obj(job_queue.put, v)
except (ImportError, AttributeError) as e:
print(e.__repr__())
except Exception as e:
print(e.__repr__())
workers = []
for i in range(0, self._worker_num):
workers.append(Process(target=__class__._worker_main, daemon=daemon,
kwargs={'worker_no': i, 'jobs_module_name': self._jobs_module,
'queue': job_queue,
'logger': self._logger}))
workers[i].start()
while 1:
schedule.run_pending()
time.sleep(1)
def run(self, name: str):
"""
Run a single job by name.
:return:
"""
module_name = name + '_job'
cls_name = underline2hump(module_name)
try:
job_module = __import__(self._jobs_module + '.' + module_name)
job = getattr(getattr(job_module, module_name), cls_name)
job(self._logger).run()
except (ImportError, AttributeError) as e:
print(e.__repr__())
def test(self, name: str):
"""
Test whether a job is set up correctly.
:param name:
:return:
"""
module_name = name + '_job'
cls_name = underline2hump(module_name)
try:
job_module = __import__(self._jobs_module + '.' + module_name)
job = getattr(getattr(job_module, module_name), cls_name)
if callable(job(self._logger).run):
self._logger.info('job: ' + name + ' success')
else:
self._logger.error('job: ' + name + ' failed, msg: please make sure the run method is implemented')
except (ImportError, AttributeError) as e:
self._logger.error('job: ' + name + ' failed, msg: jobs.' + module_name + ' does not exist or the run method is missing')
except Exception as e:
self._logger.error('job: ' + name + ' failed, msg: ' + e.__repr__())
|
schedule-jobs
|
/schedule_jobs-0.0.4-py3-none-any.whl/schedule_jobs/app.py
|
app.py
|
from multiprocessing import Process, Queue as PQueue, Value
from schedule_jobs.core.helper import underline2hump
from pprint import pprint
from typing import Union
import schedule
import logging
import time
import os
class App(object):
"""
Scheduled task runner.
:ivar _logger
:ivar _worker_num
:ivar _jobs_module
"""
logger: Union[Value, None] = None
jobs_module: Union[Value, None] = None
def __init__(self, log_level: int = logging.DEBUG, log_file_path: Union[None, str] = None, worker_num: int = 4,
jobs_module: str = 'jobs', app_name: Union[None, str] = None):
handlers = []
console = logging.StreamHandler()
console.setLevel(log_level)
handlers.append(console)
if log_file_path is not None:
file_handler = logging.FileHandler(log_file_path)
file_handler.setLevel(log_level)
handlers.append(file_handler)
logging.basicConfig(level=log_level, handlers=handlers)
self._logger = logging.getLogger(app_name if app_name is not None else __name__)
self._worker_num = worker_num
self._jobs_module = jobs_module
@staticmethod
def _worker_main(worker_no: int, jobs_module_name: str, queue: PQueue, logger: logging.Logger):
"""
:param jobs_module_name:
:param queue:
:param logger:
:return:
"""
print('worker no:', worker_no, ' starting...')
jobs_module = __import__(jobs_module_name)
module_path = getattr(jobs_module, '__path__')[0]
module_names = [v.replace('.py', '') for v in os.listdir(module_path) if 'job.py' in v]
jobs_map = {}
for v in module_names:
try:
job_module = __import__(jobs_module_name + '.' + v)
cls_name = underline2hump(v)
jobs_map[v] = (cls_name, job_module)
except (ImportError, AttributeError) as e:
print(e.__repr__())
except Exception as e:
print(e.__repr__())
while 1:
job_name = queue.get()
job = getattr(getattr(jobs_map[job_name][1], job_name), jobs_map[job_name][0])
job_obj = job(logger)
job_obj.run(worker_no)
def schedule(self, daemon: bool = False):
"""
Discover jobs, register their schedules, and run the worker pool.
:return:
"""
jobs_module = __import__(self._jobs_module)
module_path = getattr(jobs_module, '__path__')[0]
module_names = [v.replace('.py', '') for v in os.listdir(module_path) if 'job.py' in v]
pprint(module_names)
job_queue = PQueue(20)
print("daemon: %d" % daemon)
for v in module_names:
try:
job_module = __import__(self._jobs_module + '.' + v)
cls_name = underline2hump(v)
job = getattr(getattr(job_module, v), cls_name)
job_obj = job(self._logger)
schedule_obj = job_obj.schedule()
if schedule_obj is not None:
schedule_obj(job_queue.put, v)
except (ImportError, AttributeError) as e:
print(e.__repr__())
except Exception as e:
print(e.__repr__())
workers = []
for i in range(0, self._worker_num):
workers.append(Process(target=__class__._worker_main, daemon=daemon,
kwargs={'worker_no': i, 'jobs_module_name': self._jobs_module,
'queue': job_queue,
'logger': self._logger}))
workers[i].start()
while 1:
schedule.run_pending()
time.sleep(1)
def run(self, name: str):
"""
Run a single job by name.
:return:
"""
module_name = name + '_job'
cls_name = underline2hump(module_name)
try:
job_module = __import__(self._jobs_module + '.' + module_name)
job = getattr(getattr(job_module, module_name), cls_name)
job(self._logger).run()
except (ImportError, AttributeError) as e:
print(e.__repr__())
def test(self, name: str):
"""
Test whether a job is set up correctly.
:param name:
:return:
"""
module_name = name + '_job'
cls_name = underline2hump(module_name)
try:
job_module = __import__(self._jobs_module + '.' + module_name)
job = getattr(getattr(job_module, module_name), cls_name)
if callable(job(self._logger).run):
self._logger.info('job: ' + name + ' success')
else:
self._logger.error('job: ' + name + ' failed, msg: please make sure the run method is implemented')
except (ImportError, AttributeError) as e:
self._logger.error('job: ' + name + ' failed, msg: jobs.' + module_name + ' does not exist or the run method is missing')
except Exception as e:
self._logger.error('job: ' + name + ' failed, msg: ' + e.__repr__())
| 0.501221 | 0.073065 |
Usage
~~~~~
::
$ mkdir path && cd path
$ virtualenv venv --no-site-packages
$ . venv/bin/activate
$ mkdir jobs
$ touch jobs/__init__.py
``jobs/date_job.py``
.. code:: python
from schedule_jobs.core.base_job import BaseJob
import datetime
class DateJob(BaseJob):
def run(self, *args, **kwargs):
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
``main.py``
.. code:: python
from schedule_jobs.app import App
def main():
print("main")
app = App()
app.schedule()
if __name__ == "__main__":
main()
dir
^^^
::
path
path/jobs/__init__.py
path/jobs/date_job.py
path/main.py
finally
^^^^^^^
::
$ python main.py
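Scheduling a job
^^^^^^^^^^^^^^^^
``App.schedule()`` calls each job's ``schedule()`` method and, if the result
is not ``None``, invokes that result with ``(job_queue.put, module_name)``.
A sketch of a job that opts in, saved as ``jobs/tick_job.py`` (the exact
``BaseJob`` contract is an assumption here; only the call shape is taken
from ``app.py``):
.. code:: python
    import schedule
    from schedule_jobs.core.base_job import BaseJob
    class TickJob(BaseJob):
        def schedule(self):
            # Registrar: App calls this with (queue.put, module_name),
            # enqueuing the job name for a worker every 10 seconds.
            return lambda put, name: schedule.every(10).seconds.do(put, name)
        def run(self, *args, **kwargs):
            print("tick")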
|
schedule-jobs
|
/schedule_jobs-0.0.4-py3-none-any.whl/schedule_jobs-0.0.4.dist-info/DESCRIPTION.rst
|
DESCRIPTION.rst
|
Usage
~~~~~
::
$ mkdir path && cd path
$ virtualenv venv --no-site-packages
$ . venv/bin/activate
$ mkdir jobs
$ touch jobs/__init__.py
``jobs/date_job.py``
.. code:: python
from schedule_jobs.core.base_job import BaseJob
import datetime
class DateJob(BaseJob):
def run(self, *args, **kwargs):
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
``main.py``
.. code:: python
from schedule_jobs.app import App
def main():
print("main")
app = App()
app.schedule()
if __name__ == "__main__":
main()
dir
^^^
::
path
path/jobs/__init__.py
path/jobs/date_job.py
path/main.py
finally
^^^^^^^
::
$ python main.py
| 0.242923 | 0.051893 |
import threading
import uuid
import re
import time
from datetime import datetime, timedelta
import math
from .exceptions import TaskNameDuplicateError
from .exceptions import TaskNotFoundError
from .exceptions import TimeFormatError
from .exceptions import OperationFailError
class ScheduleManager:
"""Task schedule manager."""
def __init__(self):
self._tasks = dict()
def __del__(self):
"""Destructor"""
# Make sure all tasks are not running.
self.running_tasks.stop()
def __contains__(self, name):
"""Returns True if task name is registered."""
return name in self._tasks
def __iter__(self):
"""Iterate over tasks name."""
return iter(self._tasks)
def __repr__(self):
return ("ScheduleManager<("
"Tasks: {c}, Running: {r}, Pending: {p}"
")>").format(c=self.count,
r=self.running_tasks.count,
p=self.pending_tasks.count)
@property
def count(self):
"""int: Number of tasks registered in the schedule manager."""
return len(self._tasks)
@property
def all_tasks(self):
"""TaskGroup: Get all tasks."""
return TaskGroup(list(self._tasks.values()))
@property
def running_tasks(self):
"""TaskGroup: Get all running tasks."""
task_list = list()
for name in self._tasks:
if self._tasks[name].is_running:
task_list.append(self._tasks[name])
return TaskGroup(task_list)
@property
def pending_tasks(self):
"""TaskGroup: Get all pending tasks."""
task_list = list()
for name in self._tasks:
if not self._tasks[name].is_running:
task_list.append(self._tasks[name])
return TaskGroup(task_list)
def task(self, name):
"""Get task registerd in schedule manager by name.
Args:
name (str): Task name.
Returns:
Task: Task instance.
Raises:
TaskNotFoundError: Task is not registered in schedule manager.
"""
if name not in self._tasks:
raise TaskNotFoundError
return self._tasks[name]
def _task_list(self, tag):
task_list = list()
if isinstance(tag, list):
for tag_ in tag:
for name in self._tasks:
if tag_ in self._tasks[name].tag:
if self._tasks[name] not in task_list:
task_list.append(self._tasks[name])
else:
for name in self._tasks:
if tag in self._tasks[name].tag:
task_list.append(self._tasks[name])
return task_list
def tasks(self, tag):
"""Get tasks registerd in schedule manager by name.
Args:
tag (Union[obj, list]): Tag or tag list.
Returns:
TaskGroup: TaskGroup instance.
"""
task_list = self._task_list(tag)
return TaskGroup(task_list)
def register(self, task):
"""Register a task.
Args:
task (Task): Task.
Returns:
Task: Registered task instance.
Raises:
TaskNameDuplicateError: Duplicate task name.
"""
if task.name in self._tasks:
raise TaskNameDuplicateError
self._tasks[task.name] = task
task.manager = self
return task
def register_task(self, job, name=None, args=(), kwargs=None,
ignore_skipped=True, daemon=True):
"""Create and register a task.
Args:
job (callable): Job to be scheduled.
name (str): Task name.
By default, a unique name is constructed.
args (tuple): Argument tuple for the job invocation.
Defaults to ().
kwargs (dict): Dictionary of keyword arguments for the job
invocation.
Defaults to {}.
ignore_skipped (bool): Set True to ignore skipped job if time
spent on job is longer than the task cycle time.
Defaults to True.
daemon (bool): Set True to use as a daemon task.
Defaults to True.
Returns:
Task: Registered task instance.
Raises:
TaskNameDuplicateError: Duplicate task name.
"""
if name is None:
name = "Task-{}".format(uuid.uuid4().hex)
while name in self._tasks:
name = "Task-{}".format(uuid.uuid4().hex)
elif name in self._tasks:
raise TaskNameDuplicateError
task = Task(name=name, job=job, args=args, kwargs=kwargs,
            ignore_skipped=ignore_skipped, daemon=daemon)
self._tasks[name] = task
task.manager = self
return task
def unregister(self, name=None, tag=None):
"""Unregister the task.
Args:
name (str): Unregister task by name.
tag (Union[obj, list]): Unregister tasks by tag or by
a list of tags.
"""
if name:
if name in self._tasks:
task = self._tasks[name]
del self._tasks[name]
task.manager = None
if tag:
task_list = self._task_list(tag)
for task in task_list:
del self._tasks[task.name]
task.manager = None
class Task(threading.Thread):
"""Thread-based Task.
Task will be considered as a periodic task by default.
:class:`Task` is able to be registered in :class:`ScheduleManager` or run
directly.
Args:
job (callable): Job to be scheduled as a task.
name (str): Task name.
By default, a unique name is constructed.
args (tuple): Argument tuple for the job invocation.
Defaults to ().
kwargs (dict): Dictionary of keyword arguments for the job
invocation.
Defaults to {}.
ignore_skipped (bool): Set True to ignore skipped job if time
spent on job is longer than the task cycle time.
Defaults to True.
daemon (bool): Set True to use as a daemon task.
Defaults to True.
Attributes:
name (str): Task name.
daemon (bool): A boolean value indicating whether this task is based
on a daemon thread.
See `threading.Thread.daemon <https://docs.python.org/3/library/threading.html#threading.Thread.daemon>`_ for more detail.
"""
def __init__(self, job, name=None, args=(), kwargs=None,
ignore_skipped=True, daemon=True):
self.CHECK_INTERVAL = 1
# Flag (start task): Set to True if start() is called.
self._start = False
# Flag (stop task): Used to stop current task
self._stop_task = False
# Flag (pause task):
# Used to re-register the current task because threads can only
# be started once
self._pause_task = False
self._manager = None
self._tag = list() # Tag list
self._ignore_skipped = ignore_skipped # Ignore skipped job activity.
self._next_run = None  # datetime of the next scheduled run
self._delay = None # Task delay time
self._start_at = None # Task start time
self._is_periodic = True # A periodic task or a non-periodic task.
self._nonperiod_count = 0 # Count used for non-periodic task.
self._periodic_unit = None
self._periodic = None
self._at_time = None
self._at_week_day = None
self._at_day = None
if name is None:
name = "Task-{}".format(uuid.uuid4().hex)
super().__init__(target=job,
name=name,
args=args,
kwargs=kwargs,
daemon=daemon)
def __repr__(self):
status = "initial"
if self._start:
status = "started"
if self._stop_task:
status = "stopping"
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
d_format = "%y-%m-%d %H:%M:%S"
if self._next_run:
time_next_run = self._next_run.strftime(d_format)
else:
if self._start and self._start_at:
time_next_run = "Start At {}".format((self
._start_at
.strftime(d_format)))
else:
time_next_run = None
return "Task<({}, {}, {})>".format(self._name, status, time_next_run)
@property
def next_run(self):
"""datetime: Datetime when the job run at next time."""
returns = self._next_run
if self._start:
if not returns and self._start_at:
returns = self._start_at
return returns
@property
def is_running(self):
"""bool: Return True if the task is running."""
return self._start
@property
def manager(self):
"""ScheduleManager: Schedule manager which manages current task."""
return self._manager
@manager.setter
def manager(self, manager):
"""Register task into schedule manager.
Use ScheduleManager.register(Task) instead of using
Task.set_manager(manager).
Args:
manager (ScheduleManager): ScheduleManager instance.
"""
if not manager:
if self._manager is None:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
if self.name in self._manager:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
self._manager = None
return
if self.name not in manager:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
if self is not manager.task(self.name):
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
self._manager = manager
@property
def tag(self):
"""list: Tag list of the task."""
return self._tag
def add_tag(self, tag):
"""Add tag to task.
Args:
tag (obj): Tag.
Returns:
Task: Invoked task instance.
"""
if tag not in self._tag:
self._tag.append(tag)
return self
def add_tags(self, tags):
"""Add a list of tags to task.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
for tag in tags:
self.add_tag(tag)
return self
def remove_tag(self, tag):
"""Remove tag from task.
Args:
tag (obj): Tag.
Returns:
Task: Invoked task instance.
"""
if tag in self._tag:
self._tag.remove(tag)
return self
def remove_tags(self, tags):
"""Remove a list of tags from task.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
for tag in tags:
self.remove_tag(tag)
return self
def set_tags(self, tags):
"""Set tag list to task.
Replace old tag list.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
self._tag.clear()
for tag in tags:
if tag not in self._tag:
self._tag.append(tag)
return self
def delay(self, interval=None):
"""Delay task start time.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Or set None to cancel task delay time.
Defaults to None.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
if interval is None:
self._delay = None
else:
if isinstance(interval, timedelta):
self._start_at = None # Use delay instead of start time.
self._delay = interval
elif isinstance(interval, int):
self._start_at = None # Use delay instead of start time.
self._delay = timedelta(seconds=interval)
else:
time_pattern = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
if re.match(time_pattern, interval):
self._start_at = None # Use delay instead of start time.
tsp = interval.split(":")
self._delay = timedelta(hours=int(tsp[0]),
minutes=int(tsp[1]),
seconds=int(tsp[2]))
else:
raise TimeFormatError
return self
def start_at(self, at_time=None):
"""Set task start time.
Specify a particular time that the job should be start.
Args:
at_time (Union[str, datetime]): Start time.
A string or :obj:`datetime`.
A string can be in one of the following formats:
[`HH:MM:SS`, `mm-dd HH:MM:SS`].
Or set None to cancel task start time.
Defaults to None.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
if at_time is None:
self._start_at = None
else:
if isinstance(at_time, datetime):
self._delay = None # Use start time instead of delay.
self._start_at = at_time
else:
match1 = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
match2 = (r'^([0]?\d|[1][0-2])-([0-2]?\d|[3][0-1])'
r' ([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$')
if re.match(match1, at_time):
self._delay = None # Use start time instead of delay.
tsp = at_time.split(":")
self._start_at = datetime.now().replace(hour=int(tsp[0]),
minute=int(tsp[1]),
second=int(tsp[2]))
elif re.match(match2, at_time):
self._delay = None # Use start time instead of delay.
dtsp = at_time.split(" ")
dsp = dtsp[0].split("-")
tsp = dtsp[1].split(":")
self._start_at = datetime.now().replace(month=int(dsp[0]),
day=int(dsp[1]),
hour=int(tsp[0]),
minute=int(tsp[1]),
second=int(tsp[2]))
else:
raise TimeFormatError
return self
def nonperiodic(self, count):
"""See as an non-periodic task.
Args:
count (int): Do the job for a certain number of times.
Returns:
Task: Invoked task instance.
"""
if self._start:
raise OperationFailError("Task is already running.")
if count <= 0:
raise OperationFailError("Number of times must be greater than 0.")
self._is_periodic = False
self._nonperiod_count = count
return self
def periodic(self):
"""See as an periodic task.
Returns:
Task: Invoked task instance.
"""
if self._start:
raise OperationFailError("Task is already running.")
self._is_periodic = True
return self
def period(self, interval):
"""Scheduling periodic task.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
self._periodic_unit = "every"
if isinstance(interval, timedelta):
self._periodic = interval
elif isinstance(interval, int):
self._periodic = timedelta(seconds=interval)
else:
if re.match(r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$', interval):
tsp = interval.split(":")
self._periodic = timedelta(hours=int(tsp[0]),
minutes=int(tsp[1]),
seconds=int(tsp[2]))
else:
raise TimeFormatError
return self
def period_at(self, unit="day", at_time="00:00:00",
week_day="Monday", day=1):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Args:
unit (str): Time unit of the periodic task.
Defaults to `day`.
The following unit is available:
1. `day`: Run job everyday.
2. `week`: Run job every week.
3. `month`: Run job every month.
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Week to do the job.
Defaults to `Monday`.
This argument will only be used if unit is `week`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
day (int): Day to do the job.
Defaults to 1.
This argument will only be used if unit is `month`.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
time_pattern = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
week_day_list = {
"Monday": 0,
"Tuesday": 1,
"Wednesday": 2,
"Thursday": 3,
"Friday": 4,
"Saturday": 5,
"Sunday": 6
}
if unit == "day":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
elif unit == "week":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
if week_day not in week_day_list:
raise TimeFormatError
self._at_week_day = week_day_list[week_day]
elif unit == "month":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
if day not in range(1, 32):
raise TimeFormatError
self._at_day = day
else:
raise TimeFormatError
return self
def period_day_at(self, at_time="00:00:00"):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs everyday.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="day", at_time=at_time)
return self
def period_week_at(self, at_time="00:00:00", week_day="Monday"):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs every week.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Week to do the job.
Defaults to `Monday`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="week", at_time=at_time, week_day=week_day)
return self
def period_month_at(self, at_time="00:00:00", day=1):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs every month.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
day (int): Day to do the job.
Defaults to 1.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="month", at_time=at_time, day=day)
return self
def _set_next_run_init(self):
# First time the job run at.
if self._periodic_unit == "every":
self._next_run = datetime.now()
elif self._periodic_unit == "day":
self._set_next_run_init_day()
elif self._periodic_unit == "week":
self._set_next_run_init_week()
elif self._periodic_unit == "month":
self._set_next_run_init_month()
def _set_next_run_init_day(self):
run_time = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if run_time < datetime.now():
self._next_run = run_time + timedelta(days=1)
else:
self._next_run = run_time
def _set_next_run_init_week(self):
tmp_runtime = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
now_weekday = tmp_runtime.date().weekday()
if now_weekday < self._at_week_day:
tmp_runtime += timedelta(days=self._at_week_day-now_weekday)
elif now_weekday > self._at_week_day:
tmp_runtime += timedelta(days=7+self._at_week_day-now_weekday)
else:
if tmp_runtime < datetime.now():
tmp_runtime += timedelta(days=7)
self._next_run = tmp_runtime
def _set_next_run_init_month(self):
try:
tmp_runtime = datetime.now().replace(day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if datetime.now().day > self._at_day:
if tmp_runtime.month == 12:
tmp_runtime = tmp_runtime.replace(year=tmp_runtime.year+1,
month=1)
else:
try:
tmp_runtime = tmp_runtime.replace(month=(tmp_runtime
.month)+1)
except ValueError:
# Because day is out of range in next month.
tmp_runtime = tmp_runtime.replace(month=(tmp_runtime
.month)+2)
elif datetime.now().day == self._at_day:
if tmp_runtime < datetime.now():
if tmp_runtime.month == 12:
tmp_runtime = tmp_runtime.replace(year=(tmp_runtime
.year)+1,
month=1)
else:
try:
tmp_runtime = (tmp_runtime
.replace(month=tmp_runtime.month+1))
except ValueError:
# Because day is out of range in next month.
tmp_runtime = (tmp_runtime
.replace(month=tmp_runtime.month+2))
self._next_run = tmp_runtime
except ValueError:
# Because day is out of range in this month.
self._next_run = datetime.now().replace(month=(datetime
.now()
.month)+1,
day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
def _set_next_run(self):
if self._periodic_unit == "every":
self._set_next_run_every()
elif self._periodic_unit == "day":
self._set_next_run_day()
elif self._periodic_unit == "week":
self._set_next_run_week()
elif self._periodic_unit == "month":
self._set_next_run_month()
def _set_next_run_every(self):
if self._ignore_skipped:
next_ = self._next_run + self._periodic
if next_ < datetime.now():
rate = (datetime.now() - self._next_run) / self._periodic
next_ = self._next_run + math.ceil(rate) * self._periodic
if next_ == datetime.now():
next_ += self._periodic
self._next_run = next_
else:
self._next_run += self._periodic
def _set_next_run_day(self):
if self._ignore_skipped:
next_ = self._next_run + timedelta(days=1)
if next_ < datetime.now():
# Record current datetime to avoid 23:59:XX situation.
time_now = datetime.now()
next_ = next_.replace(month=time_now.month,
day=time_now.day)
if next_ <= datetime.now():
next_ += timedelta(days=1)
self._next_run = next_
else:
self._next_run += timedelta(days=1)
def _set_next_run_week(self):
if self._ignore_skipped:
next_ = self._next_run + timedelta(days=7)
if next_ < datetime.now():
next_ = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
weekday_ = next_.date().weekday()
if weekday_ < self._at_week_day:
next_ += timedelta(days=self._at_week_day-weekday_)
elif weekday_ > self._at_week_day:
next_ += timedelta(days=7+self._at_week_day-weekday_)
else:
if next_ < datetime.now():
next_ += timedelta(days=7)
if next_ <= datetime.now():
next_ += timedelta(days=7)
self._next_run = next_
else:
self._next_run += timedelta(days=7)
def _set_next_run_month(self):
if self._ignore_skipped:
if self._next_run.month == 12:
next_ = self._next_run.replace(year=self._next_run.year+1,
month=1)
else:
try:
next_ = self._next_run.replace(month=(self
._next_run
.month)+1)
except ValueError:
# Because day is out of range in next month.
next_ = self._next_run.replace(month=(self
._next_run
.month)+2)
if next_ < datetime.now():
try:
next_ = datetime.now().replace(day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if datetime.now().day > self._at_day:
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next month.
next_ = next_.replace(month=next_.month+2)
elif datetime.now().day == self._at_day:
if next_ < datetime.now():
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next
# month.
next_ = next_.replace(month=next_.month+2)
except ValueError:
next_ = datetime.now().replace(month=(datetime
.now()
.month)+1,
day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if next_ <= datetime.now():
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next month.
next_ = next_.replace(month=next_.month+2)
self._next_run = next_
else:
if self._next_run.month == 12:
self._next_run = self._next_run.replace(year=(self
._next_run
.year)+1,
month=1)
else:
try:
month_next = self._next_run.month+1
self._next_run = self._next_run.replace(month=month_next)
except ValueError:
# Because day is out of range in next month.
month_next = self._next_run.month+2
self._next_run = self._next_run.replace(month=month_next)
def _next_run_at(self):
if self._next_run is None:
self._set_next_run_init()
else:
self._set_next_run()
def start(self):
"""Start the Task's activity."""
if not self._periodic_unit:
raise OperationFailError("Please set period first.")
self._start = True
# Set start at by delay time
if self._delay:
self._start_at = datetime.now() + self._delay
super().start()
def stop(self):
"""Stop the Task's activity."""
if not self._start:
raise OperationFailError("Task is not running.")
self._start = False
self._stop_task = True
def pause(self):
"""Pause the Task's activity.
Works only if the task is registered into :class:`ScheduleManager`.
"""
if not self._start:
raise OperationFailError("Task is not running.")
if not self._manager:
raise OperationFailError("Register task into "
"ScheduleManager first.")
self._start = False
self._stop_task = True
self._pause_task = True
def _action_after_finish(self):
# Remove task from manager
if self._manager:
# Keep ScheduleManager instance
manager = self._manager
manager.unregister(self.name)
if self._pause_task:
# Thread-based object can only be started once.
# So create a new task with the same action, delete the current
# task, and register the new one to realize the pause action.
kwargs = None if self._kwargs == {} else self._kwargs
# New task
new_task = manager.register_task(name=self.name,
job=self._target,
args=self._args,
kwargs=kwargs)
new_task.set_tags(self.tag)
# schedule task
if self._periodic_unit == "every":
new_task.period(self._periodic)
else:
ref_week = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday",
None: None
}
time_str = "{}:{}:{}".format(str(self._at_time[0]),
str(self._at_time[1]),
str(self._at_time[2]))
new_task.period_at(unit=self._periodic_unit,
at_time=time_str,
week_day=ref_week[self._at_week_day],
day=self._at_day)
if not self._is_periodic:
new_task.nonperiodic(self._nonperiod_count)
if self._delay:
new_task.delay(self._start_at - datetime.now())
elif self._start_at:
if datetime.now() < self._start_at:
new_task.start_at(self._start_at)
def run(self):
"""Representing the Task's activity.
DO NOT CALL DIRECTLY.
"""
if not self._start:
raise OperationFailError("Use Task.start() instead.")
# Modified from :meth:`Thread.run`.
try:
# Delay or start at.
if self._start_at:
while not self._stop_task:
if datetime.now() >= self._start_at:
break
time.sleep(self.CHECK_INTERVAL)
self._next_run_at()
while not self._stop_task:
if datetime.now() >= self._next_run:
self._target(*self._args, **self._kwargs)
self._next_run_at()
if not self._is_periodic:
self._nonperiod_count -= 1
if self._nonperiod_count <= 0:
self._stop_task = True
break
time.sleep(self.CHECK_INTERVAL)
finally:
self._action_after_finish()
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
class TaskGroup:
"""Task group.
A set of tasks.
"""
def __init__(self, tasks=None):
"""Constructor
Args:
tasks (iterable): Task list.
"""
if not tasks:
self._tasks = list()
else:
self._tasks = list()
if isinstance(tasks, list):
self._tasks = tasks[:]
else:
for task in tasks:
self._tasks.append(task)
def __repr__(self):
return ("TaskGroup<("
"Tasks: {task_count}"
")>").format(task_count=len(self._tasks))
def __contains__(self, task):
"""Returns True if task is in the group."""
return task in self._tasks
def __iter__(self):
"""Iterate over tasks."""
return iter(self._tasks)
def __add__(self, other):
if isinstance(other, TaskGroup):
task_list = self._tasks + other._tasks
return TaskGroup(task_list)
return NotImplemented
@property
def count(self):
"""int: Number of tasks contained in the group."""
return len(self._tasks)
def set_manager(self, manager=None):
"""Change schedule manager of all tasks.
Task will be unregistered from old manager if it has been registered
in a manager.
Args:
manager (ScheduleManager): An existing schedule manager object.
Set None to create new schedule manager.
Returns:
ScheduleManager: Invoked ScheduleManager instance.
Raises:
TaskNameDuplicateError: There is a duplicate task name.
"""
if not manager:
manager = ScheduleManager()
else:
for task in self._tasks:
if task.name in manager:
error = "Duplicate task name <{}>.".format(task.name)
raise TaskNameDuplicateError(error)
for task in self._tasks:
if task.manager:
task.manager.unregister(name=task.name)
manager.register(task)
return manager
def add_tag(self, tag):
"""Add tag to tasks.
Args:
tag (obj): Tag.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.add_tag(tag)
return self
def add_tags(self, tags):
"""Add a list of tags to tasks.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.add_tags(tags)
return self
def remove_tag(self, tag):
"""Remove tag from tasks.
Args:
tag (obj): Tag.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.remove_tag(tag)
return self
def remove_tags(self, tags):
"""Remove a list of tags from tasks.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.remove_tags(tags)
return self
def set_tags(self, tags):
"""Set tag list to tasks.
Replace old tag list.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.set_tags(tags)
return self
def delay(self, interval=None):
"""Delay task start time.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Or set None to cancel task delay time.
Defaults to None.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.delay(interval)
return self
def start_at(self, at_time):
"""Set task start time.
Specify a particular time that the job should be start.
Args:
at_time (Union[str, datetime]): Start time.
A string or :obj:`datetime`.
A string can be in one of the following formats:
[`HH:MM:SS`, `mm-dd HH:MM:SS`].
Or set None to cancel task start time.
Defaults to None.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.start_at(at_time)
return self
def nonperiodic(self, count):
"""See as non-periodic tasks.
Args:
count (int): Do the job for a certain number of times.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.nonperiodic(count)
return self
def periodic(self):
"""See as periodic tasks.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.periodic()
return self
def period(self, interval):
"""Scheduling periodic tasks.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int
in seconds.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period(interval)
return self
def period_at(self,
unit="day", at_time="00:00:00",
week_day="Monday", day=1):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Args:
unit (str): Time unit of the periodic task.
Defaults to `day`.
The following unit is available:
1. `day`: Run job everyday.
2. `week`: Run job every week.
3. `month`: Run job every month.
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Week to do the job.
Defaults to `Monday`.
This argument will only be used if unit is `week`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
day (int): Day to do the job.
Defaults to 1.
This argument will only be used if unit is `month`.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_at(unit=unit,
at_time=at_time,
week_day=week_day,
day=day)
return self
def period_day_at(self, at_time="00:00:00"):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs everyday.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_day_at(at_time=at_time)
return self
def period_week_at(self, at_time="00:00:00", week_day="Monday"):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs every week.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Week to do the job.
Defaults to `Monday`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_week_at(at_time=at_time, week_day=week_day)
return self
def period_month_at(self, at_time="00:00:00", day=1):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs every month.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
day (int): Day to do the job.
Defaults to 1.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_month_at(at_time=at_time, day=day)
return self
def start(self):
"""Start the Tasks' activity."""
for task in self._tasks:
task.start()
def stop(self):
"""Stop the Tasks' activity."""
for task in self._tasks:
task.stop()
def pause(self):
"""Pause the Tasks' activity.
Works only if the task is registered into :class:`ScheduleManager`.
"""
new_task_list = list()
for task in self._tasks:
manager = task.manager
task_name = task.name
task.pause()
while task.manager is not None:
time.sleep(1)
if manager:
new_task_list.append(manager.task(task_name))
self._tasks = new_task_list[:]
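# Minimal usage sketch (illustration only, based on the API above):
# register a periodic task, let it tick a few times, then stop it.
if __name__ == "__main__":
    manager = ScheduleManager()
    demo = manager.register_task(job=lambda: print("tick"), name="demo-task")
    demo.period(2)   # run the job every 2 seconds
    demo.start()
    time.sleep(7)    # roughly three ticks
    demo.stop()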
|
schedule-manager
|
/schedule_manager-0.1.1-py3-none-any.whl/schedule_manager/manager.py
|
manager.py
|
import threading
import uuid
import re
import time
from datetime import datetime, timedelta
import math
from .exceptions import TaskNameDuplicateError
from .exceptions import TaskNotFoundError
from .exceptions import TimeFormatError
from .exceptions import OperationFailError
class ScheduleManager:
"""Task schedule manager."""
def __init__(self):
self._tasks = dict()
def __del__(self):
"""Destructor"""
# Make sure all tasks are not running.
self.running_tasks.stop()
def __contains__(self, name):
"""Returns True if task name is registered."""
return name in self._tasks
def __iter__(self):
"""Iterate over tasks name."""
return iter(self._tasks)
def __repr__(self):
return ("ScheduleManager<("
"Tasks: {c}, Running: {r}, Pending: {p}"
")>").format(c=self.count,
r=self.running_tasks.count,
p=self.pending_tasks.count)
@property
def count(self):
"""int: Number of tasks registered in the schedule manager."""
return len(self._tasks)
@property
def all_tasks(self):
"""TaskGroup: Get all tasks."""
return TaskGroup(list(self._tasks.values()))
@property
def running_tasks(self):
"""TaskGroup: Get all running tasks."""
task_list = list()
for name in self._tasks:
if self._tasks[name].is_running:
task_list.append(self._tasks[name])
return TaskGroup(task_list)
@property
def pending_tasks(self):
"""TaskGroup: Get all pending tasks."""
task_list = list()
for name in self._tasks:
if not self._tasks[name].is_running:
task_list.append(self._tasks[name])
return TaskGroup(task_list)
def task(self, name):
"""Get task registerd in schedule manager by name.
Args:
name (str): Task name.
Returns:
Task: Task instance.
Raises:
TaskNotFoundError: Task is not registered in schedule manager.
"""
if name not in self._tasks:
raise TaskNotFoundError
return self._tasks[name]
def _task_list(self, tag):
task_list = list()
if isinstance(tag, list):
for tag_ in tag:
for name in self._tasks:
if tag_ in self._tasks[name].tag:
if self._tasks[name] not in task_list:
task_list.append(self._tasks[name])
else:
for name in self._tasks:
if tag in self._tasks[name].tag:
task_list.append(self._tasks[name])
return task_list
def tasks(self, tag):
"""Get tasks registerd in schedule manager by name.
Args:
tag (Union[obj, list]): Tag or tag list.
Returns:
TaskGroup: TaskGroup instance.
"""
task_list = self._task_list(tag)
return TaskGroup(task_list)
def register(self, task):
"""Register a task.
Args:
task (Task): Task.
Returns:
Task: Registered task instance.
Raises:
TaskNameDuplicateError: Duplicate task name.
"""
if task.name in self._tasks:
raise TaskNameDuplicateError
self._tasks[task.name] = task
task.manager = self
return task
def register_task(self, job, name=None, args=(), kwargs=None,
ignore_skipped=True, daemon=True):
"""Create and register a task.
Args:
job (callable): Job to be scheduled.
name (str): Task name.
By default, a unique name is constructed.
args (tuple): Argument tuple for the job invocation.
Defaults to ().
kwargs (dict): Dictionary of keyword arguments for the job
invocation.
Defaults to {}.
ignore_skipped (bool): Set True to ignore skipped job if time
spent on job is longer than the task cycle time.
Defaults to True.
daemon (bool): Set True to use as a daemon task.
Defaults to True.
Returns:
Task: Registered task instance.
Raises:
TaskNameDuplicateError: Duplicate task name.
"""
if name is None:
name = "Task-{}".format(uuid.uuid4().hex)
while name in self._tasks:
name = "Task-{}".format(uuid.uuid4().hex)
elif name in self._tasks:
raise TaskNameDuplicateError
task = Task(name=name, job=job, args=args, kwargs=kwargs,
            ignore_skipped=ignore_skipped, daemon=daemon)
self._tasks[name] = task
task.manager = self
return task
def unregister(self, name=None, tag=None):
"""Unregister the task.
Args:
name (str): Unregister task by name.
tag (Union[obj, list]): Unregister tasks by tag or by
a list of tags.
"""
if name:
if name in self._tasks:
task = self._tasks[name]
del self._tasks[name]
task.manager = None
if tag:
task_list = self._task_list(tag)
for task in task_list:
del self._tasks[task.name]
task.manager = None
class Task(threading.Thread):
"""Thread-based Task.
A task is considered periodic by default.
A :class:`Task` can be registered in a :class:`ScheduleManager` or run
directly.
Args:
job (callable): Job to be scheduled as a task.
name (str): Task name.
By default, a unique name is constructed.
args (tuple): Argument tuple for the job invocation.
Defaults to ().
kwargs (dict): Dictionary of keyword arguments for the job
invocation.
Defaults to {}.
ignore_skipped (bool): Set True to ignore skipped job if time
spent on job is longer than the task cycle time.
Defaults to True.
daemon (bool): Set True to use as a daemon task.
Defaults to True.
Attributes:
name (str): Task name.
daemon (bool): A boolean value indicating whether this task is based
on a daemon thread.
See `threading.Thread.daemon <https://docs.python.org/3/library/threading.html#threading.Thread.daemon>`_ for more detail.
"""
def __init__(self, job, name=None, args=(), kwargs=None,
ignore_skipped=True, daemon=True):
self.CHECK_INTERVAL = 1
# Flag (start task): Set to True if start() is called.
self._start = False
# Flag (stop task): Used to stop current task
self._stop_task = False
# Flag (pause task):
# Used to re-register the current task, because threads can only
# be started once
self._pause_task = False
self._manager = None
self._tag = list() # Tag list
self._ignore_skipped = ignore_skipped # Ignore skipped job activity.
self._next_run = None # datetime when the job run at next time
self._delay = None # Task delay time
self._start_at = None # Task start time
self._is_periodic = True # A periodic task or a non-periodic task.
self._nonperiod_count = 0 # Count used for non-periodic task.
self._periodic_unit = None
self._periodic = None
self._at_time = None
self._at_week_day = None
self._at_day = None
if name is None:
name = "Task-{}".format(uuid.uuid4().hex)
super().__init__(target=job,
name=name,
args=args,
kwargs=kwargs,
daemon=daemon)
def __repr__(self):
status = "initial"
if self._start:
status = "started"
if self._stop_task:
status = "stopping"
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
d_format = "%y-%m-%d %H:%M:%S"
if self._next_run:
time_next_run = self._next_run.strftime(d_format)
else:
if self._start and self._start_at:
time_next_run = "Start At {}".format((self
._start_at
.strftime(d_format)))
else:
time_next_run = None
return "Task<({}, {}, {})>".format(self._name, status, time_next_run)
@property
def next_run(self):
"""datetime: Datetime when the job run at next time."""
returns = self._next_run
if self._start:
if not returns and self._start_at:
returns = self._start_at
return returns
@property
def is_running(self):
"""bool: Return True if the task is running."""
return self._start
@property
def manager(self):
"""ScheduleManager: Schedule manager which manages current task."""
return self._manager
@manager.setter
def manager(self, manager):
"""Register task into schedule manager.
Use ScheduleManager.register(task) instead of assigning this
property directly.
Args:
manager (ScheduleManager): ScheduleManager instance.
"""
if not manager:
if self._manager is None:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
if self.name in self._manager:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
self._manager = None
return
if self.name not in manager:
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
if self is not manager.task(self.name):
raise OperationFailError("Use ScheduleManager.register(Task)"
" instead.")
self._manager = manager
@property
def tag(self):
"""list: Tag list of the task."""
return self._tag
def add_tag(self, tag):
"""Add tag to task.
Args:
tag (obj): Tag.
Returns:
Task: Invoked task instance.
"""
if tag not in self._tag:
self._tag.append(tag)
return self
def add_tags(self, tags):
"""Add a list of tags to task.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
for tag in tags:
self.add_tag(tag)
return self
def remove_tag(self, tag):
"""Remove tag from task.
Args:
tag (obj): Tag.
Returns:
Task: Invoked task instance.
"""
if tag in self._tag:
self._tag.remove(tag)
return self
def remove_tags(self, tags):
"""Remove a list of tags from task.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
for tag in tags:
self.remove_tag(tag)
return self
def set_tags(self, tags):
"""Set tag list to task.
Replace old tag list.
Args:
tags (iterable): Tag list.
Returns:
Task: Invoked task instance.
"""
self._tag.clear()
for tag in tags:
if tag not in self._tag:
self._tag.append(tag)
return self
def delay(self, interval=None):
"""Delay task start time.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Or set None to cancel task delay time.
Defaults to None.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
if interval is None:
self._delay = None
else:
if isinstance(interval, timedelta):
self._start_at = None # Use delay instead of start time.
self._delay = interval
elif isinstance(interval, int):
self._start_at = None # Use delay instead of start time.
self._delay = timedelta(seconds=interval)
else:
time_pattern = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
if re.match(time_pattern, interval):
self._start_at = None # Use delay instead of start time.
tsp = interval.split(":")
self._delay = timedelta(hours=int(tsp[0]),
minutes=int(tsp[1]),
seconds=int(tsp[2]))
else:
raise TimeFormatError
return self
def start_at(self, at_time=None):
"""Set task start time.
Specify a particular time that the job should be start.
Args:
at_time (Union[str, datetime]): Start time.
A string or :obj:`datetime`.
A string can be in one of the following formats:
[`HH:MM:SS`, `mm-dd HH:MM:SS`].
Or set None to cancel task start time.
Defaults to None.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
if at_time is None:
self._start_at = None
else:
if isinstance(at_time, datetime):
self._delay = None # Use start time instead of delay.
self._start_at = at_time
else:
match1 = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
match2 = (r'^([0]?\d|[1][0-2])-([0-2]?\d|[3][0-1])'
r' ([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$')
if re.match(match1, at_time):
self._delay = None # Use start time instead of delay.
tsp = at_time.split(":")
self._start_at = datetime.now().replace(hour=int(tsp[0]),
minute=int(tsp[1]),
second=int(tsp[2]))
elif re.match(match2, at_time):
self._delay = None # Use start time instead of delay.
dtsp = at_time.split(" ")
dsp = dtsp[0].split("-")
tsp = dtsp[1].split(":")
self._start_at = datetime.now().replace(month=int(dsp[0]),
day=int(dsp[1]),
hour=int(tsp[0]),
minute=int(tsp[1]),
second=int(tsp[2]))
else:
raise TimeFormatError
return self
def nonperiodic(self, count):
"""See as an non-periodic task.
Args:
count (int): Do the job for a certain number of times.
Returns:
Task: Invoked task instance.
"""
if self._start:
raise OperationFailError("Task is already running.")
if count <= 0:
raise OperationFailError("Number of times must be greater than 0.")
self._is_periodic = False
self._nonperiod_count = count
return self
def periodic(self):
"""See as an periodic task.
Returns:
Task: Invoked task instance.
"""
if self._start:
raise OperationFailError("Task is already running.")
self._is_periodic = True
return self
def period(self, interval):
"""Scheduling periodic task.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
self._periodic_unit = "every"
if isinstance(interval, timedelta):
self._periodic = interval
elif isinstance(interval, int):
self._periodic = timedelta(seconds=interval)
else:
if re.match(r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$', interval):
tsp = interval.split(":")
self._periodic = timedelta(hours=int(tsp[0]),
minutes=int(tsp[1]),
seconds=int(tsp[2]))
else:
raise TimeFormatError
return self
def period_at(self, unit="day", at_time="00:00:00",
week_day="Monday", day=1):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Args:
unit (str): Time unit of the periodic task.
Defaults to `day`.
The following units are available:
1. `day`: Run job everyday.
2. `week`: Run job every week.
3. `month`: Run job every month.
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Weekday on which to do the job.
Defaults to `Monday`.
This argument will only be used if unit is `week`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
day (int): Day to do the job.
Defaults to 1.
This argument will only be used if unit is `month`.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
if self._start:
raise OperationFailError("Task is already running.")
time_pattern = r'^([0-1]?\d|[2][0-3]):[0-5]?\d:[0-5]?\d$'
week_day_list = {
"Monday": 0,
"Tuesday": 1,
"Wednesday": 2,
"Thursday": 3,
"Friday": 4,
"Saturday": 5,
"Sunday": 6
}
if unit == "day":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
elif unit == "week":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
if week_day not in week_day_list:
raise TimeFormatError
self._at_week_day = week_day_list[week_day]
elif unit == "month":
self._periodic_unit = unit
if not re.match(time_pattern, at_time):
raise TimeFormatError
tsp = at_time.split(":")
self._at_time = [int(i) for i in tsp]
if day not in range(1, 32):
raise TimeFormatError
self._at_day = day
else:
raise TimeFormatError
return self
def period_day_at(self, at_time="00:00:00"):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs everyday.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="day", at_time=at_time)
return self
def period_week_at(self, at_time="00:00:00", week_day="Monday"):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs every week.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Weekday on which to do the job.
Defaults to `Monday`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="week", at_time=at_time, week_day=week_day)
return self
def period_month_at(self, at_time="00:00:00", day=1):
"""Scheduling periodic task.
Specify a particular time that the job should be run at.
Job runs every month.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
day (int): Day to do the job.
Defaults to 1.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
Task: Invoked task instance.
Raises:
TimeFormatError: Invalid time format.
"""
self.period_at(unit="month", at_time=at_time, day=day)
return self
def _set_next_run_init(self):
# First time the job run at.
if self._periodic_unit == "every":
self._next_run = datetime.now()
elif self._periodic_unit == "day":
self._set_next_run_init_day()
elif self._periodic_unit == "week":
self._set_next_run_init_week()
elif self._periodic_unit == "month":
self._set_next_run_init_month()
def _set_next_run_init_day(self):
run_time = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if run_time < datetime.now():
self._next_run = run_time + timedelta(days=1)
else:
self._next_run = run_time
def _set_next_run_init_week(self):
tmp_runtime = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
now_weekday = tmp_runtime.date().weekday()
if now_weekday < self._at_week_day:
tmp_runtime += timedelta(days=self._at_week_day-now_weekday)
elif now_weekday > self._at_week_day:
tmp_runtime += timedelta(days=7+self._at_week_day-now_weekday)
else:
if tmp_runtime < datetime.now():
tmp_runtime += timedelta(days=7)
self._next_run = tmp_runtime
def _set_next_run_init_month(self):
try:
tmp_runtime = datetime.now().replace(day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if datetime.now().day > self._at_day:
if tmp_runtime.month == 12:
tmp_runtime = tmp_runtime.replace(year=tmp_runtime.year+1,
month=1)
else:
try:
tmp_runtime = tmp_runtime.replace(month=(tmp_runtime
.month)+1)
except ValueError:
# Because day is out of range in next month.
tmp_runtime = tmp_runtime.replace(month=(tmp_runtime
.month)+2)
elif datetime.now().day == self._at_day:
if tmp_runtime < datetime.now():
if tmp_runtime.month == 12:
tmp_runtime = tmp_runtime.replace(year=(tmp_runtime
.year)+1,
month=1)
else:
try:
tmp_runtime = (tmp_runtime
.replace(month=tmp_runtime.month+1))
except ValueError:
# Because day is out of range in next month.
tmp_runtime = (tmp_runtime
.replace(month=tmp_runtime.month+2))
self._next_run = tmp_runtime
except ValueError:
# Because day is out of range in this month.
self._next_run = datetime.now().replace(month=(datetime
.now()
.month)+1,
day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
def _set_next_run(self):
if self._periodic_unit == "every":
self._set_next_run_every()
elif self._periodic_unit == "day":
self._set_next_run_day()
elif self._periodic_unit == "week":
self._set_next_run_week()
elif self._periodic_unit == "month":
self._set_next_run_month()
def _set_next_run_every(self):
if self._ignore_skipped:
next_ = self._next_run + self._periodic
if next_ < datetime.now():
rate = (datetime.now() - self._next_run) / self._periodic
next_ = self._next_run + math.ceil(rate) * self._periodic
if next_ == datetime.now():
next_ += self._periodic
self._next_run = next_
else:
self._next_run += self._periodic
def _set_next_run_day(self):
if self._ignore_skipped:
next_ = self._next_run + timedelta(days=1)
if next_ < datetime.now():
# Record current datetime to avoid 23:59:XX situation.
time_now = datetime.now()
next_ = next_.replace(month=time_now.month,
day=time_now.day)
if next_ <= datetime.now():
next_ += timedelta(days=1)
self._next_run = next_
else:
self._next_run += timedelta(days=1)
def _set_next_run_week(self):
if self._ignore_skipped:
next_ = self._next_run + timedelta(days=7)
if next_ < datetime.now():
next_ = datetime.now().replace(hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
weekday_ = next_.date().weekday()
if weekday_ < self._at_week_day:
next_ += timedelta(days=self._at_week_day-weekday_)
elif weekday_ > self._at_week_day:
next_ += timedelta(days=7+self._at_week_day-weekday_)
else:
if next_ < datetime.now():
next_ += timedelta(days=7)
if next_ <= datetime.now():
next_ += timedelta(days=7)
self._next_run = next_
else:
self._next_run += timedelta(days=7)
def _set_next_run_month(self):
if self._ignore_skipped:
if self._next_run.month == 12:
next_ = self._next_run.replace(year=self._next_run.year+1,
month=1)
else:
try:
next_ = self._next_run.replace(month=(self
._next_run
.month)+1)
except ValueError:
# Because day is out of range in next month.
next_ = self._next_run.replace(month=(self
._next_run
.month)+2)
if next_ < datetime.now():
try:
next_ = datetime.now().replace(day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if datetime.now().day > self._at_day:
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next month.
next_ = next_.replace(month=next_.month+2)
elif datetime.now().day == self._at_day:
if next_ < datetime.now():
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next
# month.
next_ = next_.replace(month=next_.month+2)
except ValueError:
next_ = datetime.now().replace(month=(datetime
.now()
.month)+1,
day=self._at_day,
hour=self._at_time[0],
minute=self._at_time[1],
second=self._at_time[2])
if next_ <= datetime.now():
if next_.month == 12:
next_ = next_.replace(year=next_.year+1,
month=1)
else:
try:
next_ = next_.replace(month=next_.month+1)
except ValueError:
# Because day is out of range in next month.
next_ = next_.replace(month=next_.month+2)
self._next_run = next_
else:
if self._next_run.month == 12:
self._next_run = self._next_run.replace(year=(self
._next_run
.year)+1,
month=1)
else:
try:
month_next = self._next_run.month+1
self._next_run = self._next_run.replace(month=month_next)
except ValueError:
# Because day is out of range in next month.
month_next = self._next_run.month+2
self._next_run = self._next_run.replace(month=month_next)
def _next_run_at(self):
if self._next_run is None:
self._set_next_run_init()
else:
self._set_next_run()
def start(self):
"""Start the Task's activity."""
if not self._periodic_unit:
raise OperationFailError("Please set period first.")
self._start = True
# Set start at by delay time
if self._delay:
self._start_at = datetime.now() + self._delay
super().start()
def stop(self):
"""Stop the Task's activity."""
if not self._start:
raise OperationFailError("Task is not running.")
self._start = False
self._stop_task = True
def pause(self):
"""Pause the Task's activity.
Works only if the task is registered into a :class:`ScheduleManager`.
"""
if not self._start:
raise OperationFailError("Task is not running.")
if not self._manager:
raise OperationFailError("Register task into "
"ScheduleManager first.")
self._start = False
self._stop_task = True
self._pause_task = True
def _action_after_finish(self):
# Remove task from manager
if self._manager:
# Keep ScheduleManager instance
manager = self._manager
manager.unregister(self.name)
if self._pause_task:
# Thread-based objects can only be started once, so create a
# new task with the same action and register it after deleting
# the current task, to realize the pause action.
kwargs = None if self._kwargs == {} else self._kwargs
# New task
new_task = manager.register_task(name=self.name,
                                 job=self._target,
                                 args=self._args,
                                 kwargs=kwargs,
                                 ignore_skipped=self._ignore_skipped,
                                 daemon=self.daemon)
new_task.set_tags(self.tag)
# schedule task
if self._periodic_unit == "every":
new_task.period(self._periodic)
else:
ref_week = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday",
None: None
}
time_str = "{}:{}:{}".format(str(self._at_time[0]),
str(self._at_time[1]),
str(self._at_time[2]))
new_task.period_at(unit=self._periodic_unit,
at_time=time_str,
week_day=ref_week[self._at_week_day],
day=self._at_day)
if not self._is_periodic:
new_task.nonperiodic(self._nonperiod_count)
if self._delay:
new_task.delay(self._start_at - datetime.now())
elif self._start_at:
if datetime.now() < self._start_at:
new_task.start_at(self._start_at)
def run(self):
"""Representing the Task's activity.
DO NOT CALL DIRECTLY.
"""
if not self._start:
raise OperationFailError("Use Task.start() instead.")
# Modified from :meth:`Thread.run`.
try:
# Delay or start at.
if self._start_at:
while not self._stop_task:
if datetime.now() >= self._start_at:
break
time.sleep(self.CHECK_INTERVAL)
self._next_run_at()
while not self._stop_task:
if datetime.now() >= self._next_run:
self._target(*self._args, **self._kwargs)
self._next_run_at()
if not self._is_periodic:
self._nonperiod_count -= 1
if self._nonperiod_count <= 0:
self._stop_task = True
break
time.sleep(self.CHECK_INTERVAL)
finally:
self._action_after_finish()
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
class TaskGroup:
"""Task group.
A set of tasks.
"""
def __init__(self, tasks=None):
"""Constructor
Args:
tasks (iterable): Task list.
"""
if not tasks:
self._tasks = list()
else:
self._tasks = list()
if isinstance(tasks, list):
self._tasks = tasks[:]
else:
for task in tasks:
self._tasks.append(task)
def __repr__(self):
return ("TaskGroup<("
"Tasks: {task_count}"
")>").format(task_count=len(self._tasks))
def __contains__(self, task):
"""Returns True if task is in the group."""
return task in self._tasks
def __iter__(self):
"""Iterate over tasks."""
return iter(self._tasks)
def __add__(self, other):
if isinstance(other, TaskGroup):
task_list = self._tasks + other._tasks
return TaskGroup(task_list)
return NotImplemented
@property
def count(self):
"""int: Number of tasks contained in the group."""
return len(self._tasks)
def set_manager(self, manager=None):
"""Change schedule manager of all tasks.
Task will be unregistered from old manager if it has been registered
in a manager.
Args:
manager (ScheduleManager): An existing schedule manager object.
Set None to create new schedule manager.
Returns:
ScheduleManager: Invoked ScheduleManager instance.
Raises:
TaskNameDuplicateError: There is a duplicate task name.
"""
if not manager:
manager = ScheduleManager()
else:
for task in self._tasks:
if task.name in manager:
error = "Duplicate task name <{}>.".format(task.name)
raise TaskNameDuplicateError(error)
for task in self._tasks:
if task.manager:
task.manager.unregister(name=task.name)
manager.register(task)
return manager
def add_tag(self, tag):
"""Add tag to tasks.
Args:
tag (obj): Tag.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.add_tag(tag)
return self
def add_tags(self, tags):
"""Add a list of tags to tasks.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.add_tags(tags)
return self
def remove_tag(self, tag):
"""Remove tag from tasks.
Args:
tag (obj): Tag.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.remove_tag(tag)
return self
def remove_tags(self, tags):
"""Remove a list of tags from tasks.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.remove_tags(tags)
return self
def set_tags(self, tags):
"""Set tag list to tasks.
Replace old tag list.
Args:
tags (iterable): Tag list.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.set_tags(tags)
return self
def delay(self, interval=None):
"""Delay task start time.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int in
seconds.
Or set None to cancel task delay time.
Defaults to None.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.delay(interval)
return self
def start_at(self, at_time):
"""Set task start time.
Specify a particular time that the job should be start.
Args:
at_time (Union[str, datetime]): Start time.
A string or :obj:`datetime`.
A string can be in one of the following formats:
[`HH:MM:SS`, `mm-dd HH:MM:SS`].
Or set None to cancel task start time.
Defaults to None.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.start_at(at_time)
return self
def nonperiodic(self, count):
"""See as non-periodic tasks.
Args:
count (int): Do the job for a certain number of times.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.nonperiodic(count)
return self
def periodic(self):
"""See as periodic tasks.
Returns:
TaskGroup: Invoked TaskGroup instance.
"""
for task in self._tasks:
task.periodic()
return self
def period(self, interval):
"""Scheduling periodic tasks.
Args:
interval (Union[str, timedelta, int]): Time interval.
A string with format `HH:MM:SS` or :obj:`timedelta` or int
in seconds.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period(interval)
return self
def period_at(self,
unit="day", at_time="00:00:00",
week_day="Monday", day=1):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Args:
unit (str): Time unit of the periodic task.
Defaults to `day`.
The following units are available:
1. `day`: Run job everyday.
2. `week`: Run job every week.
3. `month`: Run job every month.
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Weekday on which to do the job.
Defaults to `Monday`.
This argument will only be used if unit is `week`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
day (int): Day to do the job.
Defaults to 1.
This argument will only be used if unit is `month`.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_at(unit=unit,
at_time=at_time,
week_day=week_day,
day=day)
return self
def period_day_at(self, at_time="00:00:00"):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs everyday.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_day_at(at_time=at_time)
return self
def period_week_at(self, at_time="00:00:00", week_day="Monday"):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs every week.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
week_day (str): Weekday on which to do the job.
Defaults to `Monday`.
A string should be one of following value:
[`"Monday"`, `"Tuesday"`, `"Wednesday"`, `"Thursday"`,
`"Friday"`, `"Saturday"`, `"Sunday"`]
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_week_at(at_time=at_time, week_day=week_day)
return self
def period_month_at(self, at_time="00:00:00", day=1):
"""Scheduling periodic tasks.
Specify a particular time that the job should be run at.
Job runs every month.
Args:
at_time (str): Time to do the job.
A string with format `HH:MM:SS`.
Defaults to `00:00:00`.
day (int): Day to do the job.
Defaults to 1.
Value should be in 1 ~ 31.
Job will be skipped if specific date is not available.
Returns:
TaskGroup: Invoked TaskGroup instance.
Raises:
TimeFormatError: Invalid time format.
"""
for task in self._tasks:
task.period_month_at(at_time=at_time, day=day)
return self
def start(self):
"""Start the Tasks' activity."""
for task in self._tasks:
task.start()
def stop(self):
"""Stop the Tasks' activity."""
for task in self._tasks:
task.stop()
def pause(self):
"""Pause the Tasks' activity.
Works only if the tasks are registered into a :class:`ScheduleManager`.
"""
new_task_list = list()
for task in self._tasks:
manager = task.manager
task_name = task.name
task.pause()
while task.manager is not None:
time.sleep(1)
if manager:
new_task_list.append(manager.task(task_name))
self._tasks = new_task_list[:]
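# ---------------------------------------------------------------------------
# Editor's note: minimal usage sketch, not part of the original module.
# It only uses the public API defined above (register_task, period, start,
# stop); the job, task name, and timings are arbitrary placeholders.
if __name__ == "__main__":
    def job(message):
        print(message, datetime.now())

    manager = ScheduleManager()
    task = manager.register_task(job=job, args=("tick",))
    task.period(2)   # run the job every 2 seconds
    task.start()
    time.sleep(7)    # let it fire a few times
    task.stop()      # the task unregisters itself once it finishes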
| 0.801081 | 0.154983 |
# Simple parser for the petrocollege portal (SharePoint)
## Installation
```cmd
pip install schedule_parser_portal_petrocollege
```
## Use the SharePoint class
Create an object of the SharePoint class:
```python
from SharePoint import SharePoint
share_point = SharePoint(username, password)
```
### Send any request to the server
```python
# Returns the response JSON
result = share_point.get_request_json(<some_url>)
```
### Get JSON elements of a List
```python
# returns a list of title and link elements
links = share_point.get_data_from_lists_type(result)
print(links)
#[ {
# "title": Title,
# "link" : "Lists(guid'9c095153-274d-4c73-9b8b-4e3dd6af89e5')/Items(16)"
# }
#]
```
### Get files (AttachmentFiles)
```python
# url_list is a link like "Lists(guid'9c095153-274d-4c73-9b8b-4e3dd6af89e5')/Items(16)"
files = share_point.get_data_from_attachment_files_type(share_point.get_request_json(<url_list> + "/AttachmentFiles"))
# save files (pass your portal's /_api/web URL as the first argument)
for file in files:
    share_point.save_file_by_url(<api_url_with_web>, file['ServerRelativeUrl'], file['FileName'], 'files')
```
## Get dict from file
```python
from File import ExcelFile
file = ExcelFile('<path_to_xlsx_file>')
data = file.get_object()
```
Returns a list of dicts like:
```
{
'teacher': 'Ярошенко С.П.',
'debug_column': 324,
'teacher_lessons':
[
{
'lesson':
{
'discipline': 'Теор.гос.и права',
'room': '101',
'building': '4',
'groups': ['11-29'],
'_Lesson__current_string': 'Теор.гос.и права 4/101',
'is_dop': False,
'subgroup': 0
},
'date_lesson': datetime.datetime(2022, 9, 1, 0, 0),
'number_of_lesson': 2,
'debug_position_row': 21,
'debug_position_column': 324,
'debug_position_coordinate': 'LL21'
}
}
...
]
}
```
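## End-to-end example
A minimal sketch combining both classes. The credentials and `<lists_url>` are placeholders you must substitute; the `/_api/web` base follows the `save_file_by_url` docstring, and the handling of `link['link']` is an assumption (it may already be an absolute URL on your server).
```python
from SharePoint import SharePoint
from File import ExcelFile

api_url_with_web = 'https://portal.petrocollege.ru/_api/web'
share_point = SharePoint('<username>', '<password>')

links = share_point.get_data_from_lists_type(share_point.get_request_json('<lists_url>'))
for link in links:
    # depending on the server, link['link'] may need the portal base URL prefixed
    files = share_point.get_data_from_attachment_files_type(
        share_point.get_request_json(link['link'] + '/AttachmentFiles'))
    for file in files:
        share_point.save_file_by_url(api_url_with_web, file['ServerRelativeUrl'],
                                     file['FileName'], 'files')
        data = ExcelFile('files/' + file['FileName']).get_object()
```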
|
schedule-parser-portal-petrocollege
|
/schedule_parser_portal_petrocollege-0.0.6.tar.gz/schedule_parser_portal_petrocollege-0.0.6/README.md
|
README.md
|
| 0.44746 | 0.654363 |
import requests
from requests_ntlm import HttpNtlmAuth
import os
import warnings
warnings.filterwarnings("ignore")
class SharePoint:
def __init__(self, username, password):
self.auth = HttpNtlmAuth(username, password)
self.headers = {'Accept': 'application/json;odata=verbose'}
def __make_request(self, request_url):
"""
Make a request to the server with the stored credentials
:param request_url: string
:return: Response
"""
return requests.get(request_url, verify=False, auth=self.auth, headers=self.headers)
def get_request_json(self, request_url):
"""
Make a request to the server and return the response as JSON
:param request_url: string
:return: json
"""
return self.__make_request(request_url).json()
@staticmethod
def get_data_from_lists_type(response_json):
"""
Get the pages available for the SharePoint List type
Returns a list of the form
[
{
"title": Title,
"link" : "Lists(guid'9c095153-274d-4c73-9b8b-4e3dd6af89e5')/Items(16)"
}
]
:param response_json: json result of get_request_json()
:return: []
"""
name_url_list = []
for k in response_json['d']['results']:
name_url_list.append({'title': k['Title'], 'link': k['__metadata']['id']})
return name_url_list
@staticmethod
def get_data_from_attachment_files_type(response_json):
"""
Get the files attached to the page
Returns a list of the form
[
{
"FileName": FileName,
"ServerRelativeUrl" : "/Lists/2014/Attachments/16/01_преп_текущие_01.09-11.09.xlsx"
}
]
:param response_json: json result of get_request_json()
:return: []
"""
name_url_list = []
for k in response_json['d']['results']:
name_url_list.append({'FileName': k['FileName'], 'ServerRelativeUrl': k['ServerRelativeUrl']})
return name_url_list
def save_file_by_url(self, api_url_with_web, server_relative_url, file_name, to_path=''):
"""
Save a file by its ServerRelativeUrl
:param server_relative_url: the file's ServerRelativeUrl
:param api_url_with_web: ex. https://portal.petrocollege.ru/_api/web/
:param file_name: FileName
:param to_path: path to save
:return: True if the file exists on disk after saving
"""
if not os.path.exists(to_path):
os.makedirs(to_path)
full_path = to_path + "/" + file_name
response = self.__make_request(
api_url_with_web + "/GetFileByServerRelativeUrl('" + server_relative_url + "')/$value")
with open(full_path, 'wb') as f:
    f.write(response.content)
print("File " + file_name + " added to path " + to_path)
return os.path.isfile(full_path)
|
schedule-parser-portal-petrocollege
|
/schedule_parser_portal_petrocollege-0.0.6.tar.gz/schedule_parser_portal_petrocollege-0.0.6/src/SharePoint.py
|
SharePoint.py
|
| 0.295738 | 0.094636 |
import openpyxl
from datetime import datetime
from Lesson import Lesson
class ExcelFile:
def __init__(self, path_file):
self.file = openpyxl.load_workbook(path_file)
self.active_sheet = self.file.active
self.dates = self.active_sheet['A']
def get_dates_with_rows_number(self):
"""
Return a list of lesson dates with their Excel row numbers
:return: list [{'date': datetime.datetime(2022, 9, 1, 0, 0), 'rows': [20, 21, 22, 23, 24]}]
"""
list_dates_rows_number = []
for x in self.dates:
if x.value is not None and any(map(str.isdigit, x.value)):
list_dates_rows_number.append({
"date": self.__split_date(x.value),
"rows": list(range(x.row, x.row + 5))
})
return list_dates_rows_number
def __split_date(self, text_date):
"""
Parse the date from a schedule date cell
:param text_date: string ex. Чт 01.09.22
:return: datetime.datetime(2022, 9, 1, 0, 0)
"""
date = datetime.strptime(text_date.split(" ")[-1], "%d.%m.%y")
return date
def get_object(self):
"""
:return: list of dicts of the form
{
'teacher': 'Ярошенко С.П.',
'debug_column': 324,
'teacher_lessons':
[
{
'lesson':
{
'discipline': 'Теор.гос.и права',
'room': '101',
'building': '4',
'groups': ['11-29'],
'_Lesson__current_string': 'Теор.гос.и права 4/101',
'is_dop': False,
'subgroup': 0
},
'date_lesson': datetime.datetime(2022, 9, 1, 0, 0),
'number_of_lesson': 2,
'debug_position_row': 21,
'debug_position_column': 324,
'debug_position_coordinate': 'LL21'
}
}
...
]
}
"""
teachers_has_lessons = []
date_lesson = datetime.min
dates_with_rows_number = self.get_dates_with_rows_number()
cols = self.active_sheet.max_column - 1
for col in self.active_sheet.iter_cols(min_col=3, min_row=1, max_col=cols):
teacher_has_lesson = {}
for cell in col:
if cell.row == 1:
teacher_has_lesson['teacher'] = cell.value
teacher_has_lesson['debug_column'] = cell.column
teacher_has_lesson["teacher_lessons"] = []
elif cell.value is not None and cell.value.strip():
for item in dates_with_rows_number:
if cell.row in item['rows']:
date_lesson = item['date']  # date the lesson takes place
teacher_has_lesson["teacher_lessons"].append(
{
"lesson": Lesson(cell.value).__dict__,
'date_lesson': date_lesson,
"number_of_lesson": self.active_sheet["B" + str(cell.row)].value,
"debug_position_row": cell.row,
"debug_position_column": cell.column,
"debug_position_coordinate": cell.coordinate,
})
teachers_has_lessons.append(teacher_has_lesson)
return teachers_has_lessons
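# ---------------------------------------------------------------------------
# Editor's note: minimal consumption sketch, not part of the original module.
# 'schedule.xlsx' is a placeholder for a real schedule file; the dict keys
# follow the structure documented in get_object() above.
if __name__ == "__main__":
    teachers = ExcelFile("schedule.xlsx").get_object()
    wanted = datetime(2022, 9, 1)
    for teacher in teachers:
        lessons = [item for item in teacher["teacher_lessons"]
                   if item["date_lesson"] == wanted]
        if lessons:
            print(teacher["teacher"], "has", len(lessons), "lesson(s)")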
|
schedule-parser-portal-petrocollege
|
/schedule_parser_portal_petrocollege-0.0.6.tar.gz/schedule_parser_portal_petrocollege-0.0.6/src/File.py
|
File.py
|
| 0.384797 | 0.275425 |
from datetime import datetime
class Lesson:
__current_string = ''
def __init__(self, string_to_parse):
self.discipline = ''
self.room = ''
self.building = '0'
self.groups = [Lesson.get_course(string_to_parse[:6].strip())]
self.__current_string = string_to_parse[6:].strip()
self.is_dop = False
self.subgroup = 0
self.__get_one_more_group_if_have()
self.__split_cabinet_room_from_str() \
.__get_attr_from_discipline(). \
__split_room()
def __get_one_more_group_if_have(self):
"""
If one discipline has two groups, capture the second group as well
:return: Lesson
"""
if self.__current_string[0].isdigit():
group_to_add = self.__current_string[:6].strip()
self.groups.append(Lesson.get_course(group_to_add))
self.__current_string = self.__current_string[6:].strip()
return self
def __split_cabinet_room_from_str(self):
"""
To split the string correctly, find where the room part starts (using the last space)
:return:
"""
place_space_from_end = self.__current_string.rfind(" ")
if place_space_from_end != -1:
self.discipline = self.__current_string[:place_space_from_end + 1].strip()
self.room = self.__current_string[place_space_from_end + 1:].strip()
return self
def __split_room(self):
"""
Split the room into building and room number
:return:
"""
if "/" in self.room:
self.building, self.room = self.room.split("/")
self.room = self.room.replace("'", "")
return self
def __get_attr_from_discipline(self):
"""
Extract attributes of the discipline: extra lesson (ДОП)? subgroup?
:return:
"""
discipline = self.discipline.replace('[СДО]', '').replace("[]", '')
if Lesson.has_numbers(discipline):
if discipline[1] == 'О':
self.subgroup = 0
else:
self.subgroup = discipline[1]
discipline = discipline[4:]
if 'ДОП' in discipline:
discipline = discipline.replace("ДОП", '').strip()
self.is_dop = True
self.discipline = discipline
return self
@staticmethod
def has_numbers(inputString):
return any(char.isdigit() for char in inputString)
@staticmethod
def get_course(group_name):
group_data = {"group": group_name, 'year': "2222", "course": "0"}
today = datetime.now()
year_end = today.year
year_start = year_end - 10
for i in range(year_start+1, year_end+1, 1):
year = str(i)
if year[-1] == group_name[1]:
course = year_end - i + 1
group_data = {"group": group_name, "year": year, "course": course}
return group_data
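# ---------------------------------------------------------------------------
# Editor's note: quick parsing sketch, not part of the original module.
# The input string mirrors the cell format shown in the package README.
if __name__ == "__main__":
    lesson = Lesson("11-29 Теор.гос.и права 4/101")
    print(lesson.discipline)              # Теор.гос.и права
    print(lesson.building, lesson.room)   # 4 101
    print(lesson.groups[0]["group"])      # 11-29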
|
schedule-parser-portal-petrocollege
|
/schedule_parser_portal_petrocollege-0.0.6.tar.gz/schedule_parser_portal_petrocollege-0.0.6/src/Lesson.py
|
Lesson.py
|
| 0.571169 | 0.25389 |
Remind — Schedule Notification Reminders
========================================
.. image:: https://img.shields.io/pypi/v/schedule-reminder.svg
:target: https://pypi.python.org/pypi/schedule-reminder
.. image:: https://img.shields.io/pypi/pyversions/schedule-reminder.svg
:target: https://pypi.python.org/pypi/schedule-reminder/
:Author: Ken Kundert
:Version: 1.1
:Released: 2022-11-07
Remind schedules notification reminders. You can specify the time either using
a specific time, such as 3:30pm, or you can specify it by the time from now in
minutes or hours, or you can use both. So, you can say::
remind 4pm
remind 4pm -15m
remind 10m
In the first case the reminder goes off at 4pm, in the second it goes off at
3:45pm, and in the third it goes off in 10 minutes. When the time expires
a notification is raised. You can specify the message in the notification using
the -m or --msg option. Or you can add the message after specifying the time.
Any argument that cannot be processed as a time switches the argument processing
from time to message, and all subsequent arguments are taken to be part of the
message::
remind 1h meet Maria
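The same reminder can be given with the -m option; for example, combining
a time, an offset, and an explicit message::

    remind 4pm -15m -m "leave for the meeting"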
You can specify the time from now using seconds, minutes, hours, etc. For
example::
remind 3h 15m
You can use *noon* and *midnight* as aliases for 12PM and 12AM.
When specifying the time of day, you can use the following formats::
'h:mm:ss A': ex. 1:30:00 PM, 1:30:00 pm
'h:mm:ssA': ex. 1:30:00PM, 1:30:00pm
'h:mm A': ex. 1:30 PM, 1:30 pm
'h:mmA': ex. 1:30PM, 1:30pm
'hA': ex. 1PM or 1pm
'HH:mm:ss': ex. 13:00:00
'HH:mm': ex. 13:00
Be aware that *remind* runs in the background until the appointed time, issues
the notification, and only then terminates. If the process is killed or
somehow lost, perhaps by restarting the computer, the reminder is also lost.
However, you can put the computer to sleep. When the computer wakes, you will
either receive a past due notification with an indication that it is being given
late, or the process will resume waiting for the appointed time.
You can use pip to install the program::
pip3 install --user schedule-reminder
Releases
--------
**Latest development release**:
| Version: 1.1
| Released: 2022-11-07
**1.1 (2022-11-07)**:
- Enhance implementation so reminders re-synchronize after computer wakes.
**1.0 (2020-07-19)**:
- Initial production release.
|
schedule-reminder
|
/schedule-reminder-1.1.tar.gz/schedule-reminder-1.1/README.rst
|
README.rst
|
| 0.681939 | 0.385693 |
import logging
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
logger = logging.getLogger('schedule_tweet.browser')
class Browser():
def __init__(self, driver):
self.driver = driver or webdriver.Firefox()
self.wait = WebDriverWait(self.driver, 5)
def load(self, url):
logger.debug(f'load {url}')
self.driver.get(url)
def _when_visible(self, css_selector):
locator = (By.CSS_SELECTOR, css_selector)
condition = expected_conditions.visibility_of_element_located(locator)
return self.wait.until(condition)
def _when_visible_all(self, css_selector):
self._when_visible(css_selector)
return self.driver.find_elements(By.CSS_SELECTOR, css_selector)
def _when_clickable(self, css_selector):
locator = (By.CSS_SELECTOR, css_selector)
condition = expected_conditions.element_to_be_clickable(locator)
return self.wait.until(condition)
def find(self, css_selector):
logger.debug(f'find {css_selector}')
return self._when_visible(css_selector)
def find_all(self, css_selector):
logger.debug(f'find_all {css_selector}')
return self._when_visible_all(css_selector)
def title_all(self, css_selector):
logger.debug(f'title_all {css_selector}')
return [
element.get_attribute('title').strip() for element
in self._when_visible_all(css_selector)
]
def count(self, css_selector):
logger.debug(f'count {css_selector}')
return len(self._when_visible_all(css_selector))
def text(self, css_selector):
logger.debug(f'text {css_selector}')
return self._when_visible(css_selector).text.strip()
def is_visible(self, css_selector):
logger.debug(f'is_visible {css_selector}')
try:
return bool(self._when_visible(css_selector))
except Exception:
return False
def click(self, css_selector):
logger.debug(f'click {css_selector}')
self._when_clickable(css_selector).click()
def fill(self, css_selector, text):
logger.debug(f'fill {css_selector} ({len(text)} characters)')
element = self._when_clickable(css_selector)
element.clear()
element.send_keys(text)
def value(self, css_selector):
logger.debug(f'value {css_selector}')
return self._when_clickable(css_selector).get_attribute('value').strip()
def submit(self, css_selector):
logger.debug(f'submit {css_selector}')
element = self._when_clickable(css_selector)
element.submit()
def scroll_to(self, css_selector):
logger.debug(f'scroll_to {css_selector}')
self.driver.execute_script(f'document.querySelector("{css_selector}").scrollIntoView()')
def save_screenshot(self, path):
logger.debug(f'save_screenshot {path}')
self.driver.save_screenshot(path)
def quit(self, screenshot_file=None):
with_screenshot = f'(screenshot: {screenshot_file})' if screenshot_file else ''
logger.debug(f'quit {with_screenshot}')
try:
if screenshot_file:
self.driver.save_screenshot(screenshot_file)
self.driver.quit()
except Exception:
pass
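# ---------------------------------------------------------------------------
# Editor's note: minimal usage sketch, not part of the original module.
# Browser(None) falls back to webdriver.Firefox(), so Firefox and
# geckodriver must be installed; the URL and selector are placeholders.
if __name__ == "__main__":
    browser = Browser(None)
    browser.load("https://example.com")
    print(browser.text("h1"))
    browser.quit()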
|
schedule-tweet
|
/schedule_tweet-1.0.0-py3-none-any.whl/schedule_tweet/browser.py
|
browser.py
|
| 0.394318 | 0.065965 |
import os
import logging
from datetime import datetime, timedelta
from schedule_tweet.browser import Browser
from schedule_tweet.page_objects import landing_page, login_page, app_page
__all__ = ('Session', 'session')
logger = logging.getLogger('schedule_tweet.session')
class Session():
def __init__(self, username, password, phone=None, driver=None, screenshot_file=None):
self.browser = Browser(driver)
self.username = username.lstrip('@')
self.password = password
self.phone = phone
self.screenshot_file = (
screenshot_file or
os.getenv('SCREENSHOT_FILE') or
os.path.join(os.getcwd(), 'error.png')
)
def open(self):
browser = self.browser
try:
logger.info('loading')
browser.load('https://tweetdeck.twitter.com/')
browser.click(landing_page.login_button)
logger.info('logging in')
browser.fill(login_page.form_username, self.username)
while not browser.value(login_page.form_password):
browser.fill(login_page.form_password, self.password)
browser.submit(login_page.form)
logger.info('verifying phone number')
if browser.is_visible(login_page.form_phone):
if self.phone:
browser.fill(login_page.form_phone, self.phone)
browser.submit(login_page.form_phone)
else:
raise Exception('TweetDeck login prompted for phone number, but none was provided')
else:
logger.info('no phone number verification needed')
logger.info('opening left pane')
browser.find(app_page.left_pane)
if browser.is_visible(app_page.left_pane_open_button):
browser.click(app_page.left_pane_open_button)
browser.click(app_page.left_pane_remember_state_checkbox)
else:
logger.info('left pane is already open')
account = f'@{self.username}'
logger.info(f'selecting account: {account}')
if browser.count(app_page.account_buttons) > 1:
browser.click(app_page.account_button_selected)
for title in browser.title_all(app_page.account_buttons):
if title.lower() == account:
browser.click(app_page.account_button.format(title=title))
        except BaseException:
            # capture a screenshot for debugging before re-raising
            browser.quit(self.screenshot_file)
            raise
def tweet(self, dt, text):
browser = self.browser
try:
logger.info('filling tweet text')
browser.fill(app_page.textarea, text)
logger.info('checking tweet length')
if browser.is_visible(app_page.textarea_char_count):
char_count = browser.text(app_page.textarea_char_count)
count = int(char_count or 0)
if count < 0:
raise Exception(f'The tweet is too long: {count}')
else:
logger.info('tweet length is OK')
else:
logger.info('tweet length is OK')
logger.info('opening calendar widget')
browser.click(app_page.calendar_open_button)
browser.scroll_to(app_page.calendar_bottom)
hour = str(dt.hour)
minute = str(dt.minute)
am_pm = datetime.strftime(dt, '%p')
logger.info(f'setting time to {hour}:{minute}{am_pm}')
browser.fill(app_page.calendar_hour_input, hour)
browser.fill(app_page.calendar_minute_input, minute)
if browser.text(app_page.calendar_am_pm_button).upper() != am_pm.upper():
browser.click(app_page.calendar_am_pm_button)
expected_calendar_title = datetime.strftime(dt, '%B %Y')
logger.info(f'setting month to {expected_calendar_title}')
while True:
                calendar_title = browser.text(app_page.calendar_title)
                # strptime validates that the title parses as '%B %Y'
                datetime.strptime(calendar_title, '%B %Y')
if calendar_title != expected_calendar_title:
logger.debug(f'clicking on next month: {calendar_title} (current) ≠ {expected_calendar_title} (expected)')
browser.click(app_page.calendar_next_month_button)
else:
logger.debug(f'keeping current month: {calendar_title} (current) = {expected_calendar_title} (expected)')
break
logger.info(f'setting day to {dt.day}')
browser.click(app_page.calendar_day.format(day=dt.day))
            logger.info('verifying date & time')
schedule_button_label = browser.text(app_page.calendar_open_button_label)
hour_not_zero_padded = datetime.strftime(dt, '%I').lstrip('0')
day_not_zero_padded = datetime.strftime(dt, '%d').lstrip('0')
# example: '4:01 PM · Mon 4 Dec 2017'
expected_schedule_button_label = datetime.strftime(
dt,
f'{hour_not_zero_padded}:%M %p · %a {day_not_zero_padded} %b %Y'
)
            if schedule_button_label != expected_schedule_button_label:
                raise Exception(f"TweetDeck UI displays '{schedule_button_label}' as the effective date & time, but '{expected_schedule_button_label}' is expected")
            else:
                logger.debug(f'correct, {schedule_button_label} = {expected_schedule_button_label}')
            logger.info('submitting tweet')
browser.click(app_page.textarea)
browser.click(app_page.submit_button)
browser.click(app_page.textarea)
        except BaseException:
            # capture a screenshot for debugging before re-raising
            browser.quit(self.screenshot_file)
            raise
def close(self):
self.browser.quit()
def __enter__(self):
self.open()
return self
def __exit__(self, exc, *args):
self.close()
session = Session  # lowercase alias used in the README examples
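
# Illustrative usage sketch (not part of the original module; the
# environment variable names below are arbitrary assumptions). It mirrors
# the package README: open a session, schedule a tweet, close.
if __name__ == '__main__':
    import os
    from datetime import timedelta
    with Session(os.environ['TW_USERNAME'], os.environ['TW_PASSWORD']) as s:
        dt = datetime.now() + timedelta(minutes=2)
        s.tweet(dt, f'Hello from schedule_tweet {dt.isoformat()}')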
|
schedule-tweet
|
/schedule_tweet-1.0.0-py3-none-any.whl/schedule_tweet/__init__.py
|
__init__.py
| 0.367043 | 0.064949 |
Schedule Tweet
==============
Automated tweet scheduling using TweetDeck. Uses `Selenium <http://docs.seleniumhq.org/>`_ to spawn a browser, log in to TweetDeck with your credentials, and schedule a tweet on your behalf by clicking on things in the app.
Installation
------------
Install `geckodriver <https://github.com/mozilla/geckodriver>`_. On macOS with `Homebrew <http://homebrew.sh/>`_ you can use ``brew install geckodriver``. Then...
.. code:: shell
$ pip install schedule_tweet
Only **Python 3.6** or higher is supported.
Usage
-----
.. code:: python
>>> import os
>>> from datetime import datetime, timedelta
>>>
>>> import schedule_tweet
>>>
>>> username = 'schedule_tw'
>>> password = os.getenv('PASSWORD')
>>> now_dt = datetime.now()
>>>
>>> with schedule_tweet.session(username, password) as session:
... dt = now_dt + timedelta(minutes=2)
... session.tweet(dt, f'First Tweet 🚀 {dt.isoformat()}')
...
... dt = now_dt + timedelta(minutes=3)
... session.tweet(dt, f'Second Tweet 💥 {dt.isoformat()}')
Tests
-----
Obviously, TweetDeck's authors can change anything at any time, which may or may not break this tool. That's why it is tested by a `regular nightly Travis CI build <https://travis-ci.org/honzajavorek/schedule-tweet>`_. If the build is able to schedule a tweet in the sample `@schedule_tw <https://twitter.com/schedule_tw>`_ account, the tool is assumed to still work and the build passes. If the build badge is red, the tool doesn't work anymore and needs to be updated.
.. image:: https://travis-ci.org/honzajavorek/schedule-tweet.svg?branch=master
:target: https://travis-ci.org/honzajavorek/schedule-tweet
Why
---
Twitter doesn't support scheduling tweets in its API. Scheduling is provided only as a feature of `TweetDeck <http://tweetdeck.twitter.com/>`_, their advanced official client. Unlike other clients which support scheduling tweets, TweetDeck is free of charge, it is an official tool made by Twitter themselves, and it supports `teams <https://blog.twitter.com/official/en_us/a/2015/introducing-tweetdeck-teams.html>`_.
The ``schedule-tweet`` tool was originally built to save precious volunteering time of the `PyCon CZ 2017 <https://cz.pycon.org/2017/>`_ social media team.
|
schedule-tweet
|
/schedule_tweet-1.0.0-py3-none-any.whl/schedule_tweet-1.0.0.dist-info/DESCRIPTION.rst
|
DESCRIPTION.rst
| 0.752286 | 0.370795 |
.. :changelog:
History
-------
1.2.0 (2023-04-10)
++++++++++++++++++
- Dropped support for Python 3.6; added support for Python 3.10 and 3.11.
- Add timezone support for .at(). See #517. Thanks @chrimaho!
- Get next run by tag (#463) Thanks @jweijers!
- Add py.typed file. See #521. Thanks @Akuli!
- Fix the re pattern of the 'days'. See #506. Thanks @sunpro108!
- Fix test_until_time failure when run early. See #563. Thanks @emollier!
- Fix crash in repr of a partially constructed job. See #569. Thanks @CPickens42!
- Code cleanup and modernization. See #567, #536. Thanks @masa-08 and @SergBobrovsky!
- Documentation improvements and typo fixes. See #469, #479, #493, #519, #520. Thanks to @NaelsonDouglas, @chrimaho, @rudSarkar!
1.1.0 (2021-04-09)
++++++++++++++++++
- Added @repeat() decorator. See #148. Thanks @rhagenaars!
- Added execute .until(). See #195. Thanks @fredthomsen!
- Added job retrieval filtered by tags using get_jobs('tag'). See #419. Thanks @skenvy!
- Added type annotations. See #427. Thanks @martinthoma!
- Bugfix: str() of job when there is no __name__. See #430. Thanks @biggerfisch!
- Improved error messages. See #280, #439. Thanks @connorskees and @sosolidkk!
- Improved logging. See #193. Thanks @zcking!
- Documentation improvements and typo fixes. See #424, #435, #436, #453, #437, #448. Thanks @ebllg!
1.0.0 (2021-01-20)
++++++++++++++++++
Depending on your configuration, the following bugfixes might change schedule's behaviour:
- Fix: idle_seconds crashes when no jobs are scheduled. See #401. Thanks @yoonghm!
- Fix: day.at('HH:MM:SS') where HMS=now+10s doesn't run today. See #331. Thanks @qmorek!
- Fix: hour.at('MM:SS'), the seconds are set to 00. See #290. Thanks @eladbi!
- Fix: Long-running jobs skip a day when they finish in the next day. See #404. Thanks @4379711!
Other changes:
- Dropped Python 2.7 and 3.5 support, added 3.8 and 3.9 support. See #409
- Fix RecursionError when the job is passed to the do function as an arg. See #190. Thanks @connorskees!
- Fix DeprecationWarning of 'collections'. See #296. Thanks @gaguirregabiria!
- Replaced Travis with Github Actions for automated testing
- Revamp and extend documentation. See #395
- Improved tests. Thanks @connorskees and @Jamim!
- Changed log messages to DEBUG level. Thanks @aisk!
0.6.0 (2019-01-20)
++++++++++++++++++
- Make at() accept timestamps with 1 second precision (#267). Thanks @NathanWailes!
- Introduce proper exception hierarchy (#271). Thanks @ConnorSkees!
0.5.0 (2017-11-16)
++++++++++++++++++
- Keep partially scheduled jobs from breaking the scheduler (#125)
- Add support for random intervals (Thanks @grampajoe and @gilbsgilbs)
0.4.3 (2017-06-10)
++++++++++++++++++
- Improve docs & clean up docstrings
0.4.2 (2016-11-29)
++++++++++++++++++
- Publish to PyPI as a universal (py2/py3) wheel
0.4.0 (2016-11-28)
++++++++++++++++++
- Add proper HTML (Sphinx) docs available at https://schedule.readthedocs.io/
- CI builds now run against Python 2.7 and 3.5 (3.3 and 3.4 should work fine but are untested)
- Fixed an issue with ``run_all()`` and having more than one job that deletes itself in the same iteration. Thanks @alaingilbert.
- Add ability to tag jobs and to cancel jobs by tag. Thanks @Zerrossetto.
- Improve schedule docs. Thanks @Zerrossetto.
- Additional docs fixes by @fkromer and @yetingsky.
0.3.2 (2015-07-02)
++++++++++++++++++
- Fixed issues where scheduling a job with a functools.partial as the job function fails. Thanks @dylwhich.
- Fixed an issue where scheduling a job to run every >= 2 days would cause the initial execution to happen one day early. Thanks @WoLfulus for identifying this and providing a fix.
- Added a FAQ item to describe how to schedule a job that runs only once.
0.3.1 (2014-09-03)
++++++++++++++++++
- Fixed an issue with unicode handling in setup.py that was causing trouble on Python 3 and Debian (https://github.com/dbader/schedule/issues/27). Thanks to @waghanza for reporting it.
- Added an FAQ item to describe how to deal with job functions that throw exceptions. Thanks @mplewis.
0.3.0 (2014-06-14)
++++++++++++++++++
- Added support for scheduling jobs on specific weekdays. Example: ``schedule.every().tuesday.do(job)`` or ``schedule.every().wednesday.at("13:15").do(job)`` (Thanks @abultman.)
- Run tests against Python 2.7 and 3.4. Python 3.3 should continue to work but we're not actively testing it on CI anymore.
0.2.1 (2013-11-20)
++++++++++++++++++
- Fixed history (no code changes).
0.2.0 (2013-11-09)
++++++++++++++++++
- This release introduces two new features in a backwards compatible way:
- Allow jobs to cancel repeated execution: Jobs can be cancelled by calling ``schedule.cancel_job()`` or by returning ``schedule.CancelJob`` from the job function. (Thanks to @cfrco and @matrixise.)
- Updated ``at_time()`` to allow running jobs at a particular time every hour. Example: ``every().hour.at(':15').do(job)`` will run ``job`` 15 minutes after every full hour. (Thanks @mattss.)
- Refactored unit tests to mock ``datetime`` in a cleaner way. (Thanks @matts.)
0.1.11 (2013-07-30)
+++++++++++++++++++
- Fixed an issue with ``next_run()`` throwing a ``ValueError`` exception when the job queue is empty. Thanks to @dpagano for pointing this out and thanks to @mrhwick for quickly providing a fix.
0.1.10 (2013-06-07)
+++++++++++++++++++
- Fixed issue with ``at_time`` jobs not running on the same day the job is created (Thanks to @mattss)
0.1.9 (2013-05-27)
++++++++++++++++++
- Added ``schedule.next_run()``
- Added ``schedule.idle_seconds()``
- Args passed into ``do()`` are forwarded to the job function at call time
- Increased test coverage to 100%
0.1.8 (2013-05-21)
++++++++++++++++++
- Changed default ``delay_seconds`` for ``schedule.run_all()`` to 0 (from 60)
- Increased test coverage
0.1.7 (2013-05-20)
++++++++++++++++++
- API change: renamed ``schedule.run_all_jobs()`` to ``schedule.run_all()``
- API change: renamed ``schedule.run_pending_jobs()`` to ``schedule.run_pending()``
- API change: renamed ``schedule.clear_all_jobs()`` to ``schedule.clear()``
- Added ``schedule.jobs``
0.1.6 (2013-05-20)
++++++++++++++++++
- Fix packaging
- README fixes
0.1.4 (2013-05-20)
++++++++++++++++++
- API change: renamed ``schedule.tick()`` to ``schedule.run_pending_jobs()``
- Updated README and ``setup.py`` packaging
0.1.0 (2013-05-19)
++++++++++++++++++
- Initial release
|
schedule
|
/schedule-1.2.0.tar.gz/schedule-1.2.0/HISTORY.rst
|
HISTORY.rst
| 0.669961 | 0.334617 |
`schedule <https://schedule.readthedocs.io/>`__
===============================================
.. image:: https://github.com/dbader/schedule/workflows/Tests/badge.svg
:target: https://github.com/dbader/schedule/actions?query=workflow%3ATests+branch%3Amaster
.. image:: https://coveralls.io/repos/dbader/schedule/badge.svg?branch=master
:target: https://coveralls.io/r/dbader/schedule
.. image:: https://img.shields.io/pypi/v/schedule.svg
:target: https://pypi.python.org/pypi/schedule
Python job scheduling for humans. Run Python functions (or any other callable) periodically using a friendly syntax.
- A simple-to-use API for scheduling jobs, made for humans.
- In-process scheduler for periodic jobs. No extra processes needed!
- Very lightweight and no external dependencies.
- Excellent test coverage.
- Tested on Python 3.7, 3.8, 3.9, 3.10, and 3.11
Usage
-----
.. code-block:: bash
$ pip install schedule
.. code-block:: python
import schedule
import time
def job():
print("I'm working...")
schedule.every(10).seconds.do(job)
schedule.every(10).minutes.do(job)
schedule.every().hour.do(job)
schedule.every().day.at("10:30").do(job)
schedule.every(5).to(10).minutes.do(job)
schedule.every().monday.do(job)
schedule.every().wednesday.at("13:15").do(job)
schedule.every().day.at("12:42", "Europe/Amsterdam").do(job)
schedule.every().minute.at(":17").do(job)
def job_with_argument(name):
print(f"I am {name}")
schedule.every(10).seconds.do(job_with_argument, name="Peter")
while True:
schedule.run_pending()
time.sleep(1)
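Jobs can also be tagged so related jobs can be cancelled as a group. A short sketch (``greet`` is an illustrative callable; ``tag`` and ``clear`` are from the library's documented API):
.. code-block:: python
    import schedule
    def greet(name):
        print(f"Hello {name}")
    # tag jobs so related ones can be cancelled together later
    schedule.every().day.do(greet, "Andrea").tag("daily-tasks", "friend")
    schedule.every().hour.do(greet, "John").tag("hourly-tasks", "guest")
    # cancel every job carrying the 'daily-tasks' tag
    schedule.clear("daily-tasks")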
Documentation
-------------
Schedule's documentation lives at `schedule.readthedocs.io <https://schedule.readthedocs.io/>`_.
Meta
----
Daniel Bader - `@dbader_org <https://twitter.com/dbader_org>`_ - [email protected]
Inspired by `Adam Wiggins' <https://github.com/adamwiggins>`_ article `"Rethinking Cron" <https://adam.herokuapp.com/past/2010/4/13/rethinking_cron/>`_ and the `clockwork <https://github.com/Rykian/clockwork>`_ Ruby module.
Distributed under the MIT license. See `LICENSE.txt <https://github.com/dbader/schedule/blob/master/LICENSE.txt>`_ for more information.
https://github.com/dbader/schedule
|
schedule
|
/schedule-1.2.0.tar.gz/schedule-1.2.0/README.rst
|
README.rst
| 0.896679 | 0.511839 |
Thanks to all the wonderful folks who have contributed to schedule over the years:
- mattss <https://github.com/mattss>
- mrhwick <https://github.com/mrhwick>
- cfrco <https://github.com/cfrco>
- matrixise <https://github.com/matrixise>
- abultman <https://github.com/abultman>
- mplewis <https://github.com/mplewis>
- WoLfulus <https://github.com/WoLfulus>
- dylwhich <https://github.com/dylwhich>
- fkromer <https://github.com/fkromer>
- alaingilbert <https://github.com/alaingilbert>
- Zerrossetto <https://github.com/Zerrossetto>
- yetingsky <https://github.com/yetingsky>
- schnepp <https://github.com/schnepp> <https://bitbucket.org/saschaschnepp>
- grampajoe <https://github.com/grampajoe>
- gilbsgilbs <https://github.com/gilbsgilbs>
- Nathan Wailes <https://github.com/NathanWailes>
- Connor Skees <https://github.com/ConnorSkees>
- qmorek <https://github.com/qmorek>
- aisk <https://github.com/aisk>
- MichaelCorleoneLi <https://github.com/MichaelCorleoneLi>
- sijmenhuizenga <https://github.com/SijmenHuizenga>
- eladbi <https://github.com/eladbi>
- chankeypathak <https://github.com/chankeypathak>
- vubon <https://github.com/vubon>
- gaguirregabiria <https://github.com/gaguirregabiria>
- rhagenaars <https://github.com/RHagenaars>
- Skenvy <https://github.com/skenvy>
- zcking <https://github.com/zcking>
- Martin Thoma <https://github.com/MartinThoma>
- ebllg <https://github.com/ebllg>
- fredthomsen <https://github.com/fredthomsen>
- biggerfisch <https://github.com/biggerfisch>
- sosolidkk <https://github.com/sosolidkk>
- rudSarkar <https://github.com/rudSarkar>
- chrimaho <https://github.com/chrimaho>
- jweijers <https://github.com/jweijers>
- Akuli <https://github.com/Akuli>
- NaelsonDouglas <https://github.com/NaelsonDouglas>
- SergBobrovsky <https://github.com/SergBobrovsky>
- CPickens42 <https://github.com/CPickens42>
- emollier <https://github.com/emollier>
- sunpro108 <https://github.com/sunpro108>
|
schedule
|
/schedule-1.2.0.tar.gz/schedule-1.2.0/AUTHORS.rst
|
AUTHORS.rst
| 0.46393 | 0.844537 |
# Scheduled Futures
concurrent.futures extension with a ScheduledThreadPoolExecutor to handle delayed and periodic futures with rescheduling
## Preamble
[concurrent.futures](https://docs.python.org/3/library/concurrent.futures.html) is pretty awesome, but it does not handle periodic work, which comes up a fair amount when creating applications.
[apscheduler](https://github.com/agronholm/apscheduler) is great, but trying to wait for jobs to complete involves a racy mess of callbacks.
[asyncio](https://docs.python.org/3/library/asyncio.html) is also good and can handle periodic work simply with an `asyncio.sleep` call, but this is more trouble than it is worth when most of your codebase uses system calls / dlls / synchronous libraries you don't want to rewrite or wrap in `asyncio.to_thread` calls.
This package was created to solve this problem. Please see [Features](#features) below.
Documentation consists of this README and the code itself.
## Table of Contents
- [Scheduled Futures](#scheduled-futures)
- [Preamble](#preamble)
- [Table of Contents](#table-of-contents)
- [Inspiration](#inspiration)
- [Features](#features)
- [Technologies](#technologies)
- [Install](#install)
- [Development](#development)
- [Example](#example)
- [Code](#code)
- [Output](#output)
## Inspiration
This package was inspired by [this feature request](https://github.com/python/cpython/issues/62156) and some of the code there.
## Features
- Schedule futures at a fixed delay using a thread pool executor
- Rescheduling of `ScheduledFuture` instances after they have started
- `ScheduledThreadPoolExecutor` is a subclass of `ThreadPoolExecutor`, so it can be used anywhere `ThreadPoolExecutor` can, such as for the asyncio default executor (see the sketch below this list)
- Logged warnings when a `ScheduledFuture`'s runtime exceeds its period, indicating it may be scheduled to run too quickly
- Logged warnings when a `ScheduledFuture` is run later than scheduled, indicating you may need more workers to keep up
- Statistics on the number of executions / exceptions / total runtime / average runtime for each `ScheduledFuture`
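As a quick illustration of the default-executor bullet above, here is a minimal sketch (only `ScheduledThreadPoolExecutor` comes from this package; everything else is standard `asyncio`, and `blocking_call` is a stand-in):
```python
import asyncio
from scheduled_futures import ScheduledThreadPoolExecutor

def blocking_call():
    return 'done'

async def main(loop):
    # run_in_executor(None, ...) routes through the loop's default executor
    return await loop.run_in_executor(None, blocking_call)

stpool = ScheduledThreadPoolExecutor()
loop = asyncio.new_event_loop()
loop.set_default_executor(stpool)  # accepted because it subclasses ThreadPoolExecutor
print(loop.run_until_complete(main(loop)))
loop.close()
stpool.shutdown()
```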
## Technologies
- Python >= 3.5
## Install
```shell
pip install scheduled_futures
```
## Development
Pull requests and new feature requests are welcome.
## Example
### Code
```python
import time
from concurrent.futures import wait, CancelledError
import logging
from scheduled_futures import ScheduledThreadPoolExecutor
logging.basicConfig() # to see warning messages from lib
# which are pumped into a null handler by default
def work():
print(time.time(), 'work!')
def slow_work():
time.sleep(0.25)
print(time.time(), 'slow work...huuh')
with ScheduledThreadPoolExecutor() as stpool:
stpool.submit(work) # same interface as regular ThreadPoolExecutor still works
# simple demo of scheduling a callable a little later
print('\nDelayed future example')
print(time.time(), 'work scheduled')
future = stpool.schedule(work, start=time.time() + 0.5)
time.sleep(1)
# simple demo of scheduling a callable periodically
print('\nPeriodic future example')
future = stpool.schedule(work, period=0.25)
time.sleep(1)
future.cancel()
wait([future])
# show log warning for each execution because the future
# ran for longer than the period between runs
print('\nPeriod too short for long-running future example')
future = stpool.schedule(slow_work, period=0.20)
time.sleep(1)
future.cancel()
wait([future])
with ScheduledThreadPoolExecutor() as stpool:
print('\nRescheduling work example')
future = stpool.schedule(work, period=0.25)
time.sleep(0.55)
print('Rescheduling')
stpool.reschedule(future, period=1)
time.sleep(2)
def slow_work2():
time.sleep(0.15)
print(time.time(), 'slow work2...huuuuuuh')
with ScheduledThreadPoolExecutor(late_run_limit=0.1, max_workers=1) as stpool:
# show log warning before some executions because there are not enough workers
# to keep up with the execution schedule
print('\nNot enough workers example')
futures = []
for _ in range(3):
futures.append(stpool.schedule(slow_work2, period=0.20))
time.sleep(1)
list(map(lambda f: f.cancel(), futures))
wait(futures)
i = 0
def cancelled_work():
global i
i += 1
if i >= 3:
raise CancelledError('Stop working, now!')
print(time.time(), 'ran without cancellation')
with ScheduledThreadPoolExecutor() as stpool:
# cancel a periodic from inside a periodic
print('\nCancel from inside callable example')
future = stpool.schedule(cancelled_work, period=0.25)
try:
future.result()
except CancelledError:
print('work cancelled!')
```
### Output
```text
1654566752.6947718 work!
Delayed future example
1654566752.6957695 work scheduled
1654566753.2106197 work!
Periodic future example
1654566753.700693 work!
1654566753.9519553 work!
1654566754.2048206 work!
1654566754.4566417 work!
1654566754.7094283 work!
Period too short for long-running future example
1654566754.963385 slow work...huuh
WARNING:scheduled_futures:Periodic scheduled future runtime exceeded period.
1654566755.4197686 slow work...huuh
WARNING:scheduled_futures:Periodic scheduled future runtime exceeded period.
1654566755.8865035 slow work...huuh
WARNING:scheduled_futures:Periodic scheduled future runtime exceeded period.
Rescheduling work example
1654566755.8884244 work!
1654566756.1499033 work!
1654566756.4140623 work!
Rescheduling
1654566756.4449573 work!
1654566757.454106 work!
1654566758.462207 work!
Not enough workers example
1654566758.6189985 slow work2...huuuuuuh
WARNING:scheduled_futures:Late to run scheduled future.
1654566758.7743778 slow work2...huuuuuuh
WARNING:scheduled_futures:Late to run scheduled future.
1654566758.932357 slow work2...huuuuuuh
WARNING:scheduled_futures:Late to run scheduled future.
1654566759.0854318 slow work2...huuuuuuh
WARNING:scheduled_futures:Late to run scheduled future.
1654566759.2414527 slow work2...huuuuuuh
WARNING:scheduled_futures:Late to run scheduled future.
1654566759.3976445 slow work2...huuuuuuh
WARNING:scheduled_futures:Late to run scheduled future.
1654566759.5528805 slow work2...huuuuuuh
Cancel from inside callable example
1654566759.554882 ran without cancellation
1654566759.8188088 ran without cancellation
work cancelled!
```
|
scheduled-futures
|
/scheduled_futures-1.1.0.tar.gz/scheduled_futures-1.1.0/README.md
|
README.md
| 0.372049 | 0.694982 |
import time
from abc import ABCMeta, abstractmethod
DEFAULT_INTERVAL = 5.0
class Scheduled_poller(metaclass=ABCMeta):
    """
    Base class for classes that poll a task regularly, with a constant minimum time interval between each poll.
    Warning: Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task
    (so the polling interval might be longer than polling_interval_secs).
    ToDo: An alternative name might be Scheduled_task
    """
def __init__(self):
"""
Construct a new Poller object (Poller is an abstract class)
"""
self.running = False
self.polling_interval_secs = DEFAULT_INTERVAL
@abstractmethod
def do_work(self):
"""
Perform the work to be done, during each poll (aka 'scheduled task')
        :raises This procedure must be overridden or it will raise a NotImplementedError
"""
raise NotImplementedError("Must override method: do_work")
def start(self, polling_interval_secs=DEFAULT_INTERVAL):
"""
Start (or re-start) the poller. This will run the do_work procedure every self.polling_interval_secs seconds
If the do_work procedure takes longer than polling_interval_secs, the next poll will take place as
soon as the task has finished:
Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task.
:param polling_interval_secs: time interval (seconds) between scheduled runs.
        :raises polling_interval_secs must be greater than 0 or a ValueError will be raised.
:type polling_interval_secs: float
"""
        if polling_interval_secs <= 0.0:
            raise ValueError("polling_interval_secs must be greater than 0")
        self.polling_interval_secs = polling_interval_secs
        self.running = True
        while self.running:
            # use a monotonic clock so wall-clock adjustments don't skew timing
            start = time.monotonic()
            self.do_work()
            work_duration = time.monotonic() - start
            # sleep only for whatever remains of the polling interval
            time.sleep(max(0, self.polling_interval_secs - work_duration))
def stop(self):
"""
        Stop the poller if it is running. If it is not running, do nothing.
"""
self.running = False
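
# Illustrative usage sketch (not part of the original module): subclass
# Scheduled_poller, override do_work(), then call start()/stop(). The
# HeartbeatPoller below is a hypothetical example class.
if __name__ == '__main__':
    class HeartbeatPoller(Scheduled_poller):
        def __init__(self):
            super().__init__()
            self.polls = 0

        def do_work(self):
            self.polls += 1
            print('poll', self.polls)
            if self.polls >= 3:
                # stopping from inside do_work() ends the start() loop
                self.stop()

    HeartbeatPoller().start(polling_interval_secs=0.5)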
|
scheduled-poller
|
/scheduled_poller-1.3-py3-none-any.whl/scheduled_poller/scheduled_poller.py
|
scheduled_poller.py
| 0.708818 | 0.307839 |
import time
from abc import ABCMeta, abstractmethod
DEFAULT_INTERVAL = 5.0
class Poller(metaclass=ABCMeta):
    """
    Base class for classes that poll a task regularly, with a constant minimum time interval between each poll.
    Warning: Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task
    (so the polling interval might be longer than polling_interval_secs).
    ToDo: An alternative name might be ScheduledTask
    """
def __init__(self):
"""
Construct a new Poller object (Poller is an abstract class)
"""
self.running = False
        self.polling_interval_secs = DEFAULT_INTERVAL
@abstractmethod
def do_work(self):
"""
Perform the work to be done, during each poll (aka 'scheduled task')
        :raises This procedure must be overridden or it will raise a NotImplementedError
"""
raise NotImplementedError("Must override method: do_work")
def start(self, polling_interval_secs=DEFAULT_INTERVAL):
"""
Start (or re-start) the poller. This will run the do_work procedure every self.polling_interval_secs seconds
If the do_work procedure takes longer than polling_interval_secs, the next poll will take place as
soon as the task has finished:
Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task.
:param polling_interval_secs: time interval (seconds) between scheduled runs.
        :raises polling_interval_secs must be greater than 0 or a ValueError will be raised.
:type polling_interval_secs: float
"""
        if polling_interval_secs <= 0.0:
            raise ValueError("polling_interval_secs must be greater than 0")
        # store the validated interval; the loop below reads it from self
        self.polling_interval_secs = polling_interval_secs
        self.running = True
        while self.running:
            # use a monotonic clock so wall-clock adjustments don't skew timing
            start = time.monotonic()
            self.do_work()
            work_duration = time.monotonic() - start
            time.sleep(max(0, self.polling_interval_secs - work_duration))
def stop(self):
"""
        Stop the poller if it is running. If it is not running, do nothing.
"""
self.running = False
|
scheduled-poller
|
/scheduled_poller-1.3-py3-none-any.whl/scheduled_poller/poller.py
|
poller.py
| 0.699357 | 0.3295 |
import time
from abc import ABCMeta, abstractmethod
DEFAULT_INTERVAL = 5.0
class Poller(metaclass=ABCMeta):
    """
    Base class for classes that poll a task regularly, with a constant minimum time interval between each poll.
    Warning: Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task
    (so the polling interval might be longer than polling_interval_secs).
    ToDo: An alternative name might be ScheduledTask
    """
def __init__(self):
"""
Construct a new Poller object (Poller is an abstract class)
"""
self.running = False
        self.polling_interval_secs = DEFAULT_INTERVAL
@abstractmethod
def do_work(self):
"""
Perform the work to be done, during each poll (aka 'scheduled task')
        :raises This procedure must be overridden or it will raise a NotImplementedError
"""
raise NotImplementedError("Must override method: do_work")
def start(self, polling_interval_secs=DEFAULT_INTERVAL):
"""
Start (or re-start) the poller. This will run the do_work procedure every self.polling_interval_secs seconds
If the do_work procedure takes longer than polling_interval_secs, the next poll will take place as
soon as the task has finished:
Each polling interval is the maximum of a) polling_interval_secs and b) the time taken to do the task.
:param polling_interval_secs: time interval (seconds) between scheduled runs.
        :raises polling_interval_secs must be greater than 0 or a ValueError will be raised.
:type polling_interval_secs: float
"""
        if polling_interval_secs <= 0.0:
            raise ValueError("polling_interval_secs must be greater than 0")
        # store the validated interval; the loop below reads it from self
        self.polling_interval_secs = polling_interval_secs
        self.running = True
        while self.running:
            # use a monotonic clock so wall-clock adjustments don't skew timing
            start = time.monotonic()
            self.do_work()
            work_duration = time.monotonic() - start
            time.sleep(max(0, self.polling_interval_secs - work_duration))
def stop(self):
"""
        Stop the poller if it is running. If it is not running, do nothing.
"""
self.running = False
|
scheduled-poller
|
/scheduled_poller-1.3-py3-none-any.whl/poller/poller.py
|
poller.py
| 0.699357 | 0.3295 |
Released under MIT license
# Description
This package provides functionality to work with scheduled tasks (cron-like) in Python.
The main intention is to let you use planned scheduled tasks in lazy environments,
such as a web server, by providing functions to check the previous and next execution time of a task (**get_previous_time** and **get_next_time**).
This package doesn't parse cron strings and is not fully compatible with cron.
It currently doesn't support last-day-of-month or last-weekday functionality;
however, it supports providing the day-of-week number (#) or providing a week number.
Rules can be provided in a form of list of integers, range object (start, stop, and step will be used), or None.
# Installation
```
pip install scheduled_task
```
# Quick start
#### Task that executes twice a day: at 00:00 and 00:30, get next execution time
```python
from scheduled_task import ScheduledTask
task = ScheduledTask(minutes=[0, 30], hours=[0], days=None, months=None, years=None)
print(task.get_next_time())
```
#### Task that executes every 1st day of Month
```python
from scheduled_task import ScheduledTask
from datetime import datetime
task = ScheduledTask(minutes=[0], hours=[0], days=[1], months=None, years=None)
print(task.get_previous_time(current_datetime=datetime(2016, 11, 19)))
# Prints datetime(2016, 11, 1, 0, 0)
```
#### More complex example:
Get the next and previous USA presidential election day by taking the day after the first Monday of November every 4th year
```python
from scheduled_task import ScheduledTask
from datetime import timedelta
task = ScheduledTask(minutes=[0], hours=[0], days_of_week=[0], days_of_week_num=[0], months=[11],
years=range(1848, 9999, 4))
print(task.get_next_time() + timedelta(days=1))
print(task.get_previous_time() + timedelta(days=1))
```
# Rules
#### Rule types
When creating a ScheduledTask object, you can provide rules of when this task must be executed.
Every rule can be of 3 types:
- **list**: List of values. A list may contain a single value.
- **range**: Range of values, optionally with a step. For example, days=range(2, 31, 2) means "every even day of the month".
- **None**: None means every valid value (* in cron).
#### Rule fields
| Field | Value | Strategies | Description |
|------------------|--------|---------------------------------|----------------------------------------------------------------------------------------|
| minutes | 0-59 | * | Minutes |
| hours | 0-23 | * | Hours |
| days | 1-31 | days_of_month | Days |
| days_of_week | 0-6 | days_of_week, days_of_week_num | Days of week - Monday to Sunday |
| days_of_week_num | 0-4 | days_of_week_num | Number of day of week. For example, 0 and Friday means every 1st Friday of a month |
| weeks | 0-5 | days_of_week | Week number. 0 and Friday means every Friday that happens in the first week of a month |
| months | 1-12 | * | Months |
| years | 0-9999 | * | Years |
#### Strategies
When creating a ScheduledTask, not all fields are compatible with each other.
Generally, there are 3 strategies that will be used:
- **days_of_month** - default strategy. Used if the **days** rule is provided and none of the week-related rules are provided.
- **days_of_week** - chosen when **days_of_week** and/or **weeks** rules are provided. If this strategy is chosen, the **days** and **days_of_week_num** rules are ignored.
- **days_of_week_num** - chosen when both **days_of_week** and **days_of_week_num** rules are provided. This is used to set up rules like "2nd Monday of July". An example of strategy selection is shown below.
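For instance, the following (illustrative) rules select the **days_of_week** strategy, because a **weeks** rule is present:
```python
from scheduled_task import ScheduledTask

# Every Friday (day_of_week 4) that falls in the first week (0) of a month, at 12:00
task = ScheduledTask(minutes=[0], hours=[12], days_of_week=[4], weeks=[0])
print(task.get_next_time())
```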
# Providing current time
When calling **get_previous_time** or **get_next_time**, you can provide **current_datetime** to check against.
If no current datetime is provided, datetime.utcnow() will be used.
**current_datetime** doesn't have to be in UTC. This library is timezone-agnostic and will return results using the same timezone as **current_datetime**.
# Contributing
If you find a bug in the library, please feel free to contribute by opening an issue or creating a pull request.
|
scheduled-task
|
/scheduled_task-1.0.1.tar.gz/scheduled_task-1.0.1/README.md
|
README.md
|
| 0.484868 | 0.931898 |
from datetime import datetime
from enum import Enum
from copy import copy
from .utils import get_biggest_value_less_or_equal_to, get_smallest_value_greater_or_equal_to, last, first, \
weekday_num, weekday_and_num_to_day, num_days_in_month, weekday_and_week_to_day, week_num, max_week_num
class DateTimeHolder:
__slots__ = ['minute', 'hour', 'day', 'day_of_week', 'day_of_week_num', 'month', 'week', 'year']
def __init__(self, minute=None, hour=None, day=None, day_of_week=None, day_of_week_num=None, week=None,
month=None, year=None):
self.minute = minute
self.hour = hour
self.day = day
self.day_of_week = day_of_week
self.day_of_week_num = day_of_week_num
self.week = week
self.month = month
self.year = year
@property
def datetime(self):
if self.day_of_week is not None and self.day_of_week_num is not None:
day = weekday_and_num_to_day(self.year, self.month, self.day_of_week_num, self.day_of_week)
return datetime(self.year, self.month, day, self.hour or 0, self.minute or 0)
elif self.day_of_week is not None and self.week is not None:
day = weekday_and_week_to_day(self.year, self.month, self.week, self.day_of_week)
return datetime(self.year, self.month, day, self.hour or 0, self.minute or 0)
else:
return datetime(self.year, self.month or 1, self.day or 1, self.hour or 0, self.minute or 0)
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
return setattr(self, key, value)
def __copy__(self):
return DateTimeHolder(minute=self.minute, hour=self.hour, day=self.day, day_of_week=self.day_of_week,
day_of_week_num=self.day_of_week_num, week=self.week, month=self.month, year=self.year)
def __lt__(self, other):
return self.datetime < other.datetime
def __gt__(self, other):
return self.datetime > other.datetime
def __eq__(self, other):
return self.datetime == other.datetime
def __le__(self, other):
return self.datetime <= other.datetime
def __ge__(self, other):
return self.datetime >= other.datetime
class TaskStrategy(Enum):
days_of_month = 0 # 1-31
days_of_week = 1 # Sun-Sat + week number
days_of_week_num = 2 # Sun-Sat + weekday number
class DayStrategyFraction(Enum):
minute = 0
hour = 1
day = 2
month = 3
year = 4
class DayOfWeekStrategyFraction(Enum):
minute = 0
hour = 1
day_of_week = 2
week = 3
month = 4
year = 5
class DayOfWeekNumStrategyFraction(Enum):
minute = 0
hour = 1
day_of_week = 2
day_of_week_num = 3
month = 4
year = 5
class ScheduledTask:
def __init__(self, minutes=None, hours=None, days=None, days_of_week=None, days_of_week_num=None, weeks=None,
months=None, years=None, max_iterations=100):
if days_of_week is not None and days_of_week_num is not None:
self.strategy = TaskStrategy.days_of_week_num
self.fractions = DayOfWeekNumStrategyFraction
self.candidates = [minutes or range(0, 60), hours or range(0, 24), days_of_week or range(0, 7),
days_of_week_num or range(0, 5), months or range(1, 13), years or range(0, 9999)]
elif days_of_week is not None or weeks is not None:
self.strategy = TaskStrategy.days_of_week
self.fractions = DayOfWeekStrategyFraction
self.candidates = [minutes or range(0, 60), hours or range(0, 24), days_of_week or range(0, 7),
weeks or range(0, 6), months or range(1, 13), years or range(0, 9999)]
else:
self.strategy = TaskStrategy.days_of_month
self.fractions = DayStrategyFraction
self.candidates = [minutes or range(0, 60), hours or range(0, 24), days or range(1, 32),
months or range(1, 13), years or range(0, 9999)]
self.highest_fraction = last([f for f in self.fractions])
# Settings
self.max_iterations = max_iterations
def _datetimeholder_valid(self, datetimeholder: DateTimeHolder, fraction: Enum):
"""Check if date time holder is valid for current fraction
i.e. if fraction is days, check if current day exists in the month
"""
# Check min value
if self.strategy == TaskStrategy.days_of_month:
min_value = 1 if fraction in [self.fractions.day, self.fractions.month, self.fractions.year] else 0
else:
min_value = 1 if fraction in [self.fractions.month, self.fractions.year] else 0
if datetimeholder[fraction.name] < min_value:
return False
# Check if day exceeds number of days in that month
if self.strategy == TaskStrategy.days_of_month and fraction == self.fractions.day:
n_days_in_month = num_days_in_month(datetimeholder.year, datetimeholder.month)
if datetimeholder.day > n_days_in_month:
return False
# Check if day of week number exceeds number of day of weeks for this month
if self.strategy == TaskStrategy.days_of_week_num and fraction == self.fractions.day_of_week_num:
# Since we don't know what day of week we are validating,
# assume that this number can't be more than max week number
if datetimeholder.day_of_week_num > max_week_num(datetimeholder.year, datetimeholder.month):
return False
# Check if day of week and day of week number exceeds maximum day of week number for this month
if self.strategy == TaskStrategy.days_of_week_num and fraction == self.fractions.day_of_week:
day = weekday_and_num_to_day(datetimeholder.year, datetimeholder.month, datetimeholder.day_of_week_num,
datetimeholder.day_of_week)
n_days_in_month = num_days_in_month(datetimeholder.year, datetimeholder.month)
if day > n_days_in_month:
return False
# Check if month has n weeks
if self.strategy == TaskStrategy.days_of_week and fraction == self.fractions.week:
if datetimeholder.week > max_week_num(datetimeholder.year, datetimeholder.month):
return False
# Check if the weekday and week number combination is valid
if self.strategy == TaskStrategy.days_of_week and fraction == self.fractions.day_of_week:
day = weekday_and_week_to_day(datetimeholder.year, datetimeholder.month, datetimeholder.week,
datetimeholder.day_of_week)
n_days_in_month = num_days_in_month(datetimeholder.year, datetimeholder.month)
if day is None:
return False
if day > n_days_in_month:
return False
# All checks are passed
return True
def _datetimeholders_equal(self, a: DateTimeHolder, b: DateTimeHolder, from_fraction: Enum):
"""Partially check a and b date time holders for equality, starting with fraction.
For example, if the fraction is DAY, compare only DAY, MONTH and YEAR
"""
return all([a[self.fractions(fv).name] == b[self.fractions(fv).name] for fv
in range(from_fraction.value, self.highest_fraction.value+1)])
def _datetimeholders_compare(self, a: DateTimeHolder, b: DateTimeHolder, from_fraction: Enum):
"""Partially compare a and b date time holders, starting with fraction.
For example, if the fraction is DAY, compare only DAY, MONTH and YEAR
"""
_a = DateTimeHolder()
_b = DateTimeHolder()
for fraction_value in range(from_fraction.value, self.highest_fraction.value+1):
fraction = self.fractions(fraction_value)
_a[fraction.name] = a[fraction.name]
_b[fraction.name] = b[fraction.name]
if _a > _b:
return 1
elif _a == _b:
return 0
else:
return -1
def _increase_fraction(self, result: DateTimeHolder, fraction: Enum, increment: int, current: DateTimeHolder):
"""Increase fraction on the datetimeholder
:param result:Value to increase
:param fraction:Fraction to increase
:param current:Original value - used to reset if we can't increase
:return:Number of fractions increased (to know from which to recalculate)
"""
# If candidates are range, perform step-aware increment
if type(self.candidates[fraction.value]) == list:
new_value = result[fraction.name] + increment
elif type(self.candidates[fraction.value]) == range:
new_value = result[fraction.name] + increment * self.candidates[fraction.value].step
else:
raise ValueError("candidate must be of type list or range")
datetimeholder_increased = copy(result)
datetimeholder_increased[fraction.name] = new_value
if increment > 0: # 1
new_value = get_smallest_value_greater_or_equal_to(self.candidates[fraction.value],
datetimeholder_increased[fraction.name])
in_range = new_value is not None
else: # -1
new_value = get_biggest_value_less_or_equal_to(self.candidates[fraction.value],
datetimeholder_increased[fraction.name])
in_range = new_value is not None
if self._datetimeholder_valid(datetimeholder_increased, fraction) and in_range:
result[fraction.name] = new_value
return 1
else:
if fraction == self.highest_fraction:
raise ValueError("Can't increase fraction - current " + self.highest_fraction +
" is " + result[fraction.value])
result[fraction.name] = current[fraction.name]
return 1 + self._increase_fraction(result, self.fractions(fraction.value + 1), increment, current)
def get_next_time(self, current_datetime: datetime = None):
"""Returns next task execution time nearest to the given datetime
"""
if current_datetime is None:
current_datetime = datetime.utcnow()
if self.strategy == TaskStrategy.days_of_month:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day=current_datetime.day, month=current_datetime.month, year=current_datetime.year)
elif self.strategy == TaskStrategy.days_of_week:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
week=week_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
else:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
day_of_week_num=weekday_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
result = self._get_next_time(current)
return result.datetime
def get_previous_time(self, current_datetime: datetime = None):
"""Returns previous task execution time nearest to the given datetime
"""
if current_datetime is None:
current_datetime = datetime.utcnow()
if self.strategy == TaskStrategy.days_of_month:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day=current_datetime.day, month=current_datetime.month, year=current_datetime.year)
elif self.strategy == TaskStrategy.days_of_week:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
week=week_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
else:
current = DateTimeHolder(minute=current_datetime.minute, hour=current_datetime.hour,
day_of_week=current_datetime.weekday(),
day_of_week_num=weekday_num(current_datetime),
month=current_datetime.month, year=current_datetime.year)
result = self._get_previous_time(current)
return result.datetime
def _get_next_time(self, current: DateTimeHolder):
"""Calculates next task time using current
"""
result = DateTimeHolder()
fraction_value = self.highest_fraction.value
i = 0
while fraction_value != -1: # From year to minute
i += 1
if i > self.max_iterations: # Max iteration check
raise ValueError("maximum number of iterations exceeded. You found a bug with scheduledtask. Dump: " +
"candidates: {}, ".format(self.candidates) +
"current: {}, max_iterations: {}".format(current, self.max_iterations))
fraction = self.fractions(fraction_value)
if fraction is self.highest_fraction \
or self._datetimeholders_equal(result, current, self.fractions(fraction_value+1)):
result[fraction.name] = get_smallest_value_greater_or_equal_to(self.candidates[fraction_value],
current[fraction.name])
else:
result[fraction.name] = first(self.candidates[fraction_value])
if result[fraction.name] is None \
or not self._datetimeholder_valid(result, fraction) \
or not self._datetimeholders_compare(result, current, fraction) > -1: # In case with day_of_week_num
if fraction == self.highest_fraction:
return None # Can't find highest fraction match, event will never happen in the future
# Increase higher fractions on result datetime, recalculate starting from that fraction-1
fraction_value += self._increase_fraction(result, self.fractions(fraction_value + 1), +1, current) - 1
continue
fraction_value -= 1
return result
def _get_previous_time(self, current: DateTimeHolder):
"""Calculates previous task time using current
"""
result = DateTimeHolder()
fraction_value = self.highest_fraction.value
i = 0
while fraction_value != -1: # From year to minute
i += 1
if i > self.max_iterations: # Max iteration check
raise ValueError("maximum number of iterations exceeded. You found a bug with scheduledtask. Dump: " +
"candidates: {}, ".format(self.candidates) +
"current: {}, max_iterations: {}".format(current, self.max_iterations))
fraction = self.fractions(fraction_value)
if fraction is self.highest_fraction \
or self._datetimeholders_equal(result, current, self.fractions(fraction_value + 1)):
result[fraction.name] = get_biggest_value_less_or_equal_to(self.candidates[fraction_value],
current[fraction.name])
else:
result[fraction.name] = last(self.candidates[fraction_value])
if result[fraction.name] is None \
or not self._datetimeholder_valid(result, fraction) \
or not self._datetimeholders_compare(result, current, fraction) < 1: # In case with day_of_week_num
if fraction == self.highest_fraction:
return None # Can't find highest fraction match, event never happened in the past
# Decrease higher fractions on result datetime, recalculate starting from that fraction-1
fraction_value += self._increase_fraction(result, self.fractions(fraction_value + 1), -1, current) - 1
continue
fraction_value -= 1
return result
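# A quick illustrative check of the public API defined above (not part of the module):
#
# task = ScheduledTask(minutes=[0, 30], hours=range(9, 18))
# print(task.get_next_time(datetime(2016, 11, 19, 10, 15)))  # -> 2016-11-19 10:30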
|
scheduled-task
|
/scheduled_task-1.0.1.tar.gz/scheduled_task-1.0.1/scheduled_task/timeconverter.py
|
timeconverter.py
|
| 0.850453 | 0.391988 |
from datetime import datetime
from calendar import monthrange
def get_biggest_value_less_or_equal_to(iter: list or range, value):
"""Returns the biggest element from the list that is less or equal to the value. Return None if not found
"""
if type(iter) == list:
i = [x for x in iter if x <= value]
return max(i) if i else None
elif type(iter) == range:
if value in range(iter.start, iter.stop): # Value lies within this range, return step-aware value
return value - ((value - iter.start) % iter.step)
elif value > iter.stop-1: # value is greater than range, return step-aware last element
return last(iter) # iter.stop-1 may not be an element when the step doesn't align
else: # value is less than range, return None
return None
else:
raise ValueError("iter must be of type list or range")
def get_smallest_value_greater_or_equal_to(iter: list or range, value):
"""Returns the smallest element from the list that is greater or equal to the value. Return None if not found
"""
if type(iter) == list:
i = [x for x in iter if x >= value]
return min(i) if i else None
elif type(iter) == range:
if value in range(iter.start, iter.stop): # Value lies within this range, return step-aware value
result = value + (iter.step - ((value - iter.start) % iter.step)) % iter.step
return result if result < iter.stop else None # rounding up can step past the last element
elif value < iter.start: # Value is less than range start, return start
return iter.start
else: # Value is greater than range, return None
return None
else:
raise ValueError("iter must be of type list or range")
def last(iter: list or range):
"""Returns the last element from the list or range
"""
if type(iter) == list:
return iter[len(iter)-1]
elif type(iter) == range:
return iter.stop - (iter.stop - 1 - iter.start) % iter.step - 1 # Step-aware last element
else:
raise ValueError("iter must be of type list or range")
def first(iter: list or range):
"""Returns first element from the list or range
"""
if type(iter) == list:
return iter[0]
elif type(iter) == range:
return iter.start
else:
raise ValueError("iter must be of type list or range")
def num_days_in_month(year: int, month: int):
return monthrange(year, month)[1]
def weekday_num(dt: datetime):
Returns the 0-based occurrence number of the day's weekday within its month. E.g. the first Tuesday of a month returns 0, the second returns 1
"""
return int((dt.day - 1)/7)
def weekday_and_num_to_day(year: int, month: int, weekday_number: int, weekday: int):
"""Converts current year, month, weekday and weekday number into the day of month
"""
dt_first = datetime(year, month, 1)
dt_first_weekday = dt_first.weekday()
return 1 - dt_first_weekday + weekday + ((0 if weekday >= dt_first_weekday else 1) + weekday_number) * 7
def weekday_and_week_to_day(year: int, month: int, week: int, weekday: int):
"""Converts current year, month, weekday and week number into the day of month
"""
dt_first = datetime(year, month, 1)
dt_first_weekday = dt_first.weekday()
result = week * 7 + weekday - dt_first_weekday + 1
if result < 1 or result > num_days_in_month(year, month):
return None
else:
return result
def week_num(dt: datetime):
"""Returns week number of the given day
"""
dt_first = dt.replace(day=1)
dt_first_weekday = dt_first.weekday()
return int((dt.day + dt_first_weekday - 1) / 7)
def max_week_num(year: int, month: int):
Returns the 0-based week number of the last day of the month, i.e. the highest week index that month contains
"""
# The same thing as week number for the last day of month
return week_num(datetime(year, month, num_days_in_month(year, month)))
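# Illustrative sanity checks for the helpers above (not part of the module):
# weekday_and_num_to_day(2016, 11, 0, 0) -> 7   (first Monday of November 2016)
# week_num(datetime(2016, 11, 8)) -> 1          (Nov 8, 2016 falls in the second week)
# last(range(1848, 9999, 4)) -> 9996            (step-aware last element)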
|
scheduled-task
|
/scheduled_task-1.0.1.tar.gz/scheduled_task-1.0.1/scheduled_task/utils.py
|
utils.py
|
| 0.817028 | 0.667751 |
# Scheduled Task Reader
[](https://github.com/psf/black)
[](https://pypi.org/project/scheduled-tasks-reader/0.1/)
[](https://gitlab.crystal-cube.ch/ul15/list-scheduled-tasks/)
[](https://gitlab.crystal-cube.ch/ul15/list-scheduled-tasks/)
A program to extract various information about scheduled tasks.
## Installation
Clone this git repository, then run the following commands:
```console
python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
```
or just
```pip3 install scheduled-tasks-reader```
## Help page
```console
usage: Scheduled Tasks Reader [-h] [-o OUTPUT] [-of]
[-n TASK_NAMES [TASK_NAMES ...]]
[-p TASK_PATHS [TASK_PATHS ...]] [-s [...]]
[-t [...]] [--only_hidden] [--raw_data]
[--version]
dir_of_registry_files
Get Overview of Scheduled Tasks from the relevant registry files.
positional arguments:
dir_of_registry_files
Path to the directory containing the relevant registry
files
optional arguments:
-h, --help show this help message and exit
-o OUTPUT, --output OUTPUT
Store output at specified location. It will overwrite
existing files!
-of , --output_format
Define output format. Default value is: html. Allowed
values are: ['html', 'json', 'csv']
-n TASK_NAMES [TASK_NAMES ...], --task_names TASK_NAMES [TASK_NAMES ...]
Filter for array of one or more names of scheduled
task (separated by space).
-p TASK_PATHS [TASK_PATHS ...], --task_paths TASK_PATHS [TASK_PATHS ...]
Filter for array of one or more paths of scheduled
task (separated by space).
-s [ ...], --sort_by [ ...]
Sort by array of one or more attributes of scheduled
task (separated by space). Default values are:
['task_path', 'task_name']. Allowed values are:
['task_path', 'task_name', 'enabled', 'hidden',
'triggers', 'exec_command', 'exec_args',
'schedule_time']
-t [ ...], --task_triggers [ ...]
Filter for array of one or more trigger types of
scheduled task (separated by space). Allowed values
are: ['EventTrigger', 'TimeTrigger', 'LogonTrigger',
'BootTrigger', 'CalendarTrigger',
'SessionStateChangeTrigger', 'RegistrationTrigger']
--table_terminal_output
Show the output as a table, needs a wide Terminal.
--only_hidden Show only the hidden scheduled tasks
--raw_data Append the raw data from the scheduled tasks parsed
from the xmls to the normal output.
--version show program's version number and exit
```
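For example, to list only hidden tasks that have a calendar trigger and store an HTML report (paths are illustrative):
```console
python3 scheduled_tasks_reader.py /path/to/registry_files -t CalendarTrigger --only_hidden -o report.html -of html
```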
## Example output
```console
task_path Microsoft/Windows/Registry
task_name RegIdleBackup
enabled True
hidden True
triggers [CalendarTrigger]
exec_command
exec_args
schedule_time {'schedule': 'ScheduleByDay', 'dayInterval': '10'}
===========================
...
```
or with the --table_terminal_output it would look like this:
```console
task_path task_name enabled hidden triggers exec_command exec_args schedule_time
. AviraSystemSpeedupUpdate True False [CalendarTrigger] "C:\ProgramData\Avira\SystemSpeedup\Update\avira_speedup_setup_update.exe" /VERIFY /VERYSILENT /NOSTART /NODOTNET /NORESTART {'schedule': 'ScheduleByWeek', 'daysOfWeek': ['Tuesday'], 'weeksInterval': '1'}
. Avira_Antivirus_Systray True False [ LogonTrigger] "C:\Program Files (x86)\Avira\Antivirus\avgnt.exe" /min N/A
...
```
## Built With
* [python3](https://www.python.org/) - Python :heart:
* [argparse](https://docs.python.org/3/library/argparse.html?highlight=argparse#module-argparse) - Parser for command-line options, arguments and sub-commands
* [xmltodict](https://github.com/martinblech/xmltodict) - To parse xml files
* [pandas](https://github.com/pandas-dev/pandas) - Powerful Python data analysis toolkit
## Contributing
Contributing in form of feedback, bug reporting or pull requests is welcome.
## License
This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details
## Acknowledgments
* Google :heart:
|
scheduled-tasks-reader
|
/scheduled_tasks_reader-0.1.1.tar.gz/scheduled_tasks_reader-0.1.1/README.md
|
README.md
|
| 0.160135 | 0.458652 |
import os
import argparse
import xmltodict
import pandas as pd
import re
class CLIHandler:
def __init__(self):
self.columns = [
"task_path",
"task_name",
"enabled",
"hidden",
"triggers",
"exec_command",
"exec_args",
"schedule_time",
]
self.default_values_sort_by = ["task_path", "task_name"]
self.trigger_choices = [
"EventTrigger",
"TimeTrigger",
"LogonTrigger",
"BootTrigger",
"CalendarTrigger",
"SessionStateChangeTrigger",
"RegistrationTrigger",
]
self.output_format_choices = ["html", "json", "csv"]
self.default_value_output_format = "html"
parser = self.init_argparser()
self.args = parser.parse_args()
self.check_if_path_is_dir()
self.parsed_scheduled_task_output = self.parse_scheduled_task_output()
self.show_output()
if self.args.output:
self.store_output()
def init_argparser(self):
parser = argparse.ArgumentParser(
prog="Scheduled Tasks Reader",
description="Get Overview of Scheduled Tasks from the relevant registry files.",
)
parser.add_argument("dir_of_registry_files", help="Path to the directory containing the relevant registry files")
parser.add_argument(
"-o", "--output", help="Store output at specified location. It will overwrite existing files!"
)
parser.add_argument(
"-of",
"--output_format",
choices=self.output_format_choices,
metavar="",
default=self.default_value_output_format,
help=f"Define output format. Default value is: {self.default_value_output_format}.Allowed values are: {self.output_format_choices}",
)
parser.add_argument(
"-n",
"--task_names",
nargs="+",
help="Filter for array of one or more names of scheduled task (separated by space).",
)
parser.add_argument(
"-p",
"--task_paths",
nargs="+",
help="Filter for array of one or more paths of scheduled task (separated by space).",
)
parser.add_argument(
"-s",
"--sort_by",
nargs="+",
choices=self.columns,
metavar="",
default=self.default_values_sort_by,
help=f"Sort by array of one or more attributes of scheduled task (separated by space). Default values are: {self.default_values_sort_by}.Allowed values are: {self.columns}",
)
parser.add_argument(
"-t",
"--task_triggers",
nargs="+",
choices=self.trigger_choices,
metavar="",
help=f"Filter for array of one or more trigger types of scheduled task (separated by space). Allowed values are: {self.trigger_choices}",
)
parser.add_argument("--table_terminal_output", action="store_true", help="Show the output as a table, needs a wide Terminal.")
parser.add_argument("--only_hidden", action="store_true", help="Show only the hidden scheduled tasks")
parser.add_argument(
"--raw_data",
action="store_true",
help="Append the raw data from the scheduled tasks parsed from the xmls to the normal output.",
)
parser.add_argument("--version", action="version", version="%(prog)s 0.1")
return parser
def check_if_path_is_dir(self):
if not os.path.isdir(self.args.dir_of_registry_files):
raise ValueError(f"'{self.args.dir_of_registry_files}' is not a valid path of a directory")
def parse_scheduled_task_output(self):
schedule_task_parser = ScheduledTaskParser(self.args.dir_of_registry_files)
data_frame = pd.DataFrame(schedule_task_parser.scheduled_tasks)
data_frame = data_frame.sort_values(by=self.args.sort_by)
data_frame = self.filter_data_frame(data_frame)
return data_frame
def filter_data_frame(self, data_frame):
if self.args.only_hidden:
data_frame = data_frame[data_frame.hidden == True]
if self.args.task_paths:
data_frame = data_frame[data_frame.task_path.isin(self.args.task_paths)]
if self.args.task_names:
data_frame = data_frame[data_frame.task_name.isin(self.args.task_names)]
if self.args.task_triggers:
data_frame = data_frame[
data_frame.triggers.apply(
lambda triggers: any(trigger in self.args.task_triggers for trigger in triggers)
)
]
if self.args.raw_data:
data_frame = data_frame.join(pd.json_normalize(data_frame["task_data"]))  # pd.io.json.json_normalize is deprecated
del data_frame["task_data"]
else:
data_frame = data_frame[self.columns]
return data_frame
def show_output(self):
pd.set_option("display.max_columns", None)
pd.set_option("display.expand_frame_repr", False)
pd.set_option("max_colwidth", -1)
pd.set_option("colheader_justify", "left")
if self.args.table_terminal_output:
print(self.parsed_scheduled_task_output.to_string(index=False))
else:
for task in self.parsed_scheduled_task_output.iterrows():
print(task[1].to_string())
print("===========================")
def store_output(self):
output_format = self.args.output_format
with open(self.args.output, "w") as output_file:
if output_format == "html":
this_directory = os.path.abspath(os.path.dirname(__file__))
html_template_path = os.path.join(this_directory, "html_template.html")
with open(html_template_path, "r", encoding="UTF-8") as html_template:
html_template_content = html_template.read()
html_content = html_template_content.format(
data=self.parsed_scheduled_task_output.to_html(table_id="dataframe", index=False)
)
output_file.write(html_content)
elif output_format == "json":
output_file.write(self.parsed_scheduled_task_output.to_json())
elif output_format == "csv":
output_file.write(self.parsed_scheduled_task_output.to_csv())
class ScheduleTimeParser:
def __init__(self, task_data, CalendarTrigger=True):
self.attributes = {
"schedule": None,
"dayInterval": None,
"daysOfWeek": None,
"weeksInterval": None,
"daysOfMonth": None,
"months": None,
"calenderTrigger": CalendarTrigger,
"task_data": task_data,
"executionLimit": None,
"duration": None,
"interval": None,
"stopAtEnd": None,
}
def set_time_day(self, task_data):
self.attributes["schedule"] = "ScheduleByDay"
if "DaysInterval" in task_data:
self.attributes["dayInterval"] = task_data["DaysInterval"]
def set_time_week(self, task_data):
self.attributes["schedule"] = "ScheduleByWeek"
if "WeeksInterval" in task_data:
self.attributes["weeksInterval"] = task_data["WeeksInterval"]
if "DaysOfWeek" in task_data:
self.attributes["daysOfWeek"] = list(task_data["DaysOfWeek"].keys())
def set_time_month(self, task_data):
self.attributes["schedule"] = "ScheduleByMonth"
if "DaysOfMonth" in task_data:
self.attributes["daysOfMonth"] = list(task_data["DaysOfMonth"].keys())
if "Months" in task_data:
self.attributes["months"] = list(task_data["Months"].keys())
def select_set_time(self, schedule, task_data):
if schedule == "ScheduleByDay":
self.set_time_day(task_data)
elif schedule == "ScheduleByWeek":
self.set_time_week(task_data)
elif schedule == "ScheduleByMonth":
self.set_time_month(task_data)
def set_trigger_time(self):
if "ExecutionTimeLimit" in self.attributes["task_data"]:
self.attributes["executionLimit"] = self.attributes["task_data"]["ExecutionTimeLimit"]
if "Repetition" in self.attributes["task_data"]:
if "Duration" in self.attributes["task_data"]["Repetition"]:
self.attributes["duration"] = self.attributes["task_data"]["Repetition"]["duration"]
if "Interval" in self.attributes["task_data"]["Repetition"]:
self.attributes["interval"] = self.attributes["task_data"]["Repetition"]["Interval"]
if "StopAtDurationEnd" in self.attributes["task_data"]["Repetition"]:
self.attributes["stopAtEnd"] = self.attributes["task_data"]["Repetition"]["StopAtDurationEnd"]
def get_schedule_time(self):
if self.attributes["calenderTrigger"]:
pattern = "(?P<schedule>ScheduleBy.*)"
for tag in self.attributes["task_data"]:
match = re.match(pattern, tag)
if match:
schedule = match.group("schedule")
self.select_set_time(schedule, self.attributes["task_data"][schedule])
elif not self.attributes["calenderTrigger"]:
self.set_trigger_time()
def return_information(self):
self.get_schedule_time()
res = {}
self.attributes["calenderTrigger"] = None
for attribute, value in self.attributes.items():
if value and attribute != "task_data":
res[attribute] = value
return res
class ScheduledTaskParser:
def __init__(self, dir_path):
self.scheduled_task_reader = ScheduledTaskReader(dir_path)
self.scheduled_tasks = self.scheduled_task_reader.scheduled_tasks
self.add_additional_information()
def add_additional_information(self):
for index, schedule_task in enumerate(self.scheduled_tasks):
schedule_task_data = schedule_task["task_data"]
enabled = self.get_enabled(schedule_task_data)
self.scheduled_tasks[index]["enabled"] = enabled
self.scheduled_tasks[index]["schedule_time"] = self.get_schedule_time(schedule_task_data)
self.scheduled_tasks[index]["hidden"] = self.get_hidden_flag(schedule_task_data)
self.scheduled_tasks[index]["triggers"] = self.get_triggers(schedule_task_data)
self.scheduled_tasks[index]["exec_command"] = self.get_exec_command(schedule_task_data)
self.scheduled_tasks[index]["exec_args"] = self.get_exec_args(schedule_task_data)
def get_enabled(self, task_data):
return "Enabled" in task_data["Settings"] and task_data["Settings"]["Enabled"] == "true"
def get_schedule_time(self, task_data):
if "Triggers" in task_data and task_data["Triggers"]:
if "CalendarTrigger" in task_data["Triggers"]:
if (
"Enabled" in task_data["Triggers"]["CalendarTrigger"]
and task_data["Triggers"]["CalendarTrigger"]["Enabled"] == "true"
) or "Enabled" not in task_data["Triggers"]["CalendarTrigger"]:
schedule_time = ScheduleTimeParser(task_data["Triggers"]["CalendarTrigger"], True)
return schedule_time.return_information()
if "TimeTrigger" in task_data["Triggers"]:
if (
"Enabled" in task_data["Triggers"]["TimeTrigger"]
and task_data["Triggers"]["TimeTrigger"]["Enabled"] == "true"
) or "Enabled" not in task_data["Triggers"]["TimeTrigger"]:
schedule_time = ScheduleTimeParser(task_data["Triggers"]["TimeTrigger"], False)
return schedule_time.return_information()
return "N/A"
def get_hidden_flag(self, task_data):
if "Hidden" in task_data["Settings"]:
return task_data["Settings"]["Hidden"] == "true"
return False
def get_triggers(self, task_data):
triggers = []
if "Triggers" in task_data and task_data["Triggers"]:
for trigger, data in task_data["Triggers"].items():
if data and "Enabled" in data and data["Enabled"] == "true":
triggers.append(trigger)
elif data and "Enabled" not in data:
triggers.append(trigger)
elif not data:
triggers.append(trigger)
return triggers
def get_exec_command(self, task_data):
if "Actions" in task_data and "Exec" in task_data["Actions"] and "Command" in task_data["Actions"]["Exec"]:
return task_data["Actions"]["Exec"]["Command"]
return ""
def get_exec_args(self, task_data):
if "Actions" in task_data and "Exec" in task_data["Actions"] and "Arguments" in task_data["Actions"]["Exec"]:
return task_data["Actions"]["Exec"]["Arguments"]
return ""
class ScheduledTaskReader:
def __init__(self, dir_path):
self.dir_path = dir_path
self.scheduled_tasks = self.get_scheduled_tasks()
def get_scheduled_tasks(self):
"""iterate through every file in the directory and call get_scheduled_task_information"""
scheduled_tasks = []
for path, subdirs, files in os.walk(self.dir_path):
for task_name in files:
scheduled_tasks.append(self.get_scheduled_task_information(path, task_name))
return scheduled_tasks
def get_scheduled_task_information(self, path, task_name):
full_path = os.path.join(path, task_name)
with open(full_path, "r", encoding="utf-16") as file:
task_data = xmltodict.parse(file.read())["Task"]
task_path = os.path.relpath(path, self.dir_path)
return {"task_path": task_path, "task_name": task_name, "task_data": task_data}
def main():
CLIHandler()
if __name__ == "__main__":
main()
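# Illustrative programmatic use (assuming task XML files extracted under ./tasks):
#
# parser = ScheduledTaskParser("./tasks")
# for task in parser.scheduled_tasks:
#     print(task["task_path"], task["task_name"], task["triggers"])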
|
scheduled-tasks-reader
|
/scheduled_tasks_reader-0.1.1.tar.gz/scheduled_tasks_reader-0.1.1/scheduled_tasks_reader.py
|
scheduled_tasks_reader.py
|
import os
import argparse
import xmltodict
import pandas as pd
import re
class CLIHandler:
def __init__(self):
self.columns = [
"task_path",
"task_name",
"enabled",
"hidden",
"triggers",
"exec_command",
"exec_args",
"schedule_time",
]
self.default_values_sort_by = ["task_path", "task_name"]
self.trigger_choices = [
"EventTrigger",
"TimeTrigger",
"LogonTrigger",
"BootTrigger",
"CalendarTrigger",
"SessionStateChangeTrigger",
"RegistrationTrigger",
]
self.output_format_choices = ["html", "json", "csv"]
self.default_value_output_format = "html"
parser = self.init_argparser()
self.args = parser.parse_args()
self.check_if_path_is_dir()
self.parsed_scheduled_task_output = self.parse_scheduled_task_output()
self.show_output()
if self.args.output:
self.store_output()
def init_argparser(self):
parser = argparse.ArgumentParser(
prog="Scheduled Tasks Reader",
description="Get Overview of Scheduled Tasks from the relevant registry files.",
)
parser.add_argument("dir_of_registry_files", help="Path to the directory containing the relevant registry files")
parser.add_argument(
"-o", "--output", help="Store output at specified location. It will overwrite existing files!"
)
parser.add_argument(
"-of",
"--output_format",
choices=self.output_format_choices,
metavar="",
default=self.default_value_output_format,
help=f"Define output format. Default value is: {self.default_value_output_format}.Allowed values are: {self.output_format_choices}",
)
parser.add_argument(
"-n",
"--task_names",
nargs="+",
help="Filter for array of one or more names of scheduled task (separated by space).",
)
parser.add_argument(
"-p",
"--task_paths",
nargs="+",
help="Filter for array of one or more paths of scheduled task (separated by space).",
)
parser.add_argument(
"-s",
"--sort_by",
nargs="+",
choices=self.columns,
metavar="",
default=self.default_values_sort_by,
help=f"Sort by array of one or more attributes of scheduled task (separated by space). Default values are: {self.default_values_sort_by}.Allowed values are: {self.columns}",
)
parser.add_argument(
"-t",
"--task_triggers",
nargs="+",
choices=self.trigger_choices,
metavar="",
help=f"Filter for array of one or more trigger types of scheduled task (separated by space). Allowed values are: {self.trigger_choices}",
)
parser.add_argument("--table_terminal_output", action="store_true", help="Show the output as a table, needs a wide Terminal.")
parser.add_argument("--only_hidden", action="store_true", help="Show only the hidden scheduled tasks")
parser.add_argument(
"--raw_data",
action="store_true",
help="Append the raw data from the scheduled tasks parsed from the xmls to the normal output.",
)
parser.add_argument("--version", action="version", version="%(prog)s 0.1")
return parser
def check_if_path_is_dir(self):
if not os.path.isdir(self.args.dir_of_registry_files):
raise ValueError(f"'{self.args.dir_of_registry_files}' is not a valid path of a directory")
def parse_scheduled_task_output(self):
schedule_task_parser = ScheduledTaskParser(self.args.dir_of_registry_files)
data_frame = pd.DataFrame(schedule_task_parser.scheduled_tasks)
data_frame = data_frame.sort_values(by=self.args.sort_by)
data_frame = self.filter_data_frame(data_frame)
return data_frame
def filter_data_frame(self, data_frame):
if self.args.only_hidden:
data_frame = data_frame[data_frame.hidden == True]
if self.args.task_paths:
data_frame = data_frame[data_frame.task_path.isin(self.args.task_paths)]
if self.args.task_names:
data_frame = data_frame[data_frame.task_name.isin(self.args.task_names)]
if self.args.task_triggers:
data_frame = data_frame[
data_frame.triggers.apply(
lambda triggers: any(trigger in self.args.task_triggers for trigger in triggers)
)
]
if self.args.raw_data:
data_frame = data_frame.join(pd.json_normalize(data_frame["task_data"]))  # pd.io.json.json_normalize is deprecated
del data_frame["task_data"]
else:
data_frame = data_frame[self.columns]
return data_frame
def show_output(self):
pd.set_option("display.max_columns", None)
pd.set_option("display.expand_frame_repr", False)
pd.set_option("max_colwidth", -1)
pd.set_option("colheader_justify", "left")
if self.args.table_terminal_output:
print(self.parsed_scheduled_task_output.to_string(index=False))
else:
            for _, task in self.parsed_scheduled_task_output.iterrows():
                print(task.to_string())
                print("===========================")
def store_output(self):
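        """Write the parsed tasks to ``self.args.output`` in the chosen output format."""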
output_format = self.args.output_format
with open(self.args.output, "w") as output_file:
if output_format == "html":
this_directory = os.path.abspath(os.path.dirname(__file__))
html_template_path = os.path.join(this_directory, "html_template.html")
with open(html_template_path, "r", encoding="UTF-8") as html_template:
html_template_content = html_template.read()
html_content = html_template_content.format(
data=self.parsed_scheduled_task_output.to_html(table_id="dataframe", index=False)
)
output_file.write(html_content)
elif output_format == "json":
output_file.write(self.parsed_scheduled_task_output.to_json())
elif output_format == "csv":
output_file.write(self.parsed_scheduled_task_output.to_csv())
class ScheduleTimeParser:
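    """Extracts timing information from a CalendarTrigger or TimeTrigger definition."""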
    def __init__(self, task_data, calendar_trigger=True):
self.attributes = {
"schedule": None,
"dayInterval": None,
"daysOfWeek": None,
"weeksInterval": None,
"daysOfMonth": None,
"months": None,
"calenderTrigger": CalendarTrigger,
"task_data": task_data,
"executionLimit": None,
"duration": None,
"interval": None,
"stopAtEnd": None,
}
def set_time_day(self, task_data):
self.attributes["schedule"] = "ScheduleByDay"
if "DaysInterval" in task_data:
self.attributes["dayInterval"] = task_data["DaysInterval"]
def set_time_week(self, task_data):
self.attributes["schedule"] = "ScheduleByWeek"
if "WeeksInterval" in task_data:
self.attributes["weeksInterval"] = task_data["WeeksInterval"]
if "DaysOfWeek" in task_data:
self.attributes["daysOfWeek"] = list(task_data["DaysOfWeek"].keys())
def set_time_month(self, task_data):
self.attributes["schedule"] = "ScheduleByMonth"
if "DaysOfMonth" in task_data:
self.attributes["daysOfMonth"] = list(task_data["DaysOfMonth"].keys())
if "Months" in task_data:
self.attributes["months"] = list(task_data["Months"].keys())
def select_set_time(self, schedule, task_data):
if schedule == "ScheduleByDay":
self.set_time_day(task_data)
elif schedule == "ScheduleByWeek":
self.set_time_week(task_data)
elif schedule == "ScheduleByMonth":
self.set_time_month(task_data)
def set_trigger_time(self):
if "ExecutionTimeLimit" in self.attributes["task_data"]:
self.attributes["executionLimit"] = self.attributes["task_data"]["ExecutionTimeLimit"]
if "Repetition" in self.attributes["task_data"]:
if "Duration" in self.attributes["task_data"]["Repetition"]:
self.attributes["duration"] = self.attributes["task_data"]["Repetition"]["duration"]
if "Interval" in self.attributes["task_data"]["Repetition"]:
self.attributes["interval"] = self.attributes["task_data"]["Repetition"]["Interval"]
if "StopAtDurationEnd" in self.attributes["task_data"]["Repetition"]:
self.attributes["stopAtEnd"] = self.attributes["task_data"]["Repetition"]["StopAtDurationEnd"]
def get_schedule_time(self):
if self.attributes["calenderTrigger"]:
pattern = "(?P<schedule>ScheduleBy.*)"
for tag in self.attributes["task_data"]:
match = re.match(pattern, tag)
if match:
schedule = match.group("schedule")
self.select_set_time(schedule, self.attributes["task_data"][schedule])
elif not self.attributes["calenderTrigger"]:
self.set_trigger_time()
def return_information(self):
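        """Populate the schedule attributes and return only those that were actually set."""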
self.get_schedule_time()
res = {}
self.attributes["calenderTrigger"] = None
for attribute, value in self.attributes.items():
if value and attribute != "task_data":
res[attribute] = value
return res
class ScheduledTaskParser:
def __init__(self, dir_path):
self.scheduled_task_reader = ScheduledTaskReader(dir_path)
self.scheduled_tasks = self.scheduled_task_reader.scheduled_tasks
self.add_additional_information()
def add_additional_information(self):
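        """Enrich each parsed task with derived fields: enabled, schedule_time, hidden, triggers, and exec command/args."""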
for index, schedule_task in enumerate(self.scheduled_tasks):
schedule_task_data = schedule_task["task_data"]
enabled = self.get_enabled(schedule_task_data)
self.scheduled_tasks[index]["enabled"] = enabled
self.scheduled_tasks[index]["schedule_time"] = self.get_schedule_time(schedule_task_data)
self.scheduled_tasks[index]["hidden"] = self.get_hidden_flag(schedule_task_data)
self.scheduled_tasks[index]["triggers"] = self.get_triggers(schedule_task_data)
self.scheduled_tasks[index]["exec_command"] = self.get_exec_command(schedule_task_data)
self.scheduled_tasks[index]["exec_args"] = self.get_exec_args(schedule_task_data)
def get_enabled(self, task_data):
return "Enabled" in task_data["Settings"] and task_data["Settings"]["Enabled"] == "true"
def get_schedule_time(self, task_data):
if "Triggers" in task_data and task_data["Triggers"]:
if "CalendarTrigger" in task_data["Triggers"]:
if (
"Enabled" in task_data["Triggers"]["CalendarTrigger"]
and task_data["Triggers"]["CalendarTrigger"]["Enabled"] == "true"
) or "Enabled" not in task_data["Triggers"]["CalendarTrigger"]:
schedule_time = ScheduleTimeParser(task_data["Triggers"]["CalendarTrigger"], True)
return schedule_time.return_information()
if "TimeTrigger" in task_data["Triggers"]:
if (
"Enabled" in task_data["Triggers"]["TimeTrigger"]
and task_data["Triggers"]["TimeTrigger"]["Enabled"] == "true"
) or "Enabled" not in task_data["Triggers"]["TimeTrigger"]:
schedule_time = ScheduleTimeParser(task_data["Triggers"]["TimeTrigger"], False)
return schedule_time.return_information()
return "N/A"
def get_hidden_flag(self, task_data):
if "Hidden" in task_data["Settings"]:
return task_data["Settings"]["Hidden"] == "true"
return False
def get_triggers(self, task_data):
triggers = []
if "Triggers" in task_data and task_data["Triggers"]:
for trigger, data in task_data["Triggers"].items():
if data and "Enabled" in data and data["Enabled"] == "true":
triggers.append(trigger)
elif data and "Enabled" not in data:
triggers.append(trigger)
elif not data:
triggers.append(trigger)
return triggers
def get_exec_command(self, task_data):
if "Actions" in task_data and "Exec" in task_data["Actions"] and "Command" in task_data["Actions"]["Exec"]:
return task_data["Actions"]["Exec"]["Command"]
return ""
def get_exec_args(self, task_data):
if "Actions" in task_data and "Exec" in task_data["Actions"] and "Arguments" in task_data["Actions"]["Exec"]:
return task_data["Actions"]["Exec"]["Arguments"]
return ""
class ScheduledTaskReader:
def __init__(self, dir_path):
self.dir_path = dir_path
self.scheduled_tasks = self.get_scheduled_tasks()
def get_scheduled_tasks(self):
"""iterate through every file in the directory and call get_scheduled_task_information"""
scheduled_tasks = []
for path, subdirs, files in os.walk(self.dir_path):
for task_name in files:
scheduled_tasks.append(self.get_scheduled_task_information(path, task_name))
return scheduled_tasks
def get_scheduled_task_information(self, path, task_name):
full_path = os.path.join(path, task_name)
with open(full_path, "r", encoding="utf-16") as file:
task_data = xmltodict.parse(file.read())["Task"]
task_path = os.path.relpath(path, self.dir_path)
return {"task_path": task_path, "task_name": task_name, "task_data": task_data}
def main():
CLIHandler()
if __name__ == "__main__":
main()
| 0.571049 | 0.215691 |
.. These are examples of badges you might want to add to your README:
please update the URLs accordingly
.. image:: https://api.cirrus-ci.com/github/<USER>/scheduled_thread_pool_executor.svg?branch=main
:alt: Built Status
:target: https://cirrus-ci.com/github/<USER>/scheduled_thread_pool_executor
.. image:: https://readthedocs.org/projects/scheduled_thread_pool_executor/badge/?version=latest
:alt: ReadTheDocs
:target: https://scheduled_thread_pool_executor.readthedocs.io/en/stable/
.. image:: https://img.shields.io/coveralls/github/<USER>/scheduled_thread_pool_executor/main.svg
:alt: Coveralls
:target: https://coveralls.io/r/<USER>/scheduled_thread_pool_executor
.. image:: https://img.shields.io/conda/vn/conda-forge/scheduled_thread_pool_executor.svg
:alt: Conda-Forge
:target: https://anaconda.org/conda-forge/scheduled_thread_pool_executor
.. image:: https://pepy.tech/badge/scheduled_thread_pool_executor/month
:alt: Monthly Downloads
:target: https://pepy.tech/project/scheduled_thread_pool_executor
.. image:: https://img.shields.io/twitter/url/http/shields.io.svg?style=social&label=Twitter
:alt: Twitter
:target: https://twitter.com/scheduled_thread_pool_executor
.. image:: https://img.shields.io/badge/-PyScaffold-005CA0?logo=pyscaffold
:alt: Project generated with PyScaffold
:target: https://pyscaffold.org/
.. image:: https://img.shields.io/pypi/v/scheduled_thread_pool_executor.svg
:alt: PyPI-Server
:target: https://pypi.org/project/scheduled_thread_pool_executor/
==============================
Scheduled Thread Pool Executor
==============================
A Scheduled Thread Pool Executor implementation in Python.
It uses a delayed-queue implementation to submit tasks to the thread pool.
-----
Usage
-----
.. code-block:: python
from scheduled_thread_pool_executor import ScheduledThreadPoolExecutor
scheduled_executor = ScheduledThreadPoolExecutor(max_workers=5)
    scheduled_executor.schedule(task, 0)  # schedule once, where task is a callable
    scheduled_executor.schedule_at_fixed_rate(task, 0, 5)  # schedule immediately, then run every 5 seconds
    scheduled_executor.schedule_at_fixed_delay(task, 5, 10)  # schedule after a 5-second initial delay, then with 10 seconds between runs
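For a self-contained sketch (an illustration only: it assumes the package is installed and that any plain callable works as ``task``), the snippet below defines a trivial task and schedules it in all three ways:
.. code-block:: python
    import time
    from scheduled_thread_pool_executor import ScheduledThreadPoolExecutor
    def tick():
        # Hypothetical task used only for illustration.
        print("tick")
    scheduled_executor = ScheduledThreadPoolExecutor(max_workers=5)
    scheduled_executor.schedule(tick, 0)                     # run once, immediately
    scheduled_executor.schedule_at_fixed_rate(tick, 0, 5)    # immediately, then every 5 seconds
    scheduled_executor.schedule_at_fixed_delay(tick, 5, 10)  # after 5 seconds, then 10 seconds between runs
    time.sleep(30)                                           # keep the main thread alive so the jobs can fire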
.. _pyscaffold-notes:
Note
====
This project has been set up using PyScaffold 4.1.1. For details and usage
information on PyScaffold see https://pyscaffold.org/.
|
scheduled-thread-pool-executor
|
/scheduled_thread_pool_executor-1.0.2.tar.gz/scheduled_thread_pool_executor-1.0.2/README.rst
|
README.rst
|
| 0.574514 | 0.417509 |
.. todo:: THIS IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS!
The document assumes you are using a source repository service that promotes a
contribution model similar to `GitHub's fork and pull request workflow`_.
While this is true for the majority of services (like GitHub, GitLab,
BitBucket), it might not be the case for private repositories (e.g., when
using Gerrit).
Also notice that the code examples might refer to GitHub URLs or the text
might use GitHub specific terminology (e.g., *Pull Request* instead of *Merge
Request*).
   Please make sure to check the document keeping these assumptions in mind
   and update things accordingly.
.. todo:: Provide the correct links/replacements at the bottom of the document.
.. todo:: You might want to have a look at `PyScaffold's contributor's guide`_,
especially if your project is open source. The text should be very similar to
this template, but there are a few extra contents that you might decide to
also include, like mentioning labels of your issue tracker or automated
releases.
============
Contributing
============
Welcome to the ``scheduled_thread_pool_executor`` contributor's guide.
This document focuses on getting any potential contributor familiarized
with the development processes, but `other kinds of contributions`_ are also
appreciated.
If you are new to using git_ or have never collaborated in a project previously,
please have a look at `contribution-guide.org`_. Other resources are also
listed in the excellent `guide created by FreeCodeCamp`_ [#contrib1]_.
Please notice, all users and contributors are expected to be **open,
considerate, reasonable, and respectful**. When in doubt, `Python Software
Foundation's Code of Conduct`_ is a good reference in terms of behavior
guidelines.
Issue Reports
=============
If you experience bugs or general issues with ``scheduled_thread_pool_executor``, please have a look
at the `issue tracker`_. If you don't see anything useful there, please feel
free to file an issue report.
.. tip::
Please don't forget to include the closed issues in your search.
Sometimes a solution was already reported, and the problem is considered
**solved**.
New issue reports should include information about your programming environment
(e.g., operating system, Python version) and steps to reproduce the problem.
Please try also to simplify the reproduction steps to a very minimal example
that still illustrates the problem you are facing. By removing other factors,
you help us to identify the root cause of the issue.
Documentation Improvements
==========================
You can help improve ``scheduled_thread_pool_executor`` docs by making them more readable and coherent, or
by adding missing information and correcting mistakes.
``scheduled_thread_pool_executor`` documentation uses Sphinx_ as its main documentation compiler.
This means that the docs are kept in the same repository as the project code, and
that any documentation update is done in the same way as a code contribution.
.. todo:: Don't forget to mention which markup language you are using.
e.g., reStructuredText_ or CommonMark_ with MyST_ extensions.
.. todo:: If your project is hosted on GitHub, you can also mention the following tip:
.. tip::
Please notice that the `GitHub web interface`_ provides a quick way of
   proposing changes in ``scheduled_thread_pool_executor``'s files. While this mechanism can
be tricky for normal code contributions, it works perfectly fine for
contributing to the docs, and can be quite handy.
If you are interested in trying this method out, please navigate to
   the ``docs`` folder in the source repository_, find the file you
   would like to change and click on the little pencil icon at the
   top to open `GitHub's code editor`_. Once you finish editing the file,
   please write a message in the form at the bottom of the page describing
   which changes you have made and what the motivations behind them are, and
   submit your proposal.
When working on documentation changes in your local machine, you can
compile them using |tox|_::
tox -e docs
and use Python's built-in web server for a preview in your web browser
(``http://localhost:8000``)::
python3 -m http.server --directory 'docs/_build/html'
Code Contributions
==================
.. todo:: Please include a reference or explanation about the internals of the project.
An architecture description, design principles or at least a summary of the
main concepts will make it easy for potential contributors to get started
quickly.
Submit an issue
---------------
Before you work on any non-trivial code contribution it's best to first create
a report in the `issue tracker`_ to start a discussion on the subject.
This often provides additional considerations and avoids unnecessary work.
Create an environment
---------------------
Before you start coding, we recommend creating an isolated `virtual
environment`_ to avoid any problems with your installed Python packages.
This can easily be done via either |virtualenv|_::
virtualenv <PATH TO VENV>
source <PATH TO VENV>/bin/activate
or Miniconda_::
conda create -n scheduled_thread_pool_executor python=3 six virtualenv pytest pytest-cov
conda activate scheduled_thread_pool_executor
Clone the repository
--------------------
#. Create a user account on |the repository service| if you do not already have one.
#. Fork the project repository_: click on the *Fork* button near the top of the
page. This creates a copy of the code under your account on |the repository service|.
#. Clone this copy to your local disk::
git clone [email protected]:YourLogin/scheduled_thread_pool_executor.git
cd scheduled_thread_pool_executor
#. You should run::
pip install -U pip setuptools -e .
   to be able to run ``putup --help``.
.. todo:: if you are not using pre-commit, please remove the following item:
#. Install |pre-commit|_::
pip install pre-commit
pre-commit install
``scheduled_thread_pool_executor`` comes with a lot of hooks configured to automatically help the
developer to check the code being written.
Implement your changes
----------------------
#. Create a branch to hold your changes::
git checkout -b my-feature
and start making changes. Never work on the master branch!
#. Start your work on this branch. Don't forget to add docstrings_ to new
functions, modules and classes, especially if they are part of public APIs.
#. Add yourself to the list of contributors in ``AUTHORS.rst``.
#. When you’re done editing, do::
git add <MODIFIED FILES>
git commit
to record your changes in git_.
.. todo:: if you are not using pre-commit, please remove the following item:
Please make sure to see the validation messages from |pre-commit|_ and fix
   any issues it reports.
This should automatically use flake8_/black_ to check/fix the code style
in a way that is compatible with the project.
.. important:: Don't forget to add unit tests and documentation in case your
contribution adds an additional feature and is not just a bugfix.
Moreover, writing a `descriptive commit message`_ is highly recommended.
In case of doubt, you can check the commit history with::
git log --graph --decorate --pretty=oneline --abbrev-commit --all
to look for recurring communication patterns.
#. Please check that your changes don't break any unit tests with::
tox
(after having installed |tox|_ with ``pip install tox`` or ``pipx``).
You can also use |tox|_ to run several other pre-configured tasks in the
repository. Try ``tox -av`` to see a list of the available checks.
Submit your contribution
------------------------
#. If everything works fine, push your local branch to |the repository service| with::
git push -u origin my-feature
#. Go to the web page of your fork and click |contribute button|
to send your changes for review.
.. todo:: if you are using GitHub, you can uncomment the following paragraph
Find more detailed information in `creating a PR`_. You might also want to open
   the PR as a draft first and mark it as ready for review after the feedback
from the continuous integration (CI) system or any required fixes.
Troubleshooting
---------------
The following tips can be used when facing problems to build or test the
package:
#. Make sure to fetch all the tags from the upstream repository_.
The command ``git describe --abbrev=0 --tags`` should return the version you
are expecting. If you are trying to run CI scripts in a fork repository,
make sure to push all the tags.
You can also try to remove all the egg files or the complete egg folder, i.e.,
``.eggs``, as well as the ``*.egg-info`` folders in the ``src`` folder or
potentially in the root of your project.
#. Sometimes |tox|_ misses out when new dependencies are added, especially to
``setup.cfg`` and ``docs/requirements.txt``. If you find any problems with
missing dependencies when running a command with |tox|_, try to recreate the
``tox`` environment using the ``-r`` flag. For example, instead of::
tox -e docs
Try running::
tox -r -e docs
#. Make sure to have a reliable |tox|_ installation that uses the correct
Python version (e.g., 3.7+). When in doubt you can run::
tox --version
# OR
which tox
If you have trouble and are seeing weird errors upon running |tox|_, you can
also try to create a dedicated `virtual environment`_ with a |tox|_ binary
freshly installed. For example::
virtualenv .venv
source .venv/bin/activate
.venv/bin/pip install tox
.venv/bin/tox -e all
#. `Pytest can drop you`_ into an interactive session in case an error occurs.
In order to do that you need to pass a ``--pdb`` option (for example by
   running ``tox -- -k <NAME OF THE FAILING TEST> --pdb``).
   You can also set up breakpoints manually instead of using the ``--pdb`` option.
Maintainer tasks
================
Releases
--------
.. todo:: This section assumes you are using PyPI to publicly release your package.
If instead you are using a different/private package index, please update
the instructions accordingly.
If you are part of the group of maintainers and have correct user permissions
on PyPI_, the following steps can be used to release a new version for
``scheduled_thread_pool_executor``:
#. Make sure all unit tests are successful.
#. Tag the current commit on the main branch with a release tag, e.g., ``v1.2.3``.
#. Push the new tag to the upstream repository_, e.g., ``git push upstream v1.2.3``
#. Clean up the ``dist`` and ``build`` folders with ``tox -e clean``
(or ``rm -rf dist build``)
to avoid confusion with old builds and Sphinx docs.
#. Run ``tox -e build`` and check that the files in ``dist`` have
the correct version (no ``.dirty`` or git_ hash) according to the git_ tag.
Also check the sizes of the distributions, if they are too big (e.g., >
500KB), unwanted clutter may have been accidentally included.
#. Run ``tox -e publish -- --repository pypi`` and check that everything was
uploaded to PyPI_ correctly.
.. [#contrib1] Even though these resources focus on open source projects and
communities, the general ideas behind collaborating with other developers
to collectively create software are general and can be applied to all sorts
of environments, including private companies and proprietary code bases.
.. <-- start -->
.. todo:: Please review and change the following definitions:
.. |the repository service| replace:: GitHub
.. |contribute button| replace:: "Create pull request"
.. _repository: https://github.com/<USERNAME>/scheduled_thread_pool_executor
.. _issue tracker: https://github.com/<USERNAME>/scheduled_thread_pool_executor/issues
.. <-- end -->
.. |virtualenv| replace:: ``virtualenv``
.. |pre-commit| replace:: ``pre-commit``
.. |tox| replace:: ``tox``
.. _black: https://pypi.org/project/black/
.. _CommonMark: https://commonmark.org/
.. _contribution-guide.org: http://www.contribution-guide.org/
.. _creating a PR: https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request
.. _descriptive commit message: https://chris.beams.io/posts/git-commit
.. _docstrings: https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
.. _first-contributions tutorial: https://github.com/firstcontributions/first-contributions
.. _flake8: https://flake8.pycqa.org/en/stable/
.. _git: https://git-scm.com
.. _GitHub's fork and pull request workflow: https://guides.github.com/activities/forking/
.. _guide created by FreeCodeCamp: https://github.com/FreeCodeCamp/how-to-contribute-to-open-source
.. _Miniconda: https://docs.conda.io/en/latest/miniconda.html
.. _MyST: https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html
.. _other kinds of contributions: https://opensource.guide/how-to-contribute
.. _pre-commit: https://pre-commit.com/
.. _PyPI: https://pypi.org/
.. _PyScaffold's contributor's guide: https://pyscaffold.org/en/stable/contributing.html
.. _Pytest can drop you: https://docs.pytest.org/en/stable/usage.html#dropping-to-pdb-python-debugger-at-the-start-of-a-test
.. _Python Software Foundation's Code of Conduct: https://www.python.org/psf/conduct/
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/
.. _Sphinx: https://www.sphinx-doc.org/en/master/
.. _tox: https://tox.readthedocs.io/en/stable/
.. _virtual environment: https://realpython.com/python-virtual-environments-a-primer/
.. _virtualenv: https://virtualenv.pypa.io/en/stable/
.. _GitHub web interface: https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/editing-files-in-your-repository
.. _GitHub's code editor: https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/editing-files-in-your-repository
|
scheduled-thread-pool-executor
|
/scheduled_thread_pool_executor-1.0.2.tar.gz/scheduled_thread_pool_executor-1.0.2/CONTRIBUTING.rst
|
CONTRIBUTING.rst
|
| 0.626124 | 0.600833 |
import threading
from concurrent.futures import ThreadPoolExecutor
from typing import Callable
from delayedqueue import DelayedQueue
from scheduled_thread_pool_executor.ScheduledTask import ScheduledTask
class ScheduledThreadPoolExecutor(ThreadPoolExecutor):
def __init__(self, max_workers=10, name=''):
super().__init__(max_workers=max_workers, thread_name_prefix=name)
self._max_workers = max_workers
self.queue = DelayedQueue()
        self._stopped = False  # renamed: a plain bool here used to shadow the inherited shutdown() method
def schedule_at_fixed_rate(self, fn: Callable, initial_delay: int, period: int, *args, **kwargs) -> bool:
        if self._stopped:
            raise RuntimeError("cannot schedule new task after shutdown!")
task = ScheduledTask(fn, initial_delay, period, *args, is_fixed_rate=True, executor_ctx=self, **kwargs)
return self._put(task, initial_delay)
def schedule_at_fixed_delay(self, fn: Callable, initial_delay: int, period: int, *args, **kwargs) -> bool:
        if self._stopped:
            raise RuntimeError("cannot schedule new task after shutdown!")
task = ScheduledTask(fn, initial_delay, period, *args, is_fixed_delay=True, executor_ctx=self, **kwargs)
return self._put(task, initial_delay)
def schedule(self, fn, initial_delay, *args, **kwargs) -> bool:
task = ScheduledTask(fn, initial_delay, 0, *args, executor_ctx=self, **kwargs)
return self._put(task, initial_delay)
def _put(self, task: ScheduledTask, delay: int) -> bool:
# Don't use this explicitly. Use schedule/schedule_at_fixed_delay/schedule_at_fixed_rate. Additionally, to be
# called by ScheduledTask only!
if not isinstance(task, ScheduledTask):
raise TypeError(f"Task `{task!r}` must be of type ScheduledTask")
if delay < 0:
raise ValueError(f"Delay `{delay}` must be a non-negative number")
print(f" enqueuing {task} with delay of {delay}")
return self.queue.put(task, delay)
def __run(self):
        while not self._stopped:
try:
task: ScheduledTask = self.queue.get()
super().submit(task.run, *task.args, **task.kwargs)
except Exception as e:
print(e)
    def stop(self, wait_for_completion: bool = True):
        self._stopped = True
        super().shutdown(wait_for_completion)
def run(self):
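        # Starts the dispatcher loop that moves due tasks from the delayed queue into the pool;
        # it must be called once after construction, or scheduled tasks will never execute.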
t = threading.Thread(target=self.__run)
        t.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
t.start()
|
scheduled-thread-pool-executor
|
/scheduled_thread_pool_executor-1.0.2.tar.gz/scheduled_thread_pool_executor-1.0.2/src/scheduled_thread_pool_executor/ScheduledThreadPoolExecutor.py
|
ScheduledThreadPoolExecutor.py
|
| 0.773559 | 0.149345 |
ScheduleDuty
============
Import schedules from a CSV file. Currently supports weekly shift-based
schedules and standard rotation-based schedules.
Usage
-----
1. Create a CSV file with the following format depending upon your schedule
type:
Weekly Shifts::
escalation_level,user_or_team,type,day_of_week,start_time,end_time
**escalation\_level** (int): Level to place user/team on the
escalation policy
**user\_or\_team** (str): The name/email of the user/team
**type** (str): Must be one of user, team
**day\_of\_week** (str or int): Must be one of 0, 1, 2, 3, 4, 5, 6,
sunday, monday, tuesday, wednesday, thursday, friday, saturday,
weekday, weekdays, weekend, weekends, all
**start\_time** (str): Start time of the shift for that day
(e.g., 13:00)
**end\_time** (str): End time of the shift for that day (e.g., 21:00)
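For illustration, a couple of hypothetical rows using these fields (the
names, emails, and times are made up)::
    escalation_level,user_or_team,type,day_of_week,start_time,end_time
    1,[email protected],user,weekdays,09:00,17:00
    2,Ops Team,team,weekends,00:00,23:59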
Standard Rotation::
user,layer,layer_name,rotation_type,shift_length,shift_type,handoff_day,handoff_time,restriction_start_day,restriction_start_time,restriction_end_date,restriction_end_time
**user** (str): The name/email of the user
**layer** (int): The schedule layer
**layer_name** (str): The name of the layer
**rotation_type** (str): The type of rotation. Can be one of daily, weekly,
custom.
**shift_length** (int): Length of the on-call shift in a ``custom`` rotation
**shift_type** (str): The unit of measure for the ``shift_length``. Can be
one of hours, days, weeks.
**handoff_day** (str or int): The day of the week to handoff the on-call
shift. Can be one of 0, 1, 2, 3, 4, 5, 6, monday, tuesday, wednesday,
thursday, friday, saturday, sunday
**handoff_time** (str): The time of day to handoff the shift (e.g., 08:00)
**restriction_start_day** (str): Day of the week to start the restriction.
Can be one of 0, 1, 2, 3, 4, 5, 6, monday, tuesday, wednesday, thursday,
friday, saturday, sunday
**restriction_start_time** (str): Time of day to start the restriction
(e.g., 08:00)
**restriction_end_date** (str): Day of the week to end the restriction. Can
be one of 0, 1, 2, 3, 4, 5, 6, monday, tuesday, wednesday, thursday, friday,
saturday, sunday
**restriction_end_time** (str): Time of day to end the restriction
(e.g., 17:00)
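For illustration, a hypothetical row using these fields (``shift_length`` and
``shift_type`` are left blank here on the assumption that a ``weekly``
rotation does not need them)::
    user,layer,layer_name,rotation_type,shift_length,shift_type,handoff_day,handoff_time,restriction_start_day,restriction_start_time,restriction_end_date,restriction_end_time
    [email protected],1,Primary,weekly,,,monday,08:00,monday,08:00,friday,17:00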
2. Save all CSV files into one directory
3. If running from the command line, execute the ``scheduleduty.py`` script with the command-line arguments for
your schedule type:
Weekly Shifts::
./scheduleduty/scheduleduty.py --schedule-type weekly_shifts --csv-dir examples/weekly_shifts --api-key EXAMPLE_TOKEN --base-name "Weekly Shifts" --level-name Level --multiple-name Multi --start-date 2017-01-01 --end-date 2017-02-01 --time-zone UTC --num-loops 1 --escalation-delay 30
Standard Rotation::
./scheduleduty/scheduleduty.py --schedule-type standard_rotation --csv-dir examples/standard_rotation --api-key EXAMPLE_TOKEN --base-name "Standard Rotation" --start-date 2017-01-01 --end-date 2017-02-01 --time-zone UTC
4. If importing into a script, use the ``execute`` function within the ``Import`` class to import your schedules:
Weekly Shifts::
from scheduleduty import scheduleduty
importer = scheduleduty.Import("weekly_shifts","./examples/weekly_shifts","EXAMPLE_TOKEN","Weekly Shifts","Level","Multi","2017-01-01","2017-02-01","UTC",1,30)
importer.execute()
Standard Rotation::
from scheduleduty import scheduleduty
importer = scheduleduty.Import("standard_rotation","./examples/standard_rotation","EXAMPLE_TOKEN","Standard Rotation",None,None,"2017-01-01","2017-02-01","UTC",None,None)
importer.execute()
Arguments
----------------------
``--schedule-type``: Type of schedule(s) being uploaded. Must be one of ``weekly_shifts``, ``standard_rotation``.
``--csv-dir``: Path to the directory housing all CSVs to import into PagerDuty. Required for all schedule types.
``--api-key``: PagerDuty v2 REST API token. Required for all schedule types.
``--base-name``: Name of the escalation policy or schedule being added as well as the base name for each schedule added to the escalation policy. Required for all schedule types.
``--level-name``: The base name for each new escalation policy level to be appended by the integer value of the level number. Required for ``weekly_shifts`` schedule type.
``--multiple-name``: The base name for each schedule on the same escalation policy level to be appended by the integer value of the schedule number. Required for ``weekly_shifts`` schedule type.
``--start-date``: ISO 8601 formatted start date for the schedule. Currently only supports dates in YYYY-MM-DD format. Required for all schedule types.
``--end-date``: ISO 8601 formatted end date for the schedule. Currently only supports dates in YYYY-MM-DD format. Optional for all schedule types.
``--time-zone``: Time zone for this schedule. Must be one of the time zones from the IANA time zone database. Required for all schedule types.
``--num-loops``: The number of times to loop through the escalation policy. Required for ``weekly_shifts`` schedule type.
``--escalation-delay``: The number of minutes to wait before escalating the incident to the next level. Required for ``weekly_shifts`` schedule type.
Testing
-------
1. Create a file ``config.json`` that includes your command-line
arguments for testing:
::
{
"api_key": "EXAMPLE_KEY",
"base_name": "Weekly Shifts",
"level_name": "Level",
"multi_name": "Multi",
"start_date": "2017-01-01",
"end_date": null,
"time_zone": "UTC",
"num_loops": 1,
"escalation_delay": 30
}
2. Save ``config.json`` within the ``tests`` directory
3. Run the test suite in ``test_suite.py``:
::
python tests/test_suite.py
Author
------
Luke Epp [email protected]
.. _IANA time zone database: https://www.iana.org/time-zones
|
scheduleduty
|
/scheduleduty-0.3.3.tar.gz/scheduleduty-0.3.3/README.rst
|
README.rst
|
| 0.885043 | 0.599749 |
# scheduleplus
A scheduler built on cron syntax with a workday/holiday check feature
| Attributes | Allowed values | Allowed special characters | Required |
| ---------- | -------------- | -------------------------- | -------- |
| Minutes | 0-59 | \* , - / | Yes |
| Hours | 0-23 | \* , - / | Yes |
| Days | 1-31 | \* , - / L | Yes |
| Months | 1-12, JAN-DEC | \* , - / | Yes |
| Weekdays | 0-6, MON-SUN | \* , - / F L | Yes |
| Holiday\* | 0, 1 | \* | No |
### Holiday\*
0 = checking if workday, 1 = checking if holiday, \* = any day
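For example, assuming the sixth field behaves as described in the table above, the hypothetical expression below would fire at 09:00 only on working days:
```
0 9 * * * 0
```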
## Installation
```
pip install scheduleplus
```
## Usage
### Running a function on schedule
This runs the function every 5 minutes.
```python
import time
from scheduleplus.schedule import Scheduler
def work(data):
print(f"Working {data}...")
schedule = Scheduler()
schedule.cron("*/5 * * * *").do_function(work, "test")
while True:
schedule.run_function_jobs()
time.sleep(1)
```
### Running a callback
This runs the callback with a dictionary every 5 minutes.
```python
import time
from scheduleplus.schedule import Scheduler
def work(data):
print(f"Working {data}...")
schedule = Scheduler()
schedule.cron("*/5 * * * *").do_callback({"message": "cool"})
while True:
schedule.run_callback_jobs(work)
time.sleep(1)
```
### List jobs
```python
schedule = Scheduler()
schedule.cron("*/5 * * * *").do_function(work, "test")
schedule.cron("1 1 1 1 1").do_callback({"message": "cool"})
schedule.list_jobs()
```
Result
```
Job id Cron Next run time Time left Function Callback message
-------- ------------- --------------------- --------------------- --------------- ---------------------
1 */5 * * * * 2022-11-04 13:50:00 0:00:29 work('test')
2 1 1 1 1 1 2030-01-01 01:01:00 2614 days, 11:11:29 {'message': 'cool'}
```
|
scheduleplus
|
/scheduleplus-0.1.1.tar.gz/scheduleplus-0.1.1/README.md
|
README.md
|
| 0.487307 | 0.770098 |
scheduler
=========
A simple job scheduler with cron expressions
Inspired by "`schedule <https://github.com/dbader/schedule>`_".
Usage
-----
.. code-block:: bash
$ pip3 install scheduler-cron
.. code-block:: python
from scheduler import Scheduler
def test_1():
print('test1')
def test_2(name):
print('test2: ' + name)
def test_3(name, lname):
print('test3: ' + name + ' ' + lname)
scheduler = Scheduler(60)
scheduler.add('foo', '* * * * *', test_1)
scheduler.add('bar', '0/2 * * * *', test_2, ('mehrdad',))
scheduler.add('bas', '0/3 * * * *', test_3, ('behzad', 'mahmoudi'))
scheduler.add('zoo', '0/4 * * * *', test_3, ('reza', 'mahmoudi'))
scheduler.start()
ToDo
-----
- Run jobs in async mode
- Sleep for more than ``interval`` seconds when no job is due in the next window (see the sketch below)
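The second item amounts to sleeping until the earliest pending job rather than for a fixed interval; a minimal sketch of the idea (assuming each job exposes a ``next_run`` datetime, as in this package):

.. code-block:: python

    import datetime
    import time

    def sleep_until_next_job(jobs, min_interval=60):
        # Sleep until the earliest next_run, but never less than min_interval.
        pending = [job.next_run for job in jobs.values() if job.next_run]
        if not pending:
            time.sleep(min_interval)
            return
        delay = (min(pending) - datetime.datetime.now()).total_seconds()
        time.sleep(max(delay, min_interval))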
Meta
----
Mehrdad Mahmoudi - `@mehrdadmhd <https://twitter.com/mehrdadmhd>`_ - [email protected]
https://github.com/mehrdadmhd/scheduler-py
|
scheduler-cron
|
/scheduler-cron-0.1.tar.gz/scheduler-cron-0.1/README.rst
|
README.rst
|
| 0.648911 | 0.423816 |
from threading import Thread
import croniter
import datetime
import time
class Scheduler(object):
    """Polls every `interval` seconds and runs jobs whose cron expression is due."""
    def __init__(self, interval):
        self.jobs = {}
self.schedule = []
self.interval = interval
self.last_check = None
self.started = False
self.shutdown = False
self._refresh_times()
def start(self):
if self.started:
return
self.started = True
thread = Thread(target=self._do)
thread.start()
    def _do(self):
        # Main loop: run every job due in the current window, recompute the
        # schedule for the next window, then sleep.
while not self.shutdown:
now = datetime.datetime.now()
for alias in self.schedule:
self.jobs[alias].run(now)
self._refresh_times(True)
time.sleep(self.interval)
    def add(self, alias, expression, func, args=None):
        # Jobs registered after start() are picked up on the next
        # _refresh_times() pass of the main loop.
        self.jobs[alias] = Job(alias, expression, func, args)
    def _refresh_times(self, force=False):
        # Rebuild the list of aliases whose jobs become due within the
        # next `interval` seconds.
if self.last_check is None or force:
self.last_check = datetime.datetime.now()
check_for = self.last_check + datetime.timedelta(seconds=self.interval)
self.schedule = []
for alias, job in self.jobs.items():
if job.can_run(check_for):
self.schedule.append(alias)
class Job(object):
def __init__(self, alias, cron_expression, func, args):
self.is_periodic = False
self.cron_expression = cron_expression
self.last_run = None
self.next_run = None
self.job_func = func
self.job_func_args = args
self.alias = alias
self.cron_expression_obj = CronExpression(cron_expression)
self.next_run = self.cron_expression_obj.get_next()
def can_run(self, time_to_run):
return self.next_run != self.last_run and self.next_run <= time_to_run
def run(self, now):
if self.next_run == self.last_run:
return
self.next_run = self.cron_expression_obj.get_next(True)
self.last_run = now
if self.job_func_args is None:
self.job_func()
else:
self.job_func(*self.job_func_args)
class CronExpression(object):
    """Thin wrapper around croniter that caches the next run time."""
    def __init__(self, cron_expression):
self.cron_expression = cron_expression
self.last_run = None
self.next_run = None
self._process_cron_expression()
def _process_cron_expression(self):
if self.last_run is None:
self.last_run = datetime.datetime.now()
self.cron = croniter.croniter(self.cron_expression, self.last_run)
self.get_next()
def get_next(self, force=False):
if self.next_run is None or force:
self.next_run = self.cron.get_next(datetime.datetime)
return self.next_run
|
scheduler-cron
|
/scheduler-cron-0.1.tar.gz/scheduler-cron-0.1/scheduler/__init__.py
|
__init__.py
|
| 0.430746 | 0.10226 |
import webbrowser
import os
from flask import Flask, render_template, request
from scheduler_front.script_template import file as source_file
ADMIN_EMAIL = '[email protected]'
app = Flask(__name__, static_url_path='')
@app.route("/", methods=['POST', 'GET'])
def index():
if request.method == 'GET':
return render_template('index.html')
else:
create_folder(request)
write_scheduler_configuration_file(request)
write_query(request)
write_script_configuration_file(request)
copy_script_template(request)
return render_template('success.html')
def create_folder(form):
    output_dir = parse_output_dir(form)
    folder_name = parse_folder_name(form)
    # Build paths with os.path.join instead of hand-rolled backslashes,
    # which relied on invalid escape sequences and were Windows-only.
    base = os.path.join(output_dir, folder_name)
    os.mkdir(base)
    os.mkdir(os.path.join(base, 'bin'))
    os.mkdir(os.path.join(base, 'conf'))
    os.mkdir(os.path.join(base, 'queries'))
    os.mkdir(os.path.join(base, 'outputs'))
    with open(os.path.join(base, 'conf', '.gitignore'), 'w') as f:
        f.write("*.conf5\n")
    with open(os.path.join(base, 'outputs', '.gitignore'), 'w') as f:
        f.write("*.csv\n")
        f.write("*.xlsx\n")
    with open(os.path.join(base, 'outputs', 'cleaning_bot_instructions.txt'), 'w') as f:
        f.write("delete_after_nb_days: 60\n")
def copy_script_template(form):
output_dir = parse_output_dir(form)
folder_name = parse_folder_name(form)
filename = f"{output_dir}\{folder_name}\\bin\\script.py" if output_dir else 'script.py'
with open(filename, 'w') as destination_file:
destination_file.write(source_file)
def parse_destination_emails(form):
return form.form['destination_email'].split(';')
def parse_folder_name(form):
return form.form['job_name']
def parse_kind_of_query(form):
return form.form['kind_of_query']
def parse_email_subject(form):
return form.form['subject_email']
def parse_email_body(form):
return form.form['body_email']
def parse_output_file_format(form):
return form.form['output_type']
def parse_error_emails(form):
    # Drop empty entries so a blank field doesn't pass the "at least one" check.
    return [email.strip() for email in form.form['error_email'].split(';') if email.strip()]
def parse_output_dir(form):
return form.form['output_dir']
def write_script_configuration_file(form):
output_dir = parse_output_dir(form)
folder_name = parse_folder_name(form)
kind_of_query = parse_kind_of_query(form)
destination_emails = parse_destination_emails(form)
email_subject = parse_email_subject(form)
email_body = parse_email_body(form)
output_file_format = parse_output_file_format(form)
email_sender = ADMIN_EMAIL
filename = f"{output_dir}\{folder_name}\\bin\\script_config.py" if output_dir else 'script_config.py'
parsed_emails = ", ".join([f"'{email.strip()}'" for email in destination_emails])
with open(filename, 'w') as f:
f.write(f"kind_of_query = '{kind_of_query}'\n")
f.write(f"output_type = '{output_file_format}'\n")
f.write(f"destination_emails = [{parsed_emails}]\n")
f.write(f"email_subject = '{email_subject}'\n")
f.write(f"email_body = '''{email_body}'''\n")
f.write(f"email_sender = '{email_sender}'")
def write_query(form):
output_dir = parse_output_dir(form)
folder_name = parse_folder_name(form)
filename = f"{output_dir}\{folder_name}\\queries\\query.sql" if output_dir else 'query.sql'
with open(filename, 'w') as f:
f.write(form.form['query'])
def parse_days(form):
days = []
for i in range(1, 32):
try:
if form.form[str(i)]:
days.append(str(i))
except KeyError:
pass
return ",".join(days)
def parse_weekdays(form):
    days_of_the_week = [
        'sunday',
        'monday',
        'tuesday',
        'wednesday',
        'thursday',
        'friday',
        'saturday'
    ]
    weekdays = []
for i, day in enumerate(days_of_the_week):
try:
if form.form[day]:
weekdays.append(str(i))
except KeyError:
pass
return ",".join(weekdays)
def parse_months(form):
months = []
months_of_the_year = [
'jan',
'feb',
'mar',
'apr',
'may',
'jun',
'jul',
'aug',
'sep',
'oct',
'nov',
'dec'
]
for i, month in enumerate(months_of_the_year):
try:
if form.form[month]:
months.append(str(i + 1))
except KeyError:
pass
return ",".join(months)
def write_scheduler_configuration_file(form):
error_emails = parse_error_emails(form)
days = parse_days(form)
weekdays = parse_weekdays(form)
months = parse_months(form)
output_dir = parse_output_dir(form)
folder_name = parse_folder_name(form)
filename = f"{output_dir}\{folder_name}\\conf\\config.txt" if output_dir else 'config.txt'
with open(filename, 'w') as f:
if not error_emails:
raise ValueError
else:
f.write(f"error_email: {', '.join(error_emails)}\n")
if days:
f.write(f"days: {days}\n")
if weekdays:
f.write(f"weekdays: {weekdays}\n")
if months:
f.write(f"months: {months}\n")
def start():
webbrowser.open('http://127.0.0.1:5000')
app.run()
if __name__ == '__main__':
start()
|
scheduler-front
|
/scheduler_front-0.0.11-py3-none-any.whl/scheduler_front/app.py
|
app.py
|
| 0.174199 | 0.092196 |
!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery"),require("popper.js")):"function"==typeof define&&define.amd?define(["exports","jquery","popper.js"],e):e(t.bootstrap={},t.jQuery,t.Popper)}(this,function(t,e,h){"use strict";function i(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function s(t,e,n){return e&&i(t.prototype,e),n&&i(t,n),t}function l(r){for(var t=1;t<arguments.length;t++){var o=null!=arguments[t]?arguments[t]:{},e=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(e=e.concat(Object.getOwnPropertySymbols(o).filter(function(t){return Object.getOwnPropertyDescriptor(o,t).enumerable}))),e.forEach(function(t){var e,n,i;e=r,i=o[n=t],n in e?Object.defineProperty(e,n,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[n]=i})}return r}e=e&&e.hasOwnProperty("default")?e.default:e,h=h&&h.hasOwnProperty("default")?h.default:h;var r,n,o,a,c,u,f,d,g,_,m,p,v,y,E,C,T,b,S,I,A,D,w,N,O,k,P,j,H,L,R,x,W,U,q,F,K,M,Q,B,V,Y,z,J,Z,G,$,X,tt,et,nt,it,rt,ot,st,at,lt,ct,ht,ut,ft,dt,gt,_t,mt,pt,vt,yt,Et,Ct,Tt,bt,St,It,At,Dt,wt,Nt,Ot,kt,Pt,jt,Ht,Lt,Rt,xt,Wt,Ut,qt,Ft,Kt,Mt,Qt,Bt,Vt,Yt,zt,Jt,Zt,Gt,$t,Xt,te,ee,ne,ie,re,oe,se,ae,le,ce,he,ue,fe,de,ge,_e,me,pe,ve,ye,Ee,Ce,Te,be,Se,Ie,Ae,De,we,Ne,Oe,ke,Pe,je,He,Le,Re,xe,We,Ue,qe,Fe,Ke,Me,Qe,Be,Ve,Ye,ze,Je,Ze,Ge,$e,Xe,tn,en,nn,rn,on,sn,an,ln,cn,hn,un,fn,dn,gn,_n,mn,pn,vn,yn,En,Cn,Tn,bn,Sn,In,An,Dn,wn,Nn,On,kn,Pn,jn,Hn,Ln,Rn,xn,Wn,Un,qn,Fn=function(i){var e="transitionend";function t(t){var e=this,n=!1;return i(this).one(l.TRANSITION_END,function(){n=!0}),setTimeout(function(){n||l.triggerTransitionEnd(e)},t),this}var l={TRANSITION_END:"bsTransitionEnd",getUID:function(t){for(;t+=~~(1e6*Math.random()),document.getElementById(t););return t},getSelectorFromElement:function(t){var e=t.getAttribute("data-target");e&&"#"!==e||(e=t.getAttribute("href")||"");try{return document.querySelector(e)?e:null}catch(t){return null}},getTransitionDurationFromElement:function(t){if(!t)return 0;var e=i(t).css("transition-duration");return parseFloat(e)?(e=e.split(",")[0],1e3*parseFloat(e)):0},reflow:function(t){return t.offsetHeight},triggerTransitionEnd:function(t){i(t).trigger(e)},supportsTransitionEnd:function(){return Boolean(e)},isElement:function(t){return(t[0]||t).nodeType},typeCheckConfig:function(t,e,n){for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var r=n[i],o=e[i],s=o&&l.isElement(o)?"element":(a=o,{}.toString.call(a).match(/\s([a-z]+)/i)[1].toLowerCase());if(!new RegExp(r).test(s))throw new Error(t.toUpperCase()+': Option "'+i+'" provided type "'+s+'" but expected type "'+r+'".')}var a}};return i.fn.emulateTransitionEnd=t,i.event.special[l.TRANSITION_END]={bindType:e,delegateType:e,handle:function(t){if(i(t.target).is(this))return t.handleObj.handler.apply(this,arguments)}},l}(e),Kn=(n="alert",a="."+(o="bs.alert"),c=(r=e).fn[n],u={CLOSE:"close"+a,CLOSED:"closed"+a,CLICK_DATA_API:"click"+a+".data-api"},f="alert",d="fade",g="show",_=function(){function i(t){this._element=t}var t=i.prototype;return t.close=function(t){var e=this._element;t&&(e=this._getRootElement(t)),this._triggerCloseEvent(e).isDefaultPrevented()||this._removeElement(e)},t.dispose=function(){r.removeData(this._element,o),this._element=null},t._getRootElement=function(t){var e=Fn.getSelectorFromElement(t),n=!1;return e&&(n=document.querySelector(e)),n||(n=r(t).closest("."+f)[0]),n},t._triggerCloseEvent=function(t){var 
e=r.Event(u.CLOSE);return r(t).trigger(e),e},t._removeElement=function(e){var n=this;if(r(e).removeClass(g),r(e).hasClass(d)){var t=Fn.getTransitionDurationFromElement(e);r(e).one(Fn.TRANSITION_END,function(t){return n._destroyElement(e,t)}).emulateTransitionEnd(t)}else this._destroyElement(e)},t._destroyElement=function(t){r(t).detach().trigger(u.CLOSED).remove()},i._jQueryInterface=function(n){return this.each(function(){var t=r(this),e=t.data(o);e||(e=new i(this),t.data(o,e)),"close"===n&&e[n](this)})},i._handleDismiss=function(e){return function(t){t&&t.preventDefault(),e.close(this)}},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),i}(),r(document).on(u.CLICK_DATA_API,'[data-dismiss="alert"]',_._handleDismiss(new _)),r.fn[n]=_._jQueryInterface,r.fn[n].Constructor=_,r.fn[n].noConflict=function(){return r.fn[n]=c,_._jQueryInterface},_),Mn=(p="button",y="."+(v="bs.button"),E=".data-api",C=(m=e).fn[p],T="active",b="btn",I='[data-toggle^="button"]',A='[data-toggle="buttons"]',D="input",w=".active",N=".btn",O={CLICK_DATA_API:"click"+y+E,FOCUS_BLUR_DATA_API:(S="focus")+y+E+" blur"+y+E},k=function(){function n(t){this._element=t}var t=n.prototype;return t.toggle=function(){var t=!0,e=!0,n=m(this._element).closest(A)[0];if(n){var i=this._element.querySelector(D);if(i){if("radio"===i.type)if(i.checked&&this._element.classList.contains(T))t=!1;else{var r=n.querySelector(w);r&&m(r).removeClass(T)}if(t){if(i.hasAttribute("disabled")||n.hasAttribute("disabled")||i.classList.contains("disabled")||n.classList.contains("disabled"))return;i.checked=!this._element.classList.contains(T),m(i).trigger("change")}i.focus(),e=!1}}e&&this._element.setAttribute("aria-pressed",!this._element.classList.contains(T)),t&&m(this._element).toggleClass(T)},t.dispose=function(){m.removeData(this._element,v),this._element=null},n._jQueryInterface=function(e){return this.each(function(){var t=m(this).data(v);t||(t=new n(this),m(this).data(v,t)),"toggle"===e&&t[e]()})},s(n,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),n}(),m(document).on(O.CLICK_DATA_API,I,function(t){t.preventDefault();var e=t.target;m(e).hasClass(b)||(e=m(e).closest(N)),k._jQueryInterface.call(m(e),"toggle")}).on(O.FOCUS_BLUR_DATA_API,I,function(t){var e=m(t.target).closest(N)[0];m(e).toggleClass(S,/^focus(in)?$/.test(t.type))}),m.fn[p]=k._jQueryInterface,m.fn[p].Constructor=k,m.fn[p].noConflict=function(){return m.fn[p]=C,k._jQueryInterface},k),Qn=(j="carousel",L="."+(H="bs.carousel"),R=".data-api",x=(P=e).fn[j],W={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0},U={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean"},q="next",F="prev",K="left",M="right",Q={SLIDE:"slide"+L,SLID:"slid"+L,KEYDOWN:"keydown"+L,MOUSEENTER:"mouseenter"+L,MOUSELEAVE:"mouseleave"+L,TOUCHEND:"touchend"+L,LOAD_DATA_API:"load"+L+R,CLICK_DATA_API:"click"+L+R},B="carousel",V="active",Y="slide",z="carousel-item-right",J="carousel-item-left",Z="carousel-item-next",G="carousel-item-prev",$=".active",X=".active.carousel-item",tt=".carousel-item",et=".carousel-item-next, .carousel-item-prev",nt=".carousel-indicators",it="[data-slide], [data-slide-to]",rt='[data-ride="carousel"]',ot=function(){function o(t,e){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this._config=this._getConfig(e),this._element=P(t)[0],this._indicatorsElement=this._element.querySelector(nt),this._addEventListeners()}var t=o.prototype;return 
t.next=function(){this._isSliding||this._slide(q)},t.nextWhenVisible=function(){!document.hidden&&P(this._element).is(":visible")&&"hidden"!==P(this._element).css("visibility")&&this.next()},t.prev=function(){this._isSliding||this._slide(F)},t.pause=function(t){t||(this._isPaused=!0),this._element.querySelector(et)&&(Fn.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},t.cycle=function(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},t.to=function(t){var e=this;this._activeElement=this._element.querySelector(X);var n=this._getItemIndex(this._activeElement);if(!(t>this._items.length-1||t<0))if(this._isSliding)P(this._element).one(Q.SLID,function(){return e.to(t)});else{if(n===t)return this.pause(),void this.cycle();var i=n<t?q:F;this._slide(i,this._items[t])}},t.dispose=function(){P(this._element).off(L),P.removeData(this._element,H),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},t._getConfig=function(t){return t=l({},W,t),Fn.typeCheckConfig(j,t,U),t},t._addEventListeners=function(){var e=this;this._config.keyboard&&P(this._element).on(Q.KEYDOWN,function(t){return e._keydown(t)}),"hover"===this._config.pause&&(P(this._element).on(Q.MOUSEENTER,function(t){return e.pause(t)}).on(Q.MOUSELEAVE,function(t){return e.cycle(t)}),"ontouchstart"in document.documentElement&&P(this._element).on(Q.TOUCHEND,function(){e.pause(),e.touchTimeout&&clearTimeout(e.touchTimeout),e.touchTimeout=setTimeout(function(t){return e.cycle(t)},500+e._config.interval)}))},t._keydown=function(t){if(!/input|textarea/i.test(t.target.tagName))switch(t.which){case 37:t.preventDefault(),this.prev();break;case 39:t.preventDefault(),this.next()}},t._getItemIndex=function(t){return this._items=t&&t.parentNode?[].slice.call(t.parentNode.querySelectorAll(tt)):[],this._items.indexOf(t)},t._getItemByDirection=function(t,e){var n=t===q,i=t===F,r=this._getItemIndex(e),o=this._items.length-1;if((i&&0===r||n&&r===o)&&!this._config.wrap)return e;var s=(r+(t===F?-1:1))%this._items.length;return-1===s?this._items[this._items.length-1]:this._items[s]},t._triggerSlideEvent=function(t,e){var n=this._getItemIndex(t),i=this._getItemIndex(this._element.querySelector(X)),r=P.Event(Q.SLIDE,{relatedTarget:t,direction:e,from:i,to:n});return P(this._element).trigger(r),r},t._setActiveIndicatorElement=function(t){if(this._indicatorsElement){var e=[].slice.call(this._indicatorsElement.querySelectorAll($));P(e).removeClass(V);var n=this._indicatorsElement.children[this._getItemIndex(t)];n&&P(n).addClass(V)}},t._slide=function(t,e){var n,i,r,o=this,s=this._element.querySelector(X),a=this._getItemIndex(s),l=e||s&&this._getItemByDirection(t,s),c=this._getItemIndex(l),h=Boolean(this._interval);if(t===q?(n=J,i=Z,r=K):(n=z,i=G,r=M),l&&P(l).hasClass(V))this._isSliding=!1;else if(!this._triggerSlideEvent(l,r).isDefaultPrevented()&&s&&l){this._isSliding=!0,h&&this.pause(),this._setActiveIndicatorElement(l);var u=P.Event(Q.SLID,{relatedTarget:l,direction:r,from:a,to:c});if(P(this._element).hasClass(Y)){P(l).addClass(i),Fn.reflow(l),P(s).addClass(n),P(l).addClass(n);var f=Fn.getTransitionDurationFromElement(s);P(s).one(Fn.TRANSITION_END,function(){P(l).removeClass(n+" 
"+i).addClass(V),P(s).removeClass(V+" "+i+" "+n),o._isSliding=!1,setTimeout(function(){return P(o._element).trigger(u)},0)}).emulateTransitionEnd(f)}else P(s).removeClass(V),P(l).addClass(V),this._isSliding=!1,P(this._element).trigger(u);h&&this.cycle()}},o._jQueryInterface=function(i){return this.each(function(){var t=P(this).data(H),e=l({},W,P(this).data());"object"==typeof i&&(e=l({},e,i));var n="string"==typeof i?i:e.slide;if(t||(t=new o(this,e),P(this).data(H,t)),"number"==typeof i)t.to(i);else if("string"==typeof n){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}else e.interval&&(t.pause(),t.cycle())})},o._dataApiClickHandler=function(t){var e=Fn.getSelectorFromElement(this);if(e){var n=P(e)[0];if(n&&P(n).hasClass(B)){var i=l({},P(n).data(),P(this).data()),r=this.getAttribute("data-slide-to");r&&(i.interval=!1),o._jQueryInterface.call(P(n),i),r&&P(n).data(H).to(r),t.preventDefault()}}},s(o,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return W}}]),o}(),P(document).on(Q.CLICK_DATA_API,it,ot._dataApiClickHandler),P(window).on(Q.LOAD_DATA_API,function(){for(var t=[].slice.call(document.querySelectorAll(rt)),e=0,n=t.length;e<n;e++){var i=P(t[e]);ot._jQueryInterface.call(i,i.data())}}),P.fn[j]=ot._jQueryInterface,P.fn[j].Constructor=ot,P.fn[j].noConflict=function(){return P.fn[j]=x,ot._jQueryInterface},ot),Bn=(at="collapse",ct="."+(lt="bs.collapse"),ht=(st=e).fn[at],ut={toggle:!0,parent:""},ft={toggle:"boolean",parent:"(string|element)"},dt={SHOW:"show"+ct,SHOWN:"shown"+ct,HIDE:"hide"+ct,HIDDEN:"hidden"+ct,CLICK_DATA_API:"click"+ct+".data-api"},gt="show",_t="collapse",mt="collapsing",pt="collapsed",vt="width",yt="height",Et=".show, .collapsing",Ct='[data-toggle="collapse"]',Tt=function(){function a(e,t){this._isTransitioning=!1,this._element=e,this._config=this._getConfig(t),this._triggerArray=st.makeArray(document.querySelectorAll('[data-toggle="collapse"][href="#'+e.id+'"],[data-toggle="collapse"][data-target="#'+e.id+'"]'));for(var n=[].slice.call(document.querySelectorAll(Ct)),i=0,r=n.length;i<r;i++){var o=n[i],s=Fn.getSelectorFromElement(o),a=[].slice.call(document.querySelectorAll(s)).filter(function(t){return t===e});null!==s&&0<a.length&&(this._selector=s,this._triggerArray.push(o))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var t=a.prototype;return t.toggle=function(){st(this._element).hasClass(gt)?this.hide():this.show()},t.show=function(){var t,e,n=this;if(!this._isTransitioning&&!st(this._element).hasClass(gt)&&(this._parent&&0===(t=[].slice.call(this._parent.querySelectorAll(Et)).filter(function(t){return t.getAttribute("data-parent")===n._config.parent})).length&&(t=null),!(t&&(e=st(t).not(this._selector).data(lt))&&e._isTransitioning))){var i=st.Event(dt.SHOW);if(st(this._element).trigger(i),!i.isDefaultPrevented()){t&&(a._jQueryInterface.call(st(t).not(this._selector),"hide"),e||st(t).data(lt,null));var r=this._getDimension();st(this._element).removeClass(_t).addClass(mt),this._element.style[r]=0,this._triggerArray.length&&st(this._triggerArray).removeClass(pt).attr("aria-expanded",!0),this.setTransitioning(!0);var 
o="scroll"+(r[0].toUpperCase()+r.slice(1)),s=Fn.getTransitionDurationFromElement(this._element);st(this._element).one(Fn.TRANSITION_END,function(){st(n._element).removeClass(mt).addClass(_t).addClass(gt),n._element.style[r]="",n.setTransitioning(!1),st(n._element).trigger(dt.SHOWN)}).emulateTransitionEnd(s),this._element.style[r]=this._element[o]+"px"}}},t.hide=function(){var t=this;if(!this._isTransitioning&&st(this._element).hasClass(gt)){var e=st.Event(dt.HIDE);if(st(this._element).trigger(e),!e.isDefaultPrevented()){var n=this._getDimension();this._element.style[n]=this._element.getBoundingClientRect()[n]+"px",Fn.reflow(this._element),st(this._element).addClass(mt).removeClass(_t).removeClass(gt);var i=this._triggerArray.length;if(0<i)for(var r=0;r<i;r++){var o=this._triggerArray[r],s=Fn.getSelectorFromElement(o);if(null!==s)st([].slice.call(document.querySelectorAll(s))).hasClass(gt)||st(o).addClass(pt).attr("aria-expanded",!1)}this.setTransitioning(!0);this._element.style[n]="";var a=Fn.getTransitionDurationFromElement(this._element);st(this._element).one(Fn.TRANSITION_END,function(){t.setTransitioning(!1),st(t._element).removeClass(mt).addClass(_t).trigger(dt.HIDDEN)}).emulateTransitionEnd(a)}}},t.setTransitioning=function(t){this._isTransitioning=t},t.dispose=function(){st.removeData(this._element,lt),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},t._getConfig=function(t){return(t=l({},ut,t)).toggle=Boolean(t.toggle),Fn.typeCheckConfig(at,t,ft),t},t._getDimension=function(){return st(this._element).hasClass(vt)?vt:yt},t._getParent=function(){var n=this,t=null;Fn.isElement(this._config.parent)?(t=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(t=this._config.parent[0])):t=document.querySelector(this._config.parent);var e='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]',i=[].slice.call(t.querySelectorAll(e));return st(i).each(function(t,e){n._addAriaAndCollapsedClass(a._getTargetFromElement(e),[e])}),t},t._addAriaAndCollapsedClass=function(t,e){if(t){var n=st(t).hasClass(gt);e.length&&st(e).toggleClass(pt,!n).attr("aria-expanded",n)}},a._getTargetFromElement=function(t){var e=Fn.getSelectorFromElement(t);return e?document.querySelector(e):null},a._jQueryInterface=function(i){return this.each(function(){var t=st(this),e=t.data(lt),n=l({},ut,t.data(),"object"==typeof i&&i?i:{});if(!e&&n.toggle&&/show|hide/.test(i)&&(n.toggle=!1),e||(e=new a(this,n),t.data(lt,e)),"string"==typeof i){if("undefined"==typeof e[i])throw new TypeError('No method named "'+i+'"');e[i]()}})},s(a,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return ut}}]),a}(),st(document).on(dt.CLICK_DATA_API,Ct,function(t){"A"===t.currentTarget.tagName&&t.preventDefault();var n=st(this),e=Fn.getSelectorFromElement(this),i=[].slice.call(document.querySelectorAll(e));st(i).each(function(){var t=st(this),e=t.data(lt)?"toggle":n.data();Tt._jQueryInterface.call(t,e)})}),st.fn[at]=Tt._jQueryInterface,st.fn[at].Constructor=Tt,st.fn[at].noConflict=function(){return st.fn[at]=ht,Tt._jQueryInterface},Tt),Vn=(St="dropdown",At="."+(It="bs.dropdown"),Dt=".data-api",wt=(bt=e).fn[St],Nt=new 
RegExp("38|40|27"),Ot={HIDE:"hide"+At,HIDDEN:"hidden"+At,SHOW:"show"+At,SHOWN:"shown"+At,CLICK:"click"+At,CLICK_DATA_API:"click"+At+Dt,KEYDOWN_DATA_API:"keydown"+At+Dt,KEYUP_DATA_API:"keyup"+At+Dt},kt="disabled",Pt="show",jt="dropup",Ht="dropright",Lt="dropleft",Rt="dropdown-menu-right",xt="position-static",Wt='[data-toggle="dropdown"]',Ut=".dropdown form",qt=".dropdown-menu",Ft=".navbar-nav",Kt=".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",Mt="top-start",Qt="top-end",Bt="bottom-start",Vt="bottom-end",Yt="right-start",zt="left-start",Jt={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic"},Zt={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string"},Gt=function(){function c(t,e){this._element=t,this._popper=null,this._config=this._getConfig(e),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var t=c.prototype;return t.toggle=function(){if(!this._element.disabled&&!bt(this._element).hasClass(kt)){var t=c._getParentFromElement(this._element),e=bt(this._menu).hasClass(Pt);if(c._clearMenus(),!e){var n={relatedTarget:this._element},i=bt.Event(Ot.SHOW,n);if(bt(t).trigger(i),!i.isDefaultPrevented()){if(!this._inNavbar){if("undefined"==typeof h)throw new TypeError("Bootstrap dropdown require Popper.js (https://popper.js.org)");var r=this._element;"parent"===this._config.reference?r=t:Fn.isElement(this._config.reference)&&(r=this._config.reference,"undefined"!=typeof this._config.reference.jquery&&(r=this._config.reference[0])),"scrollParent"!==this._config.boundary&&bt(t).addClass(xt),this._popper=new h(r,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===bt(t).closest(Ft).length&&bt(document.body).children().on("mouseover",null,bt.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),bt(this._menu).toggleClass(Pt),bt(t).toggleClass(Pt).trigger(bt.Event(Ot.SHOWN,n))}}}},t.dispose=function(){bt.removeData(this._element,It),bt(this._element).off(At),this._element=null,(this._menu=null)!==this._popper&&(this._popper.destroy(),this._popper=null)},t.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},t._addEventListeners=function(){var e=this;bt(this._element).on(Ot.CLICK,function(t){t.preventDefault(),t.stopPropagation(),e.toggle()})},t._getConfig=function(t){return t=l({},this.constructor.Default,bt(this._element).data(),t),Fn.typeCheckConfig(St,t,this.constructor.DefaultType),t},t._getMenuElement=function(){if(!this._menu){var t=c._getParentFromElement(this._element);t&&(this._menu=t.querySelector(qt))}return this._menu},t._getPlacement=function(){var t=bt(this._element.parentNode),e=Bt;return t.hasClass(jt)?(e=Mt,bt(this._menu).hasClass(Rt)&&(e=Qt)):t.hasClass(Ht)?e=Yt:t.hasClass(Lt)?e=zt:bt(this._menu).hasClass(Rt)&&(e=Vt),e},t._detectNavbar=function(){return 0<bt(this._element).closest(".navbar").length},t._getPopperConfig=function(){var e=this,t={};"function"==typeof this._config.offset?t.fn=function(t){return t.offsets=l({},t.offsets,e._config.offset(t.offsets)||{}),t}:t.offset=this._config.offset;var n={placement:this._getPlacement(),modifiers:{offset:t,flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return"static"===this._config.display&&(n.modifiers.applyStyle={enabled:!1}),n},c._jQueryInterface=function(e){return this.each(function(){var t=bt(this).data(It);if(t||(t=new 
c(this,"object"==typeof e?e:null),bt(this).data(It,t)),"string"==typeof e){if("undefined"==typeof t[e])throw new TypeError('No method named "'+e+'"');t[e]()}})},c._clearMenus=function(t){if(!t||3!==t.which&&("keyup"!==t.type||9===t.which))for(var e=[].slice.call(document.querySelectorAll(Wt)),n=0,i=e.length;n<i;n++){var r=c._getParentFromElement(e[n]),o=bt(e[n]).data(It),s={relatedTarget:e[n]};if(t&&"click"===t.type&&(s.clickEvent=t),o){var a=o._menu;if(bt(r).hasClass(Pt)&&!(t&&("click"===t.type&&/input|textarea/i.test(t.target.tagName)||"keyup"===t.type&&9===t.which)&&bt.contains(r,t.target))){var l=bt.Event(Ot.HIDE,s);bt(r).trigger(l),l.isDefaultPrevented()||("ontouchstart"in document.documentElement&&bt(document.body).children().off("mouseover",null,bt.noop),e[n].setAttribute("aria-expanded","false"),bt(a).removeClass(Pt),bt(r).removeClass(Pt).trigger(bt.Event(Ot.HIDDEN,s)))}}}},c._getParentFromElement=function(t){var e,n=Fn.getSelectorFromElement(t);return n&&(e=document.querySelector(n)),e||t.parentNode},c._dataApiKeydownHandler=function(t){if((/input|textarea/i.test(t.target.tagName)?!(32===t.which||27!==t.which&&(40!==t.which&&38!==t.which||bt(t.target).closest(qt).length)):Nt.test(t.which))&&(t.preventDefault(),t.stopPropagation(),!this.disabled&&!bt(this).hasClass(kt))){var e=c._getParentFromElement(this),n=bt(e).hasClass(Pt);if((n||27===t.which&&32===t.which)&&(!n||27!==t.which&&32!==t.which)){var i=[].slice.call(e.querySelectorAll(Kt));if(0!==i.length){var r=i.indexOf(t.target);38===t.which&&0<r&&r--,40===t.which&&r<i.length-1&&r++,r<0&&(r=0),i[r].focus()}}else{if(27===t.which){var o=e.querySelector(Wt);bt(o).trigger("focus")}bt(this).trigger("click")}}},s(c,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return Jt}},{key:"DefaultType",get:function(){return Zt}}]),c}(),bt(document).on(Ot.KEYDOWN_DATA_API,Wt,Gt._dataApiKeydownHandler).on(Ot.KEYDOWN_DATA_API,qt,Gt._dataApiKeydownHandler).on(Ot.CLICK_DATA_API+" "+Ot.KEYUP_DATA_API,Gt._clearMenus).on(Ot.CLICK_DATA_API,Wt,function(t){t.preventDefault(),t.stopPropagation(),Gt._jQueryInterface.call(bt(this),"toggle")}).on(Ot.CLICK_DATA_API,Ut,function(t){t.stopPropagation()}),bt.fn[St]=Gt._jQueryInterface,bt.fn[St].Constructor=Gt,bt.fn[St].noConflict=function(){return bt.fn[St]=wt,Gt._jQueryInterface},Gt),Yn=(Xt="modal",ee="."+(te="bs.modal"),ne=($t=e).fn[Xt],ie={backdrop:!0,keyboard:!0,focus:!0,show:!0},re={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},oe={HIDE:"hide"+ee,HIDDEN:"hidden"+ee,SHOW:"show"+ee,SHOWN:"shown"+ee,FOCUSIN:"focusin"+ee,RESIZE:"resize"+ee,CLICK_DISMISS:"click.dismiss"+ee,KEYDOWN_DISMISS:"keydown.dismiss"+ee,MOUSEUP_DISMISS:"mouseup.dismiss"+ee,MOUSEDOWN_DISMISS:"mousedown.dismiss"+ee,CLICK_DATA_API:"click"+ee+".data-api"},se="modal-scrollbar-measure",ae="modal-backdrop",le="modal-open",ce="fade",he="show",ue=".modal-dialog",fe='[data-toggle="modal"]',de='[data-dismiss="modal"]',ge=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",_e=".sticky-top",me=function(){function r(t,e){this._config=this._getConfig(e),this._element=t,this._dialog=t.querySelector(ue),this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._scrollbarWidth=0}var t=r.prototype;return t.toggle=function(t){return this._isShown?this.hide():this.show(t)},t.show=function(t){var e=this;if(!this._isTransitioning&&!this._isShown){$t(this._element).hasClass(ce)&&(this._isTransitioning=!0);var 
n=$t.Event(oe.SHOW,{relatedTarget:t});$t(this._element).trigger(n),this._isShown||n.isDefaultPrevented()||(this._isShown=!0,this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),$t(document.body).addClass(le),this._setEscapeEvent(),this._setResizeEvent(),$t(this._element).on(oe.CLICK_DISMISS,de,function(t){return e.hide(t)}),$t(this._dialog).on(oe.MOUSEDOWN_DISMISS,function(){$t(e._element).one(oe.MOUSEUP_DISMISS,function(t){$t(t.target).is(e._element)&&(e._ignoreBackdropClick=!0)})}),this._showBackdrop(function(){return e._showElement(t)}))}},t.hide=function(t){var e=this;if(t&&t.preventDefault(),!this._isTransitioning&&this._isShown){var n=$t.Event(oe.HIDE);if($t(this._element).trigger(n),this._isShown&&!n.isDefaultPrevented()){this._isShown=!1;var i=$t(this._element).hasClass(ce);if(i&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),$t(document).off(oe.FOCUSIN),$t(this._element).removeClass(he),$t(this._element).off(oe.CLICK_DISMISS),$t(this._dialog).off(oe.MOUSEDOWN_DISMISS),i){var r=Fn.getTransitionDurationFromElement(this._element);$t(this._element).one(Fn.TRANSITION_END,function(t){return e._hideModal(t)}).emulateTransitionEnd(r)}else this._hideModal()}}},t.dispose=function(){$t.removeData(this._element,te),$t(window,document,this._element,this._backdrop).off(ee),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._scrollbarWidth=null},t.handleUpdate=function(){this._adjustDialog()},t._getConfig=function(t){return t=l({},ie,t),Fn.typeCheckConfig(Xt,t,re),t},t._showElement=function(t){var e=this,n=$t(this._element).hasClass(ce);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.scrollTop=0,n&&Fn.reflow(this._element),$t(this._element).addClass(he),this._config.focus&&this._enforceFocus();var i=$t.Event(oe.SHOWN,{relatedTarget:t}),r=function(){e._config.focus&&e._element.focus(),e._isTransitioning=!1,$t(e._element).trigger(i)};if(n){var o=Fn.getTransitionDurationFromElement(this._element);$t(this._dialog).one(Fn.TRANSITION_END,r).emulateTransitionEnd(o)}else r()},t._enforceFocus=function(){var e=this;$t(document).off(oe.FOCUSIN).on(oe.FOCUSIN,function(t){document!==t.target&&e._element!==t.target&&0===$t(e._element).has(t.target).length&&e._element.focus()})},t._setEscapeEvent=function(){var e=this;this._isShown&&this._config.keyboard?$t(this._element).on(oe.KEYDOWN_DISMISS,function(t){27===t.which&&(t.preventDefault(),e.hide())}):this._isShown||$t(this._element).off(oe.KEYDOWN_DISMISS)},t._setResizeEvent=function(){var e=this;this._isShown?$t(window).on(oe.RESIZE,function(t){return e.handleUpdate(t)}):$t(window).off(oe.RESIZE)},t._hideModal=function(){var t=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._isTransitioning=!1,this._showBackdrop(function(){$t(document.body).removeClass(le),t._resetAdjustments(),t._resetScrollbar(),$t(t._element).trigger(oe.HIDDEN)})},t._removeBackdrop=function(){this._backdrop&&($t(this._backdrop).remove(),this._backdrop=null)},t._showBackdrop=function(t){var 
e=this,n=$t(this._element).hasClass(ce)?ce:"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className=ae,n&&this._backdrop.classList.add(n),$t(this._backdrop).appendTo(document.body),$t(this._element).on(oe.CLICK_DISMISS,function(t){e._ignoreBackdropClick?e._ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"===e._config.backdrop?e._element.focus():e.hide())}),n&&Fn.reflow(this._backdrop),$t(this._backdrop).addClass(he),!t)return;if(!n)return void t();var i=Fn.getTransitionDurationFromElement(this._backdrop);$t(this._backdrop).one(Fn.TRANSITION_END,t).emulateTransitionEnd(i)}else if(!this._isShown&&this._backdrop){$t(this._backdrop).removeClass(he);var r=function(){e._removeBackdrop(),t&&t()};if($t(this._element).hasClass(ce)){var o=Fn.getTransitionDurationFromElement(this._backdrop);$t(this._backdrop).one(Fn.TRANSITION_END,r).emulateTransitionEnd(o)}else r()}else t&&t()},t._adjustDialog=function(){var t=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},t._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},t._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=t.left+t.right<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},t._setScrollbar=function(){var r=this;if(this._isBodyOverflowing){var t=[].slice.call(document.querySelectorAll(ge)),e=[].slice.call(document.querySelectorAll(_e));$t(t).each(function(t,e){var n=e.style.paddingRight,i=$t(e).css("padding-right");$t(e).data("padding-right",n).css("padding-right",parseFloat(i)+r._scrollbarWidth+"px")}),$t(e).each(function(t,e){var n=e.style.marginRight,i=$t(e).css("margin-right");$t(e).data("margin-right",n).css("margin-right",parseFloat(i)-r._scrollbarWidth+"px")});var n=document.body.style.paddingRight,i=$t(document.body).css("padding-right");$t(document.body).data("padding-right",n).css("padding-right",parseFloat(i)+this._scrollbarWidth+"px")}},t._resetScrollbar=function(){var t=[].slice.call(document.querySelectorAll(ge));$t(t).each(function(t,e){var n=$t(e).data("padding-right");$t(e).removeData("padding-right"),e.style.paddingRight=n||""});var e=[].slice.call(document.querySelectorAll(""+_e));$t(e).each(function(t,e){var n=$t(e).data("margin-right");"undefined"!=typeof n&&$t(e).css("margin-right",n).removeData("margin-right")});var n=$t(document.body).data("padding-right");$t(document.body).removeData("padding-right"),document.body.style.paddingRight=n||""},t._getScrollbarWidth=function(){var t=document.createElement("div");t.className=se,document.body.appendChild(t);var e=t.getBoundingClientRect().width-t.clientWidth;return document.body.removeChild(t),e},r._jQueryInterface=function(n,i){return this.each(function(){var t=$t(this).data(te),e=l({},ie,$t(this).data(),"object"==typeof n&&n?n:{});if(t||(t=new r(this,e),$t(this).data(te,t)),"string"==typeof n){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n](i)}else e.show&&t.show(i)})},s(r,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return ie}}]),r}(),$t(document).on(oe.CLICK_DATA_API,fe,function(t){var e,n=this,i=Fn.getSelectorFromElement(this);i&&(e=document.querySelector(i));var 
r=$t(e).data(te)?"toggle":l({},$t(e).data(),$t(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||t.preventDefault();var o=$t(e).one(oe.SHOW,function(t){t.isDefaultPrevented()||o.one(oe.HIDDEN,function(){$t(n).is(":visible")&&n.focus()})});me._jQueryInterface.call($t(e),r,this)}),$t.fn[Xt]=me._jQueryInterface,$t.fn[Xt].Constructor=me,$t.fn[Xt].noConflict=function(){return $t.fn[Xt]=ne,me._jQueryInterface},me),zn=(ve="tooltip",Ee="."+(ye="bs.tooltip"),Ce=(pe=e).fn[ve],Te="bs-tooltip",be=new RegExp("(^|\\s)"+Te+"\\S+","g"),Ae={animation:!0,template:'<div class="tooltip" role="tooltip"><div class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!(Ie={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"}),selector:!(Se={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)"}),placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent"},we="out",Ne={HIDE:"hide"+Ee,HIDDEN:"hidden"+Ee,SHOW:(De="show")+Ee,SHOWN:"shown"+Ee,INSERTED:"inserted"+Ee,CLICK:"click"+Ee,FOCUSIN:"focusin"+Ee,FOCUSOUT:"focusout"+Ee,MOUSEENTER:"mouseenter"+Ee,MOUSELEAVE:"mouseleave"+Ee},Oe="fade",ke="show",Pe=".tooltip-inner",je=".arrow",He="hover",Le="focus",Re="click",xe="manual",We=function(){function i(t,e){if("undefined"==typeof h)throw new TypeError("Bootstrap tooltips require Popper.js (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var t=i.prototype;return t.enable=function(){this._isEnabled=!0},t.disable=function(){this._isEnabled=!1},t.toggleEnabled=function(){this._isEnabled=!this._isEnabled},t.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=pe(t.currentTarget).data(e);n||(n=new this.constructor(t.currentTarget,this._getDelegateConfig()),pe(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(pe(this.getTipElement()).hasClass(ke))return void this._leave(null,this);this._enter(null,this)}},t.dispose=function(){clearTimeout(this._timeout),pe.removeData(this.element,this.constructor.DATA_KEY),pe(this.element).off(this.constructor.EVENT_KEY),pe(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&pe(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,(this._activeTrigger=null)!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},t.show=function(){var e=this;if("none"===pe(this.element).css("display"))throw new Error("Please use show on visible elements");var t=pe.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){pe(this.element).trigger(t);var n=pe.contains(this.element.ownerDocument.documentElement,this.element);if(t.isDefaultPrevented()||!n)return;var i=this.getTipElement(),r=Fn.getUID(this.constructor.NAME);i.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&pe(i).addClass(Oe);var o="function"==typeof 
this.config.placement?this.config.placement.call(this,i,this.element):this.config.placement,s=this._getAttachment(o);this.addAttachmentClass(s);var a=!1===this.config.container?document.body:pe(document).find(this.config.container);pe(i).data(this.constructor.DATA_KEY,this),pe.contains(this.element.ownerDocument.documentElement,this.tip)||pe(i).appendTo(a),pe(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new h(this.element,i,{placement:s,modifiers:{offset:{offset:this.config.offset},flip:{behavior:this.config.fallbackPlacement},arrow:{element:je},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){e._handlePopperPlacementChange(t)}}),pe(i).addClass(ke),"ontouchstart"in document.documentElement&&pe(document.body).children().on("mouseover",null,pe.noop);var l=function(){e.config.animation&&e._fixTransition();var t=e._hoverState;e._hoverState=null,pe(e.element).trigger(e.constructor.Event.SHOWN),t===we&&e._leave(null,e)};if(pe(this.tip).hasClass(Oe)){var c=Fn.getTransitionDurationFromElement(this.tip);pe(this.tip).one(Fn.TRANSITION_END,l).emulateTransitionEnd(c)}else l()}},t.hide=function(t){var e=this,n=this.getTipElement(),i=pe.Event(this.constructor.Event.HIDE),r=function(){e._hoverState!==De&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),pe(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(pe(this.element).trigger(i),!i.isDefaultPrevented()){if(pe(n).removeClass(ke),"ontouchstart"in document.documentElement&&pe(document.body).children().off("mouseover",null,pe.noop),this._activeTrigger[Re]=!1,this._activeTrigger[Le]=!1,this._activeTrigger[He]=!1,pe(this.tip).hasClass(Oe)){var o=Fn.getTransitionDurationFromElement(n);pe(n).one(Fn.TRANSITION_END,r).emulateTransitionEnd(o)}else r();this._hoverState=""}},t.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},t.isWithContent=function(){return Boolean(this.getTitle())},t.addAttachmentClass=function(t){pe(this.getTipElement()).addClass(Te+"-"+t)},t.getTipElement=function(){return this.tip=this.tip||pe(this.config.template)[0],this.tip},t.setContent=function(){var t=this.getTipElement();this.setElementContent(pe(t.querySelectorAll(Pe)),this.getTitle()),pe(t).removeClass(Oe+" "+ke)},t.setElementContent=function(t,e){var n=this.config.html;"object"==typeof e&&(e.nodeType||e.jquery)?n?pe(e).parent().is(t)||t.empty().append(e):t.text(pe(e).text()):t[n?"html":"text"](e)},t.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},t._getAttachment=function(t){return Ie[t.toUpperCase()]},t._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(t){if("click"===t)pe(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(t){return i.toggle(t)});else if(t!==xe){var e=t===He?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=t===He?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;pe(i.element).on(e,i.config.selector,function(t){return i._enter(t)}).on(n,i.config.selector,function(t){return i._leave(t)})}pe(i.element).closest(".modal").on("hide.bs.modal",function(){return i.hide()})}),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},t._fixTitle=function(){var 
t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},t._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||pe(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),pe(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?Le:He]=!0),pe(e.getTipElement()).hasClass(ke)||e._hoverState===De?e._hoverState=De:(clearTimeout(e._timeout),e._hoverState=De,e.config.delay&&e.config.delay.show?e._timeout=setTimeout(function(){e._hoverState===De&&e.show()},e.config.delay.show):e.show())},t._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||pe(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),pe(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?Le:He]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=we,e.config.delay&&e.config.delay.hide?e._timeout=setTimeout(function(){e._hoverState===we&&e.hide()},e.config.delay.hide):e.hide())},t._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},t._getConfig=function(t){return"number"==typeof(t=l({},this.constructor.Default,pe(this.element).data(),"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),Fn.typeCheckConfig(ve,t,this.constructor.DefaultType),t},t._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},t._cleanTipClass=function(){var t=pe(this.getTipElement()),e=t.attr("class").match(be);null!==e&&e.length&&t.removeClass(e.join(""))},t._handlePopperPlacementChange=function(t){var e=t.instance;this.tip=e.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},t._fixTransition=function(){var t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(pe(t).removeClass(Oe),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},i._jQueryInterface=function(n){return this.each(function(){var t=pe(this).data(ye),e="object"==typeof n&&n;if((t||!/dispose|hide/.test(n))&&(t||(t=new i(this,e),pe(this).data(ye,t)),"string"==typeof n)){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return Ae}},{key:"NAME",get:function(){return ve}},{key:"DATA_KEY",get:function(){return ye}},{key:"Event",get:function(){return Ne}},{key:"EVENT_KEY",get:function(){return Ee}},{key:"DefaultType",get:function(){return Se}}]),i}(),pe.fn[ve]=We._jQueryInterface,pe.fn[ve].Constructor=We,pe.fn[ve].noConflict=function(){return pe.fn[ve]=Ce,We._jQueryInterface},We),Jn=(qe="popover",Ke="."+(Fe="bs.popover"),Me=(Ue=e).fn[qe],Qe="bs-popover",Be=new RegExp("(^|\\s)"+Qe+"\\S+","g"),Ve=l({},zn.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div 
class="popover-body"></div></div>'}),Ye=l({},zn.DefaultType,{content:"(string|element|function)"}),ze="fade",Ze=".popover-header",Ge=".popover-body",$e={HIDE:"hide"+Ke,HIDDEN:"hidden"+Ke,SHOW:(Je="show")+Ke,SHOWN:"shown"+Ke,INSERTED:"inserted"+Ke,CLICK:"click"+Ke,FOCUSIN:"focusin"+Ke,FOCUSOUT:"focusout"+Ke,MOUSEENTER:"mouseenter"+Ke,MOUSELEAVE:"mouseleave"+Ke},Xe=function(t){var e,n;function i(){return t.apply(this,arguments)||this}n=t,(e=i).prototype=Object.create(n.prototype),(e.prototype.constructor=e).__proto__=n;var r=i.prototype;return r.isWithContent=function(){return this.getTitle()||this._getContent()},r.addAttachmentClass=function(t){Ue(this.getTipElement()).addClass(Qe+"-"+t)},r.getTipElement=function(){return this.tip=this.tip||Ue(this.config.template)[0],this.tip},r.setContent=function(){var t=Ue(this.getTipElement());this.setElementContent(t.find(Ze),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(Ge),e),t.removeClass(ze+" "+Je)},r._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},r._cleanTipClass=function(){var t=Ue(this.getTipElement()),e=t.attr("class").match(Be);null!==e&&0<e.length&&t.removeClass(e.join(""))},i._jQueryInterface=function(n){return this.each(function(){var t=Ue(this).data(Fe),e="object"==typeof n?n:null;if((t||!/destroy|hide/.test(n))&&(t||(t=new i(this,e),Ue(this).data(Fe,t)),"string"==typeof n)){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return Ve}},{key:"NAME",get:function(){return qe}},{key:"DATA_KEY",get:function(){return Fe}},{key:"Event",get:function(){return $e}},{key:"EVENT_KEY",get:function(){return Ke}},{key:"DefaultType",get:function(){return Ye}}]),i}(zn),Ue.fn[qe]=Xe._jQueryInterface,Ue.fn[qe].Constructor=Xe,Ue.fn[qe].noConflict=function(){return Ue.fn[qe]=Me,Xe._jQueryInterface},Xe),Zn=(en="scrollspy",rn="."+(nn="bs.scrollspy"),on=(tn=e).fn[en],sn={offset:10,method:"auto",target:""},an={offset:"number",method:"string",target:"(string|element)"},ln={ACTIVATE:"activate"+rn,SCROLL:"scroll"+rn,LOAD_DATA_API:"load"+rn+".data-api"},cn="dropdown-item",hn="active",un='[data-spy="scroll"]',fn=".active",dn=".nav, .list-group",gn=".nav-link",_n=".nav-item",mn=".list-group-item",pn=".dropdown",vn=".dropdown-item",yn=".dropdown-toggle",En="offset",Cn="position",Tn=function(){function n(t,e){var n=this;this._element=t,this._scrollElement="BODY"===t.tagName?window:t,this._config=this._getConfig(e),this._selector=this._config.target+" "+gn+","+this._config.target+" "+mn+","+this._config.target+" "+vn,this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,tn(this._scrollElement).on(ln.SCROLL,function(t){return n._process(t)}),this.refresh(),this._process()}var t=n.prototype;return t.refresh=function(){var e=this,t=this._scrollElement===this._scrollElement.window?En:Cn,r="auto"===this._config.method?t:this._config.method,o=r===Cn?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map(function(t){var e,n=Fn.getSelectorFromElement(t);if(n&&(e=document.querySelector(n)),e){var i=e.getBoundingClientRect();if(i.width||i.height)return[tn(e)[r]().top+o,n]}return null}).filter(function(t){return t}).sort(function(t,e){return 
t[0]-e[0]}).forEach(function(t){e._offsets.push(t[0]),e._targets.push(t[1])})},t.dispose=function(){tn.removeData(this._element,nn),tn(this._scrollElement).off(rn),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},t._getConfig=function(t){if("string"!=typeof(t=l({},sn,"object"==typeof t&&t?t:{})).target){var e=tn(t.target).attr("id");e||(e=Fn.getUID(en),tn(t.target).attr("id",e)),t.target="#"+e}return Fn.typeCheckConfig(en,t,an),t},t._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},t._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},t._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},t._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),n<=t){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t<this._offsets[0]&&0<this._offsets[0])return this._activeTarget=null,void this._clear();for(var r=this._offsets.length;r--;){this._activeTarget!==this._targets[r]&&t>=this._offsets[r]&&("undefined"==typeof this._offsets[r+1]||t<this._offsets[r+1])&&this._activate(this._targets[r])}}},t._activate=function(e){this._activeTarget=e,this._clear();var t=this._selector.split(",");t=t.map(function(t){return t+'[data-target="'+e+'"],'+t+'[href="'+e+'"]'});var n=tn([].slice.call(document.querySelectorAll(t.join(","))));n.hasClass(cn)?(n.closest(pn).find(yn).addClass(hn),n.addClass(hn)):(n.addClass(hn),n.parents(dn).prev(gn+", "+mn).addClass(hn),n.parents(dn).prev(_n).children(gn).addClass(hn)),tn(this._scrollElement).trigger(ln.ACTIVATE,{relatedTarget:e})},t._clear=function(){var t=[].slice.call(document.querySelectorAll(this._selector));tn(t).filter(fn).removeClass(hn)},n._jQueryInterface=function(e){return this.each(function(){var t=tn(this).data(nn);if(t||(t=new n(this,"object"==typeof e&&e),tn(this).data(nn,t)),"string"==typeof e){if("undefined"==typeof t[e])throw new TypeError('No method named "'+e+'"');t[e]()}})},s(n,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return sn}}]),n}(),tn(window).on(ln.LOAD_DATA_API,function(){for(var t=[].slice.call(document.querySelectorAll(un)),e=t.length;e--;){var n=tn(t[e]);Tn._jQueryInterface.call(n,n.data())}}),tn.fn[en]=Tn._jQueryInterface,tn.fn[en].Constructor=Tn,tn.fn[en].noConflict=function(){return tn.fn[en]=on,Tn._jQueryInterface},Tn),Gn=(In="."+(Sn="bs.tab"),An=(bn=e).fn.tab,Dn={HIDE:"hide"+In,HIDDEN:"hidden"+In,SHOW:"show"+In,SHOWN:"shown"+In,CLICK_DATA_API:"click"+In+".data-api"},wn="dropdown-menu",Nn="active",On="disabled",kn="fade",Pn="show",jn=".dropdown",Hn=".nav, .list-group",Ln=".active",Rn="> li > .active",xn='[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',Wn=".dropdown-toggle",Un="> .dropdown-menu .active",qn=function(){function i(t){this._element=t}var t=i.prototype;return t.show=function(){var n=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&bn(this._element).hasClass(Nn)||bn(this._element).hasClass(On))){var 
t,i,e=bn(this._element).closest(Hn)[0],r=Fn.getSelectorFromElement(this._element);if(e){var o="UL"===e.nodeName?Rn:Ln;i=(i=bn.makeArray(bn(e).find(o)))[i.length-1]}var s=bn.Event(Dn.HIDE,{relatedTarget:this._element}),a=bn.Event(Dn.SHOW,{relatedTarget:i});if(i&&bn(i).trigger(s),bn(this._element).trigger(a),!a.isDefaultPrevented()&&!s.isDefaultPrevented()){r&&(t=document.querySelector(r)),this._activate(this._element,e);var l=function(){var t=bn.Event(Dn.HIDDEN,{relatedTarget:n._element}),e=bn.Event(Dn.SHOWN,{relatedTarget:i});bn(i).trigger(t),bn(n._element).trigger(e)};t?this._activate(t,t.parentNode,l):l()}}},t.dispose=function(){bn.removeData(this._element,Sn),this._element=null},t._activate=function(t,e,n){var i=this,r=("UL"===e.nodeName?bn(e).find(Rn):bn(e).children(Ln))[0],o=n&&r&&bn(r).hasClass(kn),s=function(){return i._transitionComplete(t,r,n)};if(r&&o){var a=Fn.getTransitionDurationFromElement(r);bn(r).one(Fn.TRANSITION_END,s).emulateTransitionEnd(a)}else s()},t._transitionComplete=function(t,e,n){if(e){bn(e).removeClass(Pn+" "+Nn);var i=bn(e.parentNode).find(Un)[0];i&&bn(i).removeClass(Nn),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}if(bn(t).addClass(Nn),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),Fn.reflow(t),bn(t).addClass(Pn),t.parentNode&&bn(t.parentNode).hasClass(wn)){var r=bn(t).closest(jn)[0];if(r){var o=[].slice.call(r.querySelectorAll(Wn));bn(o).addClass(Nn)}t.setAttribute("aria-expanded",!0)}n&&n()},i._jQueryInterface=function(n){return this.each(function(){var t=bn(this),e=t.data(Sn);if(e||(e=new i(this),t.data(Sn,e)),"string"==typeof n){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),i}(),bn(document).on(Dn.CLICK_DATA_API,xn,function(t){t.preventDefault(),qn._jQueryInterface.call(bn(this),"show")}),bn.fn.tab=qn._jQueryInterface,bn.fn.tab.Constructor=qn,bn.fn.tab.noConflict=function(){return bn.fn.tab=An,qn._jQueryInterface},qn);!function(t){if("undefined"==typeof t)throw new TypeError("Bootstrap's JavaScript requires jQuery. jQuery must be included before Bootstrap's JavaScript.");var e=t.fn.jquery.split(" ")[0].split(".");if(e[0]<2&&e[1]<9||1===e[0]&&9===e[1]&&e[2]<1||4<=e[0])throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}(e),t.Util=Fn,t.Alert=Kn,t.Button=Mn,t.Carousel=Qn,t.Collapse=Bn,t.Dropdown=Vn,t.Modal=Yn,t.Popover=Jn,t.Scrollspy=Zn,t.Tab=Gn,t.Tooltip=zn,Object.defineProperty(t,"__esModule",{value:!0})});
//# sourceMappingURL=bootstrap.min.js.map
|
scheduler-front
|
/scheduler_front-0.0.11-py3-none-any.whl/scheduler_front/static/vendor/bootstrap/js/bootstrap.min.js
|
bootstrap.min.js
|
| 0.031634 | 0.207295 |
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],t):t(e.bootstrap={},e.jQuery)}(this,function(e,t){"use strict";function i(e,t){for(var n=0;n<t.length;n++){var i=t[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(e,i.key,i)}}function s(e,t,n){return t&&i(e.prototype,t),n&&i(e,n),e}function l(r){for(var e=1;e<arguments.length;e++){var o=null!=arguments[e]?arguments[e]:{},t=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(t=t.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),t.forEach(function(e){var t,n,i;t=r,i=o[n=e],n in t?Object.defineProperty(t,n,{value:i,enumerable:!0,configurable:!0,writable:!0}):t[n]=i})}return r}for(var r,n,o,a,c,u,f,h,d,p,m,g,_,v,y,E,b,w,C,T,S,D,A,I,O,N,k,x,P,L,j,H,M,F,W,R,U,B,q,K,Q,Y,V,z,G,J,Z,X,$,ee,te,ne,ie,re,oe,se,ae,le,ce,ue,fe,he,de,pe,me,ge,_e,ve,ye,Ee,be,we=function(i){var t="transitionend";function e(e){var t=this,n=!1;return i(this).one(l.TRANSITION_END,function(){n=!0}),setTimeout(function(){n||l.triggerTransitionEnd(t)},e),this}var l={TRANSITION_END:"bsTransitionEnd",getUID:function(e){for(;e+=~~(1e6*Math.random()),document.getElementById(e););return e},getSelectorFromElement:function(e){var t=e.getAttribute("data-target");t&&"#"!==t||(t=e.getAttribute("href")||"");try{return document.querySelector(t)?t:null}catch(e){return null}},getTransitionDurationFromElement:function(e){if(!e)return 0;var t=i(e).css("transition-duration");return parseFloat(t)?(t=t.split(",")[0],1e3*parseFloat(t)):0},reflow:function(e){return e.offsetHeight},triggerTransitionEnd:function(e){i(e).trigger(t)},supportsTransitionEnd:function(){return Boolean(t)},isElement:function(e){return(e[0]||e).nodeType},typeCheckConfig:function(e,t,n){for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var r=n[i],o=t[i],s=o&&l.isElement(o)?"element":(a=o,{}.toString.call(a).match(/\s([a-z]+)/i)[1].toLowerCase());if(!new RegExp(r).test(s))throw new Error(e.toUpperCase()+': Option "'+i+'" provided type "'+s+'" but expected type "'+r+'".')}var a}};return i.fn.emulateTransitionEnd=e,i.event.special[l.TRANSITION_END]={bindType:t,delegateType:t,handle:function(e){if(i(e.target).is(this))return e.handleObj.handler.apply(this,arguments)}},l}(t=t&&t.hasOwnProperty("default")?t.default:t),Ce=(n="alert",a="."+(o="bs.alert"),c=(r=t).fn[n],u={CLOSE:"close"+a,CLOSED:"closed"+a,CLICK_DATA_API:"click"+a+".data-api"},f="alert",h="fade",d="show",p=function(){function i(e){this._element=e}var e=i.prototype;return e.close=function(e){var t=this._element;e&&(t=this._getRootElement(e)),this._triggerCloseEvent(t).isDefaultPrevented()||this._removeElement(t)},e.dispose=function(){r.removeData(this._element,o),this._element=null},e._getRootElement=function(e){var t=we.getSelectorFromElement(e),n=!1;return t&&(n=document.querySelector(t)),n||(n=r(e).closest("."+f)[0]),n},e._triggerCloseEvent=function(e){var t=r.Event(u.CLOSE);return r(e).trigger(t),t},e._removeElement=function(t){var n=this;if(r(t).removeClass(d),r(t).hasClass(h)){var e=we.getTransitionDurationFromElement(t);r(t).one(we.TRANSITION_END,function(e){return n._destroyElement(t,e)}).emulateTransitionEnd(e)}else this._destroyElement(t)},e._destroyElement=function(e){r(e).detach().trigger(u.CLOSED).remove()},i._jQueryInterface=function(n){return this.each(function(){var e=r(this),t=e.data(o);t||(t=new 
i(this),e.data(o,t)),"close"===n&&t[n](this)})},i._handleDismiss=function(t){return function(e){e&&e.preventDefault(),t.close(this)}},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),i}(),r(document).on(u.CLICK_DATA_API,'[data-dismiss="alert"]',p._handleDismiss(new p)),r.fn[n]=p._jQueryInterface,r.fn[n].Constructor=p,r.fn[n].noConflict=function(){return r.fn[n]=c,p._jQueryInterface},p),Te=(g="button",v="."+(_="bs.button"),y=".data-api",E=(m=t).fn[g],b="active",w="btn",T='[data-toggle^="button"]',S='[data-toggle="buttons"]',D="input",A=".active",I=".btn",O={CLICK_DATA_API:"click"+v+y,FOCUS_BLUR_DATA_API:(C="focus")+v+y+" blur"+v+y},N=function(){function n(e){this._element=e}var e=n.prototype;return e.toggle=function(){var e=!0,t=!0,n=m(this._element).closest(S)[0];if(n){var i=this._element.querySelector(D);if(i){if("radio"===i.type)if(i.checked&&this._element.classList.contains(b))e=!1;else{var r=n.querySelector(A);r&&m(r).removeClass(b)}if(e){if(i.hasAttribute("disabled")||n.hasAttribute("disabled")||i.classList.contains("disabled")||n.classList.contains("disabled"))return;i.checked=!this._element.classList.contains(b),m(i).trigger("change")}i.focus(),t=!1}}t&&this._element.setAttribute("aria-pressed",!this._element.classList.contains(b)),e&&m(this._element).toggleClass(b)},e.dispose=function(){m.removeData(this._element,_),this._element=null},n._jQueryInterface=function(t){return this.each(function(){var e=m(this).data(_);e||(e=new n(this),m(this).data(_,e)),"toggle"===t&&e[t]()})},s(n,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),n}(),m(document).on(O.CLICK_DATA_API,T,function(e){e.preventDefault();var t=e.target;m(t).hasClass(w)||(t=m(t).closest(I)),N._jQueryInterface.call(m(t),"toggle")}).on(O.FOCUS_BLUR_DATA_API,T,function(e){var t=m(e.target).closest(I)[0];m(t).toggleClass(C,/^focus(in)?$/.test(e.type))}),m.fn[g]=N._jQueryInterface,m.fn[g].Constructor=N,m.fn[g].noConflict=function(){return m.fn[g]=E,N._jQueryInterface},N),Se=(x="carousel",L="."+(P="bs.carousel"),j=".data-api",H=(k=t).fn[x],M={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0},F={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean"},W="next",R="prev",U="left",B="right",q={SLIDE:"slide"+L,SLID:"slid"+L,KEYDOWN:"keydown"+L,MOUSEENTER:"mouseenter"+L,MOUSELEAVE:"mouseleave"+L,TOUCHEND:"touchend"+L,LOAD_DATA_API:"load"+L+j,CLICK_DATA_API:"click"+L+j},K="carousel",Q="active",Y="slide",V="carousel-item-right",z="carousel-item-left",G="carousel-item-next",J="carousel-item-prev",Z=".active",X=".active.carousel-item",$=".carousel-item",ee=".carousel-item-next, .carousel-item-prev",te=".carousel-indicators",ne="[data-slide], [data-slide-to]",ie='[data-ride="carousel"]',re=function(){function o(e,t){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this._config=this._getConfig(t),this._element=k(e)[0],this._indicatorsElement=this._element.querySelector(te),this._addEventListeners()}var e=o.prototype;return 
e.next=function(){this._isSliding||this._slide(W)},e.nextWhenVisible=function(){!document.hidden&&k(this._element).is(":visible")&&"hidden"!==k(this._element).css("visibility")&&this.next()},e.prev=function(){this._isSliding||this._slide(R)},e.pause=function(e){e||(this._isPaused=!0),this._element.querySelector(ee)&&(we.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},e.cycle=function(e){e||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},e.to=function(e){var t=this;this._activeElement=this._element.querySelector(X);var n=this._getItemIndex(this._activeElement);if(!(e>this._items.length-1||e<0))if(this._isSliding)k(this._element).one(q.SLID,function(){return t.to(e)});else{if(n===e)return this.pause(),void this.cycle();var i=n<e?W:R;this._slide(i,this._items[e])}},e.dispose=function(){k(this._element).off(L),k.removeData(this._element,P),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},e._getConfig=function(e){return e=l({},M,e),we.typeCheckConfig(x,e,F),e},e._addEventListeners=function(){var t=this;this._config.keyboard&&k(this._element).on(q.KEYDOWN,function(e){return t._keydown(e)}),"hover"===this._config.pause&&(k(this._element).on(q.MOUSEENTER,function(e){return t.pause(e)}).on(q.MOUSELEAVE,function(e){return t.cycle(e)}),"ontouchstart"in document.documentElement&&k(this._element).on(q.TOUCHEND,function(){t.pause(),t.touchTimeout&&clearTimeout(t.touchTimeout),t.touchTimeout=setTimeout(function(e){return t.cycle(e)},500+t._config.interval)}))},e._keydown=function(e){if(!/input|textarea/i.test(e.target.tagName))switch(e.which){case 37:e.preventDefault(),this.prev();break;case 39:e.preventDefault(),this.next()}},e._getItemIndex=function(e){return this._items=e&&e.parentNode?[].slice.call(e.parentNode.querySelectorAll($)):[],this._items.indexOf(e)},e._getItemByDirection=function(e,t){var n=e===W,i=e===R,r=this._getItemIndex(t),o=this._items.length-1;if((i&&0===r||n&&r===o)&&!this._config.wrap)return t;var s=(r+(e===R?-1:1))%this._items.length;return-1===s?this._items[this._items.length-1]:this._items[s]},e._triggerSlideEvent=function(e,t){var n=this._getItemIndex(e),i=this._getItemIndex(this._element.querySelector(X)),r=k.Event(q.SLIDE,{relatedTarget:e,direction:t,from:i,to:n});return k(this._element).trigger(r),r},e._setActiveIndicatorElement=function(e){if(this._indicatorsElement){var t=[].slice.call(this._indicatorsElement.querySelectorAll(Z));k(t).removeClass(Q);var n=this._indicatorsElement.children[this._getItemIndex(e)];n&&k(n).addClass(Q)}},e._slide=function(e,t){var n,i,r,o=this,s=this._element.querySelector(X),a=this._getItemIndex(s),l=t||s&&this._getItemByDirection(e,s),c=this._getItemIndex(l),u=Boolean(this._interval);if(e===W?(n=z,i=G,r=U):(n=V,i=J,r=B),l&&k(l).hasClass(Q))this._isSliding=!1;else if(!this._triggerSlideEvent(l,r).isDefaultPrevented()&&s&&l){this._isSliding=!0,u&&this.pause(),this._setActiveIndicatorElement(l);var f=k.Event(q.SLID,{relatedTarget:l,direction:r,from:a,to:c});if(k(this._element).hasClass(Y)){k(l).addClass(i),we.reflow(l),k(s).addClass(n),k(l).addClass(n);var h=we.getTransitionDurationFromElement(s);k(s).one(we.TRANSITION_END,function(){k(l).removeClass(n+" 
"+i).addClass(Q),k(s).removeClass(Q+" "+i+" "+n),o._isSliding=!1,setTimeout(function(){return k(o._element).trigger(f)},0)}).emulateTransitionEnd(h)}else k(s).removeClass(Q),k(l).addClass(Q),this._isSliding=!1,k(this._element).trigger(f);u&&this.cycle()}},o._jQueryInterface=function(i){return this.each(function(){var e=k(this).data(P),t=l({},M,k(this).data());"object"==typeof i&&(t=l({},t,i));var n="string"==typeof i?i:t.slide;if(e||(e=new o(this,t),k(this).data(P,e)),"number"==typeof i)e.to(i);else if("string"==typeof n){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}else t.interval&&(e.pause(),e.cycle())})},o._dataApiClickHandler=function(e){var t=we.getSelectorFromElement(this);if(t){var n=k(t)[0];if(n&&k(n).hasClass(K)){var i=l({},k(n).data(),k(this).data()),r=this.getAttribute("data-slide-to");r&&(i.interval=!1),o._jQueryInterface.call(k(n),i),r&&k(n).data(P).to(r),e.preventDefault()}}},s(o,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return M}}]),o}(),k(document).on(q.CLICK_DATA_API,ne,re._dataApiClickHandler),k(window).on(q.LOAD_DATA_API,function(){for(var e=[].slice.call(document.querySelectorAll(ie)),t=0,n=e.length;t<n;t++){var i=k(e[t]);re._jQueryInterface.call(i,i.data())}}),k.fn[x]=re._jQueryInterface,k.fn[x].Constructor=re,k.fn[x].noConflict=function(){return k.fn[x]=H,re._jQueryInterface},re),De=(se="collapse",le="."+(ae="bs.collapse"),ce=(oe=t).fn[se],ue={toggle:!0,parent:""},fe={toggle:"boolean",parent:"(string|element)"},he={SHOW:"show"+le,SHOWN:"shown"+le,HIDE:"hide"+le,HIDDEN:"hidden"+le,CLICK_DATA_API:"click"+le+".data-api"},de="show",pe="collapse",me="collapsing",ge="collapsed",_e="width",ve="height",ye=".show, .collapsing",Ee='[data-toggle="collapse"]',be=function(){function a(t,e){this._isTransitioning=!1,this._element=t,this._config=this._getConfig(e),this._triggerArray=oe.makeArray(document.querySelectorAll('[data-toggle="collapse"][href="#'+t.id+'"],[data-toggle="collapse"][data-target="#'+t.id+'"]'));for(var n=[].slice.call(document.querySelectorAll(Ee)),i=0,r=n.length;i<r;i++){var o=n[i],s=we.getSelectorFromElement(o),a=[].slice.call(document.querySelectorAll(s)).filter(function(e){return e===t});null!==s&&0<a.length&&(this._selector=s,this._triggerArray.push(o))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var e=a.prototype;return e.toggle=function(){oe(this._element).hasClass(de)?this.hide():this.show()},e.show=function(){var e,t,n=this;if(!this._isTransitioning&&!oe(this._element).hasClass(de)&&(this._parent&&0===(e=[].slice.call(this._parent.querySelectorAll(ye)).filter(function(e){return e.getAttribute("data-parent")===n._config.parent})).length&&(e=null),!(e&&(t=oe(e).not(this._selector).data(ae))&&t._isTransitioning))){var i=oe.Event(he.SHOW);if(oe(this._element).trigger(i),!i.isDefaultPrevented()){e&&(a._jQueryInterface.call(oe(e).not(this._selector),"hide"),t||oe(e).data(ae,null));var r=this._getDimension();oe(this._element).removeClass(pe).addClass(me),this._element.style[r]=0,this._triggerArray.length&&oe(this._triggerArray).removeClass(ge).attr("aria-expanded",!0),this.setTransitioning(!0);var 
o="scroll"+(r[0].toUpperCase()+r.slice(1)),s=we.getTransitionDurationFromElement(this._element);oe(this._element).one(we.TRANSITION_END,function(){oe(n._element).removeClass(me).addClass(pe).addClass(de),n._element.style[r]="",n.setTransitioning(!1),oe(n._element).trigger(he.SHOWN)}).emulateTransitionEnd(s),this._element.style[r]=this._element[o]+"px"}}},e.hide=function(){var e=this;if(!this._isTransitioning&&oe(this._element).hasClass(de)){var t=oe.Event(he.HIDE);if(oe(this._element).trigger(t),!t.isDefaultPrevented()){var n=this._getDimension();this._element.style[n]=this._element.getBoundingClientRect()[n]+"px",we.reflow(this._element),oe(this._element).addClass(me).removeClass(pe).removeClass(de);var i=this._triggerArray.length;if(0<i)for(var r=0;r<i;r++){var o=this._triggerArray[r],s=we.getSelectorFromElement(o);if(null!==s)oe([].slice.call(document.querySelectorAll(s))).hasClass(de)||oe(o).addClass(ge).attr("aria-expanded",!1)}this.setTransitioning(!0);this._element.style[n]="";var a=we.getTransitionDurationFromElement(this._element);oe(this._element).one(we.TRANSITION_END,function(){e.setTransitioning(!1),oe(e._element).removeClass(me).addClass(pe).trigger(he.HIDDEN)}).emulateTransitionEnd(a)}}},e.setTransitioning=function(e){this._isTransitioning=e},e.dispose=function(){oe.removeData(this._element,ae),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},e._getConfig=function(e){return(e=l({},ue,e)).toggle=Boolean(e.toggle),we.typeCheckConfig(se,e,fe),e},e._getDimension=function(){return oe(this._element).hasClass(_e)?_e:ve},e._getParent=function(){var n=this,e=null;we.isElement(this._config.parent)?(e=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(e=this._config.parent[0])):e=document.querySelector(this._config.parent);var t='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]',i=[].slice.call(e.querySelectorAll(t));return oe(i).each(function(e,t){n._addAriaAndCollapsedClass(a._getTargetFromElement(t),[t])}),e},e._addAriaAndCollapsedClass=function(e,t){if(e){var n=oe(e).hasClass(de);t.length&&oe(t).toggleClass(ge,!n).attr("aria-expanded",n)}},a._getTargetFromElement=function(e){var t=we.getSelectorFromElement(e);return t?document.querySelector(t):null},a._jQueryInterface=function(i){return this.each(function(){var e=oe(this),t=e.data(ae),n=l({},ue,e.data(),"object"==typeof i&&i?i:{});if(!t&&n.toggle&&/show|hide/.test(i)&&(n.toggle=!1),t||(t=new a(this,n),e.data(ae,t)),"string"==typeof i){if("undefined"==typeof t[i])throw new TypeError('No method named "'+i+'"');t[i]()}})},s(a,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return ue}}]),a}(),oe(document).on(he.CLICK_DATA_API,Ee,function(e){"A"===e.currentTarget.tagName&&e.preventDefault();var n=oe(this),t=we.getSelectorFromElement(this),i=[].slice.call(document.querySelectorAll(t));oe(i).each(function(){var e=oe(this),t=e.data(ae)?"toggle":n.data();be._jQueryInterface.call(e,t)})}),oe.fn[se]=be._jQueryInterface,oe.fn[se].Constructor=be,oe.fn[se].noConflict=function(){return oe.fn[se]=ce,be._jQueryInterface},be),Ae="undefined"!=typeof window&&"undefined"!=typeof document,Ie=["Edge","Trident","Firefox"],Oe=0,Ne=0;Ne<Ie.length;Ne+=1)if(Ae&&0<=navigator.userAgent.indexOf(Ie[Ne])){Oe=1;break}var ke=Ae&&window.Promise?function(e){var t=!1;return function(){t||(t=!0,window.Promise.resolve().then(function(){t=!1,e()}))}}:function(e){var t=!1;return function(){t||(t=!0,setTimeout(function(){t=!1,e()},Oe))}};function 
xe(e){return e&&"[object Function]"==={}.toString.call(e)}function Pe(e,t){if(1!==e.nodeType)return[];var n=getComputedStyle(e,null);return t?n[t]:n}function Le(e){return"HTML"===e.nodeName?e:e.parentNode||e.host}function je(e){if(!e)return document.body;switch(e.nodeName){case"HTML":case"BODY":return e.ownerDocument.body;case"#document":return e.body}var t=Pe(e),n=t.overflow,i=t.overflowX,r=t.overflowY;return/(auto|scroll|overlay)/.test(n+r+i)?e:je(Le(e))}var He=Ae&&!(!window.MSInputMethodContext||!document.documentMode),Me=Ae&&/MSIE 10/.test(navigator.userAgent);function Fe(e){return 11===e?He:10===e?Me:He||Me}function We(e){if(!e)return document.documentElement;for(var t=Fe(10)?document.body:null,n=e.offsetParent;n===t&&e.nextElementSibling;)n=(e=e.nextElementSibling).offsetParent;var i=n&&n.nodeName;return i&&"BODY"!==i&&"HTML"!==i?-1!==["TD","TABLE"].indexOf(n.nodeName)&&"static"===Pe(n,"position")?We(n):n:e?e.ownerDocument.documentElement:document.documentElement}function Re(e){return null!==e.parentNode?Re(e.parentNode):e}function Ue(e,t){if(!(e&&e.nodeType&&t&&t.nodeType))return document.documentElement;var n=e.compareDocumentPosition(t)&Node.DOCUMENT_POSITION_FOLLOWING,i=n?e:t,r=n?t:e,o=document.createRange();o.setStart(i,0),o.setEnd(r,0);var s,a,l=o.commonAncestorContainer;if(e!==l&&t!==l||i.contains(r))return"BODY"===(a=(s=l).nodeName)||"HTML"!==a&&We(s.firstElementChild)!==s?We(l):l;var c=Re(e);return c.host?Ue(c.host,t):Ue(e,Re(t).host)}function Be(e){var t="top"===(1<arguments.length&&void 0!==arguments[1]?arguments[1]:"top")?"scrollTop":"scrollLeft",n=e.nodeName;if("BODY"===n||"HTML"===n){var i=e.ownerDocument.documentElement;return(e.ownerDocument.scrollingElement||i)[t]}return e[t]}function qe(e,t){var n="x"===t?"Left":"Top",i="Left"===n?"Right":"Bottom";return parseFloat(e["border"+n+"Width"],10)+parseFloat(e["border"+i+"Width"],10)}function Ke(e,t,n,i){return Math.max(t["offset"+e],t["scroll"+e],n["client"+e],n["offset"+e],n["scroll"+e],Fe(10)?n["offset"+e]+i["margin"+("Height"===e?"Top":"Left")]+i["margin"+("Height"===e?"Bottom":"Right")]:0)}function Qe(){var e=document.body,t=document.documentElement,n=Fe(10)&&getComputedStyle(t);return{height:Ke("Height",e,t,n),width:Ke("Width",e,t,n)}}var Ye=function(){function i(e,t){for(var n=0;n<t.length;n++){var i=t[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(e,i.key,i)}}return function(e,t,n){return t&&i(e.prototype,t),n&&i(e,n),e}}(),Ve=function(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e},ze=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(e[i]=n[i])}return e};function Ge(e){return ze({},e,{right:e.left+e.width,bottom:e.top+e.height})}function Je(e){var t={};try{if(Fe(10)){t=e.getBoundingClientRect();var n=Be(e,"top"),i=Be(e,"left");t.top+=n,t.left+=i,t.bottom+=n,t.right+=i}else t=e.getBoundingClientRect()}catch(e){}var r={left:t.left,top:t.top,width:t.right-t.left,height:t.bottom-t.top},o="HTML"===e.nodeName?Qe():{},s=o.width||e.clientWidth||r.right-r.left,a=o.height||e.clientHeight||r.bottom-r.top,l=e.offsetWidth-s,c=e.offsetHeight-a;if(l||c){var u=Pe(e);l-=qe(u,"x"),c-=qe(u,"y"),r.width-=l,r.height-=c}return Ge(r)}function Ze(e,t){var n=2<arguments.length&&void 
0!==arguments[2]&&arguments[2],i=Fe(10),r="HTML"===t.nodeName,o=Je(e),s=Je(t),a=je(e),l=Pe(t),c=parseFloat(l.borderTopWidth,10),u=parseFloat(l.borderLeftWidth,10);n&&"HTML"===t.nodeName&&(s.top=Math.max(s.top,0),s.left=Math.max(s.left,0));var f=Ge({top:o.top-s.top-c,left:o.left-s.left-u,width:o.width,height:o.height});if(f.marginTop=0,f.marginLeft=0,!i&&r){var h=parseFloat(l.marginTop,10),d=parseFloat(l.marginLeft,10);f.top-=c-h,f.bottom-=c-h,f.left-=u-d,f.right-=u-d,f.marginTop=h,f.marginLeft=d}return(i&&!n?t.contains(a):t===a&&"BODY"!==a.nodeName)&&(f=function(e,t){var n=2<arguments.length&&void 0!==arguments[2]&&arguments[2],i=Be(t,"top"),r=Be(t,"left"),o=n?-1:1;return e.top+=i*o,e.bottom+=i*o,e.left+=r*o,e.right+=r*o,e}(f,t)),f}function Xe(e){if(!e||!e.parentElement||Fe())return document.documentElement;for(var t=e.parentElement;t&&"none"===Pe(t,"transform");)t=t.parentElement;return t||document.documentElement}function $e(e,t,n,i){var r=4<arguments.length&&void 0!==arguments[4]&&arguments[4],o={top:0,left:0},s=r?Xe(e):Ue(e,t);if("viewport"===i)o=function(e){var t=1<arguments.length&&void 0!==arguments[1]&&arguments[1],n=e.ownerDocument.documentElement,i=Ze(e,n),r=Math.max(n.clientWidth,window.innerWidth||0),o=Math.max(n.clientHeight,window.innerHeight||0),s=t?0:Be(n),a=t?0:Be(n,"left");return Ge({top:s-i.top+i.marginTop,left:a-i.left+i.marginLeft,width:r,height:o})}(s,r);else{var a=void 0;"scrollParent"===i?"BODY"===(a=je(Le(t))).nodeName&&(a=e.ownerDocument.documentElement):a="window"===i?e.ownerDocument.documentElement:i;var l=Ze(a,s,r);if("HTML"!==a.nodeName||function e(t){var n=t.nodeName;return"BODY"!==n&&"HTML"!==n&&("fixed"===Pe(t,"position")||e(Le(t)))}(s))o=l;else{var c=Qe(),u=c.height,f=c.width;o.top+=l.top-l.marginTop,o.bottom=u+l.top,o.left+=l.left-l.marginLeft,o.right=f+l.left}}return o.left+=n,o.top+=n,o.right-=n,o.bottom-=n,o}function et(e,t,i,n,r){var o=5<arguments.length&&void 0!==arguments[5]?arguments[5]:0;if(-1===e.indexOf("auto"))return e;var s=$e(i,n,o,r),a={top:{width:s.width,height:t.top-s.top},right:{width:s.right-t.right,height:s.height},bottom:{width:s.width,height:s.bottom-t.bottom},left:{width:t.left-s.left,height:s.height}},l=Object.keys(a).map(function(e){return ze({key:e},a[e],{area:(t=a[e],t.width*t.height)});var t}).sort(function(e,t){return t.area-e.area}),c=l.filter(function(e){var t=e.width,n=e.height;return t>=i.clientWidth&&n>=i.clientHeight}),u=0<c.length?c[0].key:l[0].key,f=e.split("-")[1];return u+(f?"-"+f:"")}function tt(e,t,n){var i=3<arguments.length&&void 0!==arguments[3]?arguments[3]:null;return Ze(n,i?Xe(t):Ue(t,n),i)}function nt(e){var t=getComputedStyle(e),n=parseFloat(t.marginTop)+parseFloat(t.marginBottom),i=parseFloat(t.marginLeft)+parseFloat(t.marginRight);return{width:e.offsetWidth+i,height:e.offsetHeight+n}}function it(e){var t={left:"right",right:"left",bottom:"top",top:"bottom"};return e.replace(/left|right|bottom|top/g,function(e){return t[e]})}function rt(e,t,n){n=n.split("-")[0];var i=nt(e),r={width:i.width,height:i.height},o=-1!==["right","left"].indexOf(n),s=o?"top":"left",a=o?"left":"top",l=o?"height":"width",c=o?"width":"height";return r[s]=t[s]+t[l]/2-i[l]/2,r[a]=n===a?t[a]-i[c]:t[it(a)],r}function ot(e,t){return Array.prototype.find?e.find(t):e.filter(t)[0]}function st(e,n,t){return(void 0===t?e:e.slice(0,function(e,t,n){if(Array.prototype.findIndex)return e.findIndex(function(e){return e[t]===n});var i=ot(e,function(e){return e[t]===n});return 
e.indexOf(i)}(e,"name",t))).forEach(function(e){e.function&&console.warn("`modifier.function` is deprecated, use `modifier.fn`!");var t=e.function||e.fn;e.enabled&&xe(t)&&(n.offsets.popper=Ge(n.offsets.popper),n.offsets.reference=Ge(n.offsets.reference),n=t(n,e))}),n}function at(e,n){return e.some(function(e){var t=e.name;return e.enabled&&t===n})}function lt(e){for(var t=[!1,"ms","Webkit","Moz","O"],n=e.charAt(0).toUpperCase()+e.slice(1),i=0;i<t.length;i++){var r=t[i],o=r?""+r+n:e;if("undefined"!=typeof document.body.style[o])return o}return null}function ct(e){var t=e.ownerDocument;return t?t.defaultView:window}function ut(e,t,n,i){n.updateBound=i,ct(e).addEventListener("resize",n.updateBound,{passive:!0});var r=je(e);return function e(t,n,i,r){var o="BODY"===t.nodeName,s=o?t.ownerDocument.defaultView:t;s.addEventListener(n,i,{passive:!0}),o||e(je(s.parentNode),n,i,r),r.push(s)}(r,"scroll",n.updateBound,n.scrollParents),n.scrollElement=r,n.eventsEnabled=!0,n}function ft(){var e,t;this.state.eventsEnabled&&(cancelAnimationFrame(this.scheduleUpdate),this.state=(e=this.reference,t=this.state,ct(e).removeEventListener("resize",t.updateBound),t.scrollParents.forEach(function(e){e.removeEventListener("scroll",t.updateBound)}),t.updateBound=null,t.scrollParents=[],t.scrollElement=null,t.eventsEnabled=!1,t))}function ht(e){return""!==e&&!isNaN(parseFloat(e))&&isFinite(e)}function dt(n,i){Object.keys(i).forEach(function(e){var t="";-1!==["width","height","top","right","bottom","left"].indexOf(e)&&ht(i[e])&&(t="px"),n.style[e]=i[e]+t})}function pt(e,t,n){var i=ot(e,function(e){return e.name===t}),r=!!i&&e.some(function(e){return e.name===n&&e.enabled&&e.order<i.order});if(!r){var o="`"+t+"`",s="`"+n+"`";console.warn(s+" modifier is required by "+o+" modifier in order to work, be sure to include it before "+o+"!")}return r}var mt=["auto-start","auto","auto-end","top-start","top","top-end","right-start","right","right-end","bottom-end","bottom","bottom-start","left-end","left","left-start"],gt=mt.slice(3);function _t(e){var t=1<arguments.length&&void 0!==arguments[1]&&arguments[1],n=gt.indexOf(e),i=gt.slice(n+1).concat(gt.slice(0,n));return t?i.reverse():i}var vt="flip",yt="clockwise",Et="counterclockwise";function bt(e,r,o,t){var s=[0,0],a=-1!==["right","left"].indexOf(t),n=e.split(/(\+|\-)/).map(function(e){return e.trim()}),i=n.indexOf(ot(n,function(e){return-1!==e.search(/,|\s/)}));n[i]&&-1===n[i].indexOf(",")&&console.warn("Offsets separated by white space(s) are deprecated, use a comma (,) instead.");var l=/\s*,\s*|\s+/,c=-1!==i?[n.slice(0,i).concat([n[i].split(l)[0]]),[n[i].split(l)[1]].concat(n.slice(i+1))]:[n];return(c=c.map(function(e,t){var n=(1===t?!a:a)?"height":"width",i=!1;return e.reduce(function(e,t){return""===e[e.length-1]&&-1!==["+","-"].indexOf(t)?(e[e.length-1]=t,i=!0,e):i?(e[e.length-1]+=t,i=!1,e):e.concat(t)},[]).map(function(e){return function(e,t,n,i){var r=e.match(/((?:\-|\+)?\d*\.?\d*)(.*)/),o=+r[1],s=r[2];if(!o)return e;if(0===s.indexOf("%")){var a=void 0;switch(s){case"%p":a=n;break;case"%":case"%r":default:a=i}return Ge(a)[t]/100*o}if("vh"===s||"vw"===s)return("vh"===s?Math.max(document.documentElement.clientHeight,window.innerHeight||0):Math.max(document.documentElement.clientWidth,window.innerWidth||0))/100*o;return o}(e,n,r,o)})})).forEach(function(n,i){n.forEach(function(e,t){ht(e)&&(s[i]+=e*("-"===n[t-1]?-1:1))})}),s}var 
wt={placement:"bottom",positionFixed:!1,eventsEnabled:!0,removeOnDestroy:!1,onCreate:function(){},onUpdate:function(){},modifiers:{shift:{order:100,enabled:!0,fn:function(e){var t=e.placement,n=t.split("-")[0],i=t.split("-")[1];if(i){var r=e.offsets,o=r.reference,s=r.popper,a=-1!==["bottom","top"].indexOf(n),l=a?"left":"top",c=a?"width":"height",u={start:Ve({},l,o[l]),end:Ve({},l,o[l]+o[c]-s[c])};e.offsets.popper=ze({},s,u[i])}return e}},offset:{order:200,enabled:!0,fn:function(e,t){var n=t.offset,i=e.placement,r=e.offsets,o=r.popper,s=r.reference,a=i.split("-")[0],l=void 0;return l=ht(+n)?[+n,0]:bt(n,o,s,a),"left"===a?(o.top+=l[0],o.left-=l[1]):"right"===a?(o.top+=l[0],o.left+=l[1]):"top"===a?(o.left+=l[0],o.top-=l[1]):"bottom"===a&&(o.left+=l[0],o.top+=l[1]),e.popper=o,e},offset:0},preventOverflow:{order:300,enabled:!0,fn:function(e,i){var t=i.boundariesElement||We(e.instance.popper);e.instance.reference===t&&(t=We(t));var n=lt("transform"),r=e.instance.popper.style,o=r.top,s=r.left,a=r[n];r.top="",r.left="",r[n]="";var l=$e(e.instance.popper,e.instance.reference,i.padding,t,e.positionFixed);r.top=o,r.left=s,r[n]=a,i.boundaries=l;var c=i.priority,u=e.offsets.popper,f={primary:function(e){var t=u[e];return u[e]<l[e]&&!i.escapeWithReference&&(t=Math.max(u[e],l[e])),Ve({},e,t)},secondary:function(e){var t="right"===e?"left":"top",n=u[t];return u[e]>l[e]&&!i.escapeWithReference&&(n=Math.min(u[t],l[e]-("right"===e?u.width:u.height))),Ve({},t,n)}};return c.forEach(function(e){var t=-1!==["left","top"].indexOf(e)?"primary":"secondary";u=ze({},u,f[t](e))}),e.offsets.popper=u,e},priority:["left","right","top","bottom"],padding:5,boundariesElement:"scrollParent"},keepTogether:{order:400,enabled:!0,fn:function(e){var t=e.offsets,n=t.popper,i=t.reference,r=e.placement.split("-")[0],o=Math.floor,s=-1!==["top","bottom"].indexOf(r),a=s?"right":"bottom",l=s?"left":"top",c=s?"width":"height";return n[a]<o(i[l])&&(e.offsets.popper[l]=o(i[l])-n[c]),n[l]>o(i[a])&&(e.offsets.popper[l]=o(i[a])),e}},arrow:{order:500,enabled:!0,fn:function(e,t){var n;if(!pt(e.instance.modifiers,"arrow","keepTogether"))return e;var i=t.element;if("string"==typeof i){if(!(i=e.instance.popper.querySelector(i)))return e}else if(!e.instance.popper.contains(i))return console.warn("WARNING: `arrow.element` must be child of its popper element!"),e;var r=e.placement.split("-")[0],o=e.offsets,s=o.popper,a=o.reference,l=-1!==["left","right"].indexOf(r),c=l?"height":"width",u=l?"Top":"Left",f=u.toLowerCase(),h=l?"left":"top",d=l?"bottom":"right",p=nt(i)[c];a[d]-p<s[f]&&(e.offsets.popper[f]-=s[f]-(a[d]-p)),a[f]+p>s[d]&&(e.offsets.popper[f]+=a[f]+p-s[d]),e.offsets.popper=Ge(e.offsets.popper);var m=a[f]+a[c]/2-p/2,g=Pe(e.instance.popper),_=parseFloat(g["margin"+u],10),v=parseFloat(g["border"+u+"Width"],10),y=m-e.offsets.popper[f]-_-v;return y=Math.max(Math.min(s[c]-p,y),0),e.arrowElement=i,e.offsets.arrow=(Ve(n={},f,Math.round(y)),Ve(n,h,""),n),e},element:"[x-arrow]"},flip:{order:600,enabled:!0,fn:function(p,m){if(at(p.instance.modifiers,"inner"))return p;if(p.flipped&&p.placement===p.originalPlacement)return p;var g=$e(p.instance.popper,p.instance.reference,m.padding,m.boundariesElement,p.positionFixed),_=p.placement.split("-")[0],v=it(_),y=p.placement.split("-")[1]||"",E=[];switch(m.behavior){case vt:E=[_,v];break;case yt:E=_t(_);break;case Et:E=_t(_,!0);break;default:E=m.behavior}return E.forEach(function(e,t){if(_!==e||E.length===t+1)return p;_=p.placement.split("-")[0],v=it(_);var 
n,i=p.offsets.popper,r=p.offsets.reference,o=Math.floor,s="left"===_&&o(i.right)>o(r.left)||"right"===_&&o(i.left)<o(r.right)||"top"===_&&o(i.bottom)>o(r.top)||"bottom"===_&&o(i.top)<o(r.bottom),a=o(i.left)<o(g.left),l=o(i.right)>o(g.right),c=o(i.top)<o(g.top),u=o(i.bottom)>o(g.bottom),f="left"===_&&a||"right"===_&&l||"top"===_&&c||"bottom"===_&&u,h=-1!==["top","bottom"].indexOf(_),d=!!m.flipVariations&&(h&&"start"===y&&a||h&&"end"===y&&l||!h&&"start"===y&&c||!h&&"end"===y&&u);(s||f||d)&&(p.flipped=!0,(s||f)&&(_=E[t+1]),d&&(y="end"===(n=y)?"start":"start"===n?"end":n),p.placement=_+(y?"-"+y:""),p.offsets.popper=ze({},p.offsets.popper,rt(p.instance.popper,p.offsets.reference,p.placement)),p=st(p.instance.modifiers,p,"flip"))}),p},behavior:"flip",padding:5,boundariesElement:"viewport"},inner:{order:700,enabled:!1,fn:function(e){var t=e.placement,n=t.split("-")[0],i=e.offsets,r=i.popper,o=i.reference,s=-1!==["left","right"].indexOf(n),a=-1===["top","left"].indexOf(n);return r[s?"left":"top"]=o[n]-(a?r[s?"width":"height"]:0),e.placement=it(t),e.offsets.popper=Ge(r),e}},hide:{order:800,enabled:!0,fn:function(e){if(!pt(e.instance.modifiers,"hide","preventOverflow"))return e;var t=e.offsets.reference,n=ot(e.instance.modifiers,function(e){return"preventOverflow"===e.name}).boundaries;if(t.bottom<n.top||t.left>n.right||t.top>n.bottom||t.right<n.left){if(!0===e.hide)return e;e.hide=!0,e.attributes["x-out-of-boundaries"]=""}else{if(!1===e.hide)return e;e.hide=!1,e.attributes["x-out-of-boundaries"]=!1}return e}},computeStyle:{order:850,enabled:!0,fn:function(e,t){var n=t.x,i=t.y,r=e.offsets.popper,o=ot(e.instance.modifiers,function(e){return"applyStyle"===e.name}).gpuAcceleration;void 0!==o&&console.warn("WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!");var s=void 0!==o?o:t.gpuAcceleration,a=Je(We(e.instance.popper)),l={position:r.position},c={left:Math.floor(r.left),top:Math.round(r.top),bottom:Math.round(r.bottom),right:Math.floor(r.right)},u="bottom"===n?"top":"bottom",f="right"===i?"left":"right",h=lt("transform"),d=void 0,p=void 0;if(p="bottom"===u?-a.height+c.bottom:c.top,d="right"===f?-a.width+c.right:c.left,s&&h)l[h]="translate3d("+d+"px, "+p+"px, 0)",l[u]=0,l[f]=0,l.willChange="transform";else{var m="bottom"===u?-1:1,g="right"===f?-1:1;l[u]=p*m,l[f]=d*g,l.willChange=u+", "+f}var _={"x-placement":e.placement};return e.attributes=ze({},_,e.attributes),e.styles=ze({},l,e.styles),e.arrowStyles=ze({},e.offsets.arrow,e.arrowStyles),e},gpuAcceleration:!0,x:"bottom",y:"right"},applyStyle:{order:900,enabled:!0,fn:function(e){var t,n;return dt(e.instance.popper,e.styles),t=e.instance.popper,n=e.attributes,Object.keys(n).forEach(function(e){!1!==n[e]?t.setAttribute(e,n[e]):t.removeAttribute(e)}),e.arrowElement&&Object.keys(e.arrowStyles).length&&dt(e.arrowElement,e.arrowStyles),e},onLoad:function(e,t,n,i,r){var o=tt(r,t,e,n.positionFixed),s=et(n.placement,o,t,e,n.modifiers.flip.boundariesElement,n.modifiers.flip.padding);return t.setAttribute("x-placement",s),dt(t,{position:n.positionFixed?"fixed":"absolute"}),n},gpuAcceleration:void 0}}},Ct=function(){function o(e,t){var n=this,i=2<arguments.length&&void 0!==arguments[2]?arguments[2]:{};!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o),this.scheduleUpdate=function(){return 
requestAnimationFrame(n.update)},this.update=ke(this.update.bind(this)),this.options=ze({},o.Defaults,i),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=e&&e.jquery?e[0]:e,this.popper=t&&t.jquery?t[0]:t,this.options.modifiers={},Object.keys(ze({},o.Defaults.modifiers,i.modifiers)).forEach(function(e){n.options.modifiers[e]=ze({},o.Defaults.modifiers[e]||{},i.modifiers?i.modifiers[e]:{})}),this.modifiers=Object.keys(this.options.modifiers).map(function(e){return ze({name:e},n.options.modifiers[e])}).sort(function(e,t){return e.order-t.order}),this.modifiers.forEach(function(e){e.enabled&&xe(e.onLoad)&&e.onLoad(n.reference,n.popper,n.options,e,n.state)}),this.update();var r=this.options.eventsEnabled;r&&this.enableEventListeners(),this.state.eventsEnabled=r}return Ye(o,[{key:"update",value:function(){return function(){if(!this.state.isDestroyed){var e={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};e.offsets.reference=tt(this.state,this.popper,this.reference,this.options.positionFixed),e.placement=et(this.options.placement,e.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),e.originalPlacement=e.placement,e.positionFixed=this.options.positionFixed,e.offsets.popper=rt(this.popper,e.offsets.reference,e.placement),e.offsets.popper.position=this.options.positionFixed?"fixed":"absolute",e=st(this.modifiers,e),this.state.isCreated?this.options.onUpdate(e):(this.state.isCreated=!0,this.options.onCreate(e))}}.call(this)}},{key:"destroy",value:function(){return function(){return this.state.isDestroyed=!0,at(this.modifiers,"applyStyle")&&(this.popper.removeAttribute("x-placement"),this.popper.style.position="",this.popper.style.top="",this.popper.style.left="",this.popper.style.right="",this.popper.style.bottom="",this.popper.style.willChange="",this.popper.style[lt("transform")]=""),this.disableEventListeners(),this.options.removeOnDestroy&&this.popper.parentNode.removeChild(this.popper),this}.call(this)}},{key:"enableEventListeners",value:function(){return function(){this.state.eventsEnabled||(this.state=ut(this.reference,this.options,this.state,this.scheduleUpdate))}.call(this)}},{key:"disableEventListeners",value:function(){return ft.call(this)}}]),o}();Ct.Utils=("undefined"!=typeof window?window:global).PopperUtils,Ct.placements=mt,Ct.Defaults=wt;var Tt,St,Dt,At,It,Ot,Nt,kt,xt,Pt,Lt,jt,Ht,Mt,Ft,Wt,Rt,Ut,Bt,qt,Kt,Qt,Yt,Vt,zt,Gt,Jt,Zt,Xt,$t,en,tn,nn,rn,on,sn,an,ln,cn,un,fn,hn,dn,pn,mn,gn,_n,vn,yn,En,bn,wn,Cn,Tn,Sn,Dn,An,In,On,Nn,kn,xn,Pn,Ln,jn,Hn,Mn,Fn,Wn,Rn,Un,Bn,qn,Kn,Qn,Yn,Vn,zn,Gn,Jn,Zn,Xn,$n,ei,ti,ni,ii,ri,oi,si,ai,li,ci,ui,fi,hi,di,pi,mi,gi,_i,vi,yi,Ei,bi,wi,Ci,Ti,Si,Di,Ai,Ii,Oi,Ni,ki,xi,Pi,Li,ji,Hi,Mi,Fi,Wi,Ri,Ui,Bi=(St="dropdown",At="."+(Dt="bs.dropdown"),It=".data-api",Ot=(Tt=t).fn[St],Nt=new RegExp("38|40|27"),kt={HIDE:"hide"+At,HIDDEN:"hidden"+At,SHOW:"show"+At,SHOWN:"shown"+At,CLICK:"click"+At,CLICK_DATA_API:"click"+At+It,KEYDOWN_DATA_API:"keydown"+At+It,KEYUP_DATA_API:"keyup"+At+It},xt="disabled",Pt="show",Lt="dropup",jt="dropright",Ht="dropleft",Mt="dropdown-menu-right",Ft="position-static",Wt='[data-toggle="dropdown"]',Rt=".dropdown form",Ut=".dropdown-menu",Bt=".navbar-nav",qt=".dropdown-menu 
.dropdown-item:not(.disabled):not(:disabled)",Kt="top-start",Qt="top-end",Yt="bottom-start",Vt="bottom-end",zt="right-start",Gt="left-start",Jt={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic"},Zt={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string"},Xt=function(){function c(e,t){this._element=e,this._popper=null,this._config=this._getConfig(t),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var e=c.prototype;return e.toggle=function(){if(!this._element.disabled&&!Tt(this._element).hasClass(xt)){var e=c._getParentFromElement(this._element),t=Tt(this._menu).hasClass(Pt);if(c._clearMenus(),!t){var n={relatedTarget:this._element},i=Tt.Event(kt.SHOW,n);if(Tt(e).trigger(i),!i.isDefaultPrevented()){if(!this._inNavbar){if("undefined"==typeof Ct)throw new TypeError("Bootstrap dropdown require Popper.js (https://popper.js.org)");var r=this._element;"parent"===this._config.reference?r=e:we.isElement(this._config.reference)&&(r=this._config.reference,"undefined"!=typeof this._config.reference.jquery&&(r=this._config.reference[0])),"scrollParent"!==this._config.boundary&&Tt(e).addClass(Ft),this._popper=new Ct(r,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===Tt(e).closest(Bt).length&&Tt(document.body).children().on("mouseover",null,Tt.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),Tt(this._menu).toggleClass(Pt),Tt(e).toggleClass(Pt).trigger(Tt.Event(kt.SHOWN,n))}}}},e.dispose=function(){Tt.removeData(this._element,Dt),Tt(this._element).off(At),this._element=null,(this._menu=null)!==this._popper&&(this._popper.destroy(),this._popper=null)},e.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},e._addEventListeners=function(){var t=this;Tt(this._element).on(kt.CLICK,function(e){e.preventDefault(),e.stopPropagation(),t.toggle()})},e._getConfig=function(e){return e=l({},this.constructor.Default,Tt(this._element).data(),e),we.typeCheckConfig(St,e,this.constructor.DefaultType),e},e._getMenuElement=function(){if(!this._menu){var e=c._getParentFromElement(this._element);e&&(this._menu=e.querySelector(Ut))}return this._menu},e._getPlacement=function(){var e=Tt(this._element.parentNode),t=Yt;return e.hasClass(Lt)?(t=Kt,Tt(this._menu).hasClass(Mt)&&(t=Qt)):e.hasClass(jt)?t=zt:e.hasClass(Ht)?t=Gt:Tt(this._menu).hasClass(Mt)&&(t=Vt),t},e._detectNavbar=function(){return 0<Tt(this._element).closest(".navbar").length},e._getPopperConfig=function(){var t=this,e={};"function"==typeof this._config.offset?e.fn=function(e){return e.offsets=l({},e.offsets,t._config.offset(e.offsets)||{}),e}:e.offset=this._config.offset;var n={placement:this._getPlacement(),modifiers:{offset:e,flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return"static"===this._config.display&&(n.modifiers.applyStyle={enabled:!1}),n},c._jQueryInterface=function(t){return this.each(function(){var e=Tt(this).data(Dt);if(e||(e=new c(this,"object"==typeof t?t:null),Tt(this).data(Dt,e)),"string"==typeof t){if("undefined"==typeof e[t])throw new TypeError('No method named "'+t+'"');e[t]()}})},c._clearMenus=function(e){if(!e||3!==e.which&&("keyup"!==e.type||9===e.which))for(var t=[].slice.call(document.querySelectorAll(Wt)),n=0,i=t.length;n<i;n++){var 
r=c._getParentFromElement(t[n]),o=Tt(t[n]).data(Dt),s={relatedTarget:t[n]};if(e&&"click"===e.type&&(s.clickEvent=e),o){var a=o._menu;if(Tt(r).hasClass(Pt)&&!(e&&("click"===e.type&&/input|textarea/i.test(e.target.tagName)||"keyup"===e.type&&9===e.which)&&Tt.contains(r,e.target))){var l=Tt.Event(kt.HIDE,s);Tt(r).trigger(l),l.isDefaultPrevented()||("ontouchstart"in document.documentElement&&Tt(document.body).children().off("mouseover",null,Tt.noop),t[n].setAttribute("aria-expanded","false"),Tt(a).removeClass(Pt),Tt(r).removeClass(Pt).trigger(Tt.Event(kt.HIDDEN,s)))}}}},c._getParentFromElement=function(e){var t,n=we.getSelectorFromElement(e);return n&&(t=document.querySelector(n)),t||e.parentNode},c._dataApiKeydownHandler=function(e){if((/input|textarea/i.test(e.target.tagName)?!(32===e.which||27!==e.which&&(40!==e.which&&38!==e.which||Tt(e.target).closest(Ut).length)):Nt.test(e.which))&&(e.preventDefault(),e.stopPropagation(),!this.disabled&&!Tt(this).hasClass(xt))){var t=c._getParentFromElement(this),n=Tt(t).hasClass(Pt);if((n||27===e.which&&32===e.which)&&(!n||27!==e.which&&32!==e.which)){var i=[].slice.call(t.querySelectorAll(qt));if(0!==i.length){var r=i.indexOf(e.target);38===e.which&&0<r&&r--,40===e.which&&r<i.length-1&&r++,r<0&&(r=0),i[r].focus()}}else{if(27===e.which){var o=t.querySelector(Wt);Tt(o).trigger("focus")}Tt(this).trigger("click")}}},s(c,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return Jt}},{key:"DefaultType",get:function(){return Zt}}]),c}(),Tt(document).on(kt.KEYDOWN_DATA_API,Wt,Xt._dataApiKeydownHandler).on(kt.KEYDOWN_DATA_API,Ut,Xt._dataApiKeydownHandler).on(kt.CLICK_DATA_API+" "+kt.KEYUP_DATA_API,Xt._clearMenus).on(kt.CLICK_DATA_API,Wt,function(e){e.preventDefault(),e.stopPropagation(),Xt._jQueryInterface.call(Tt(this),"toggle")}).on(kt.CLICK_DATA_API,Rt,function(e){e.stopPropagation()}),Tt.fn[St]=Xt._jQueryInterface,Tt.fn[St].Constructor=Xt,Tt.fn[St].noConflict=function(){return Tt.fn[St]=Ot,Xt._jQueryInterface},Xt),qi=(en="modal",nn="."+(tn="bs.modal"),rn=($t=t).fn[en],on={backdrop:!0,keyboard:!0,focus:!0,show:!0},sn={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},an={HIDE:"hide"+nn,HIDDEN:"hidden"+nn,SHOW:"show"+nn,SHOWN:"shown"+nn,FOCUSIN:"focusin"+nn,RESIZE:"resize"+nn,CLICK_DISMISS:"click.dismiss"+nn,KEYDOWN_DISMISS:"keydown.dismiss"+nn,MOUSEUP_DISMISS:"mouseup.dismiss"+nn,MOUSEDOWN_DISMISS:"mousedown.dismiss"+nn,CLICK_DATA_API:"click"+nn+".data-api"},ln="modal-scrollbar-measure",cn="modal-backdrop",un="modal-open",fn="fade",hn="show",dn=".modal-dialog",pn='[data-toggle="modal"]',mn='[data-dismiss="modal"]',gn=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",_n=".sticky-top",vn=function(){function r(e,t){this._config=this._getConfig(t),this._element=e,this._dialog=e.querySelector(dn),this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._scrollbarWidth=0}var e=r.prototype;return e.toggle=function(e){return this._isShown?this.hide():this.show(e)},e.show=function(e){var t=this;if(!this._isTransitioning&&!this._isShown){$t(this._element).hasClass(fn)&&(this._isTransitioning=!0);var n=$t.Event(an.SHOW,{relatedTarget:e});$t(this._element).trigger(n),this._isShown||n.isDefaultPrevented()||(this._isShown=!0,this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),$t(document.body).addClass(un),this._setEscapeEvent(),this._setResizeEvent(),$t(this._element).on(an.CLICK_DISMISS,mn,function(e){return 
t.hide(e)}),$t(this._dialog).on(an.MOUSEDOWN_DISMISS,function(){$t(t._element).one(an.MOUSEUP_DISMISS,function(e){$t(e.target).is(t._element)&&(t._ignoreBackdropClick=!0)})}),this._showBackdrop(function(){return t._showElement(e)}))}},e.hide=function(e){var t=this;if(e&&e.preventDefault(),!this._isTransitioning&&this._isShown){var n=$t.Event(an.HIDE);if($t(this._element).trigger(n),this._isShown&&!n.isDefaultPrevented()){this._isShown=!1;var i=$t(this._element).hasClass(fn);if(i&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),$t(document).off(an.FOCUSIN),$t(this._element).removeClass(hn),$t(this._element).off(an.CLICK_DISMISS),$t(this._dialog).off(an.MOUSEDOWN_DISMISS),i){var r=we.getTransitionDurationFromElement(this._element);$t(this._element).one(we.TRANSITION_END,function(e){return t._hideModal(e)}).emulateTransitionEnd(r)}else this._hideModal()}}},e.dispose=function(){$t.removeData(this._element,tn),$t(window,document,this._element,this._backdrop).off(nn),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._scrollbarWidth=null},e.handleUpdate=function(){this._adjustDialog()},e._getConfig=function(e){return e=l({},on,e),we.typeCheckConfig(en,e,sn),e},e._showElement=function(e){var t=this,n=$t(this._element).hasClass(fn);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.scrollTop=0,n&&we.reflow(this._element),$t(this._element).addClass(hn),this._config.focus&&this._enforceFocus();var i=$t.Event(an.SHOWN,{relatedTarget:e}),r=function(){t._config.focus&&t._element.focus(),t._isTransitioning=!1,$t(t._element).trigger(i)};if(n){var o=we.getTransitionDurationFromElement(this._element);$t(this._dialog).one(we.TRANSITION_END,r).emulateTransitionEnd(o)}else r()},e._enforceFocus=function(){var t=this;$t(document).off(an.FOCUSIN).on(an.FOCUSIN,function(e){document!==e.target&&t._element!==e.target&&0===$t(t._element).has(e.target).length&&t._element.focus()})},e._setEscapeEvent=function(){var t=this;this._isShown&&this._config.keyboard?$t(this._element).on(an.KEYDOWN_DISMISS,function(e){27===e.which&&(e.preventDefault(),t.hide())}):this._isShown||$t(this._element).off(an.KEYDOWN_DISMISS)},e._setResizeEvent=function(){var t=this;this._isShown?$t(window).on(an.RESIZE,function(e){return t.handleUpdate(e)}):$t(window).off(an.RESIZE)},e._hideModal=function(){var e=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._isTransitioning=!1,this._showBackdrop(function(){$t(document.body).removeClass(un),e._resetAdjustments(),e._resetScrollbar(),$t(e._element).trigger(an.HIDDEN)})},e._removeBackdrop=function(){this._backdrop&&($t(this._backdrop).remove(),this._backdrop=null)},e._showBackdrop=function(e){var t=this,n=$t(this._element).hasClass(fn)?fn:"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className=cn,n&&this._backdrop.classList.add(n),$t(this._backdrop).appendTo(document.body),$t(this._element).on(an.CLICK_DISMISS,function(e){t._ignoreBackdropClick?t._ignoreBackdropClick=!1:e.target===e.currentTarget&&("static"===t._config.backdrop?t._element.focus():t.hide())}),n&&we.reflow(this._backdrop),$t(this._backdrop).addClass(hn),!e)return;if(!n)return void e();var 
i=we.getTransitionDurationFromElement(this._backdrop);$t(this._backdrop).one(we.TRANSITION_END,e).emulateTransitionEnd(i)}else if(!this._isShown&&this._backdrop){$t(this._backdrop).removeClass(hn);var r=function(){t._removeBackdrop(),e&&e()};if($t(this._element).hasClass(fn)){var o=we.getTransitionDurationFromElement(this._backdrop);$t(this._backdrop).one(we.TRANSITION_END,r).emulateTransitionEnd(o)}else r()}else e&&e()},e._adjustDialog=function(){var e=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&e&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!e&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var e=document.body.getBoundingClientRect();this._isBodyOverflowing=e.left+e.right<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},e._setScrollbar=function(){var r=this;if(this._isBodyOverflowing){var e=[].slice.call(document.querySelectorAll(gn)),t=[].slice.call(document.querySelectorAll(_n));$t(e).each(function(e,t){var n=t.style.paddingRight,i=$t(t).css("padding-right");$t(t).data("padding-right",n).css("padding-right",parseFloat(i)+r._scrollbarWidth+"px")}),$t(t).each(function(e,t){var n=t.style.marginRight,i=$t(t).css("margin-right");$t(t).data("margin-right",n).css("margin-right",parseFloat(i)-r._scrollbarWidth+"px")});var n=document.body.style.paddingRight,i=$t(document.body).css("padding-right");$t(document.body).data("padding-right",n).css("padding-right",parseFloat(i)+this._scrollbarWidth+"px")}},e._resetScrollbar=function(){var e=[].slice.call(document.querySelectorAll(gn));$t(e).each(function(e,t){var n=$t(t).data("padding-right");$t(t).removeData("padding-right"),t.style.paddingRight=n||""});var t=[].slice.call(document.querySelectorAll(""+_n));$t(t).each(function(e,t){var n=$t(t).data("margin-right");"undefined"!=typeof n&&$t(t).css("margin-right",n).removeData("margin-right")});var n=$t(document.body).data("padding-right");$t(document.body).removeData("padding-right"),document.body.style.paddingRight=n||""},e._getScrollbarWidth=function(){var e=document.createElement("div");e.className=ln,document.body.appendChild(e);var t=e.getBoundingClientRect().width-e.clientWidth;return document.body.removeChild(e),t},r._jQueryInterface=function(n,i){return this.each(function(){var e=$t(this).data(tn),t=l({},on,$t(this).data(),"object"==typeof n&&n?n:{});if(e||(e=new r(this,t),$t(this).data(tn,e)),"string"==typeof n){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n](i)}else t.show&&e.show(i)})},s(r,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return on}}]),r}(),$t(document).on(an.CLICK_DATA_API,pn,function(e){var t,n=this,i=we.getSelectorFromElement(this);i&&(t=document.querySelector(i));var r=$t(t).data(tn)?"toggle":l({},$t(t).data(),$t(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||e.preventDefault();var o=$t(t).one(an.SHOW,function(e){e.isDefaultPrevented()||o.one(an.HIDDEN,function(){$t(n).is(":visible")&&n.focus()})});vn._jQueryInterface.call($t(t),r,this)}),$t.fn[en]=vn._jQueryInterface,$t.fn[en].Constructor=vn,$t.fn[en].noConflict=function(){return $t.fn[en]=rn,vn._jQueryInterface},vn),Ki=(En="tooltip",wn="."+(bn="bs.tooltip"),Cn=(yn=t).fn[En],Tn="bs-tooltip",Sn=new RegExp("(^|\\s)"+Tn+"\\S+","g"),In={animation:!0,template:'<div class="tooltip" role="tooltip"><div 
class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!(An={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"}),selector:!(Dn={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)"}),placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent"},Nn="out",kn={HIDE:"hide"+wn,HIDDEN:"hidden"+wn,SHOW:(On="show")+wn,SHOWN:"shown"+wn,INSERTED:"inserted"+wn,CLICK:"click"+wn,FOCUSIN:"focusin"+wn,FOCUSOUT:"focusout"+wn,MOUSEENTER:"mouseenter"+wn,MOUSELEAVE:"mouseleave"+wn},xn="fade",Pn="show",Ln=".tooltip-inner",jn=".arrow",Hn="hover",Mn="focus",Fn="click",Wn="manual",Rn=function(){function i(e,t){if("undefined"==typeof Ct)throw new TypeError("Bootstrap tooltips require Popper.js (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=e,this.config=this._getConfig(t),this.tip=null,this._setListeners()}var e=i.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(e){if(this._isEnabled)if(e){var t=this.constructor.DATA_KEY,n=yn(e.currentTarget).data(t);n||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(t,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(yn(this.getTipElement()).hasClass(Pn))return void this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),yn.removeData(this.element,this.constructor.DATA_KEY),yn(this.element).off(this.constructor.EVENT_KEY),yn(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&yn(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,(this._activeTrigger=null)!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===yn(this.element).css("display"))throw new Error("Please use show on visible elements");var e=yn.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){yn(this.element).trigger(e);var n=yn.contains(this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!n)return;var i=this.getTipElement(),r=we.getUID(this.constructor.NAME);i.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&yn(i).addClass(xn);var o="function"==typeof this.config.placement?this.config.placement.call(this,i,this.element):this.config.placement,s=this._getAttachment(o);this.addAttachmentClass(s);var a=!1===this.config.container?document.body:yn(document).find(this.config.container);yn(i).data(this.constructor.DATA_KEY,this),yn.contains(this.element.ownerDocument.documentElement,this.tip)||yn(i).appendTo(a),yn(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new 
Ct(this.element,i,{placement:s,modifiers:{offset:{offset:this.config.offset},flip:{behavior:this.config.fallbackPlacement},arrow:{element:jn},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(e){e.originalPlacement!==e.placement&&t._handlePopperPlacementChange(e)},onUpdate:function(e){t._handlePopperPlacementChange(e)}}),yn(i).addClass(Pn),"ontouchstart"in document.documentElement&&yn(document.body).children().on("mouseover",null,yn.noop);var l=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,yn(t.element).trigger(t.constructor.Event.SHOWN),e===Nn&&t._leave(null,t)};if(yn(this.tip).hasClass(xn)){var c=we.getTransitionDurationFromElement(this.tip);yn(this.tip).one(we.TRANSITION_END,l).emulateTransitionEnd(c)}else l()}},e.hide=function(e){var t=this,n=this.getTipElement(),i=yn.Event(this.constructor.Event.HIDE),r=function(){t._hoverState!==On&&n.parentNode&&n.parentNode.removeChild(n),t._cleanTipClass(),t.element.removeAttribute("aria-describedby"),yn(t.element).trigger(t.constructor.Event.HIDDEN),null!==t._popper&&t._popper.destroy(),e&&e()};if(yn(this.element).trigger(i),!i.isDefaultPrevented()){if(yn(n).removeClass(Pn),"ontouchstart"in document.documentElement&&yn(document.body).children().off("mouseover",null,yn.noop),this._activeTrigger[Fn]=!1,this._activeTrigger[Mn]=!1,this._activeTrigger[Hn]=!1,yn(this.tip).hasClass(xn)){var o=we.getTransitionDurationFromElement(n);yn(n).one(we.TRANSITION_END,r).emulateTransitionEnd(o)}else r();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(e){yn(this.getTipElement()).addClass(Tn+"-"+e)},e.getTipElement=function(){return this.tip=this.tip||yn(this.config.template)[0],this.tip},e.setContent=function(){var e=this.getTipElement();this.setElementContent(yn(e.querySelectorAll(Ln)),this.getTitle()),yn(e).removeClass(xn+" "+Pn)},e.setElementContent=function(e,t){var n=this.config.html;"object"==typeof t&&(t.nodeType||t.jquery)?n?yn(t).parent().is(e)||e.empty().append(t):e.text(yn(t).text()):e[n?"html":"text"](t)},e.getTitle=function(){var e=this.element.getAttribute("data-original-title");return e||(e="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),e},e._getAttachment=function(e){return An[e.toUpperCase()]},e._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(e){if("click"===e)yn(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(e){return i.toggle(e)});else if(e!==Wn){var t=e===Hn?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=e===Hn?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;yn(i.element).on(t,i.config.selector,function(e){return i._enter(e)}).on(n,i.config.selector,function(e){return i._leave(e)})}yn(i.element).closest(".modal").on("hide.bs.modal",function(){return i.hide()})}),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var e=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==e)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(e,t){var n=this.constructor.DATA_KEY;(t=t||yn(e.currentTarget).data(n))||(t=new 
this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusin"===e.type?Mn:Hn]=!0),yn(t.getTipElement()).hasClass(Pn)||t._hoverState===On?t._hoverState=On:(clearTimeout(t._timeout),t._hoverState=On,t.config.delay&&t.config.delay.show?t._timeout=setTimeout(function(){t._hoverState===On&&t.show()},t.config.delay.show):t.show())},e._leave=function(e,t){var n=this.constructor.DATA_KEY;(t=t||yn(e.currentTarget).data(n))||(t=new this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusout"===e.type?Mn:Hn]=!1),t._isWithActiveTrigger()||(clearTimeout(t._timeout),t._hoverState=Nn,t.config.delay&&t.config.delay.hide?t._timeout=setTimeout(function(){t._hoverState===Nn&&t.hide()},t.config.delay.hide):t.hide())},e._isWithActiveTrigger=function(){for(var e in this._activeTrigger)if(this._activeTrigger[e])return!0;return!1},e._getConfig=function(e){return"number"==typeof(e=l({},this.constructor.Default,yn(this.element).data(),"object"==typeof e&&e?e:{})).delay&&(e.delay={show:e.delay,hide:e.delay}),"number"==typeof e.title&&(e.title=e.title.toString()),"number"==typeof e.content&&(e.content=e.content.toString()),we.typeCheckConfig(En,e,this.constructor.DefaultType),e},e._getDelegateConfig=function(){var e={};if(this.config)for(var t in this.config)this.constructor.Default[t]!==this.config[t]&&(e[t]=this.config[t]);return e},e._cleanTipClass=function(){var e=yn(this.getTipElement()),t=e.attr("class").match(Sn);null!==t&&t.length&&e.removeClass(t.join(""))},e._handlePopperPlacementChange=function(e){var t=e.instance;this.tip=t.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(e.placement))},e._fixTransition=function(){var e=this.getTipElement(),t=this.config.animation;null===e.getAttribute("x-placement")&&(yn(e).removeClass(xn),this.config.animation=!1,this.hide(),this.show(),this.config.animation=t)},i._jQueryInterface=function(n){return this.each(function(){var e=yn(this).data(bn),t="object"==typeof n&&n;if((e||!/dispose|hide/.test(n))&&(e||(e=new i(this,t),yn(this).data(bn,e)),"string"==typeof n)){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return In}},{key:"NAME",get:function(){return En}},{key:"DATA_KEY",get:function(){return bn}},{key:"Event",get:function(){return kn}},{key:"EVENT_KEY",get:function(){return wn}},{key:"DefaultType",get:function(){return Dn}}]),i}(),yn.fn[En]=Rn._jQueryInterface,yn.fn[En].Constructor=Rn,yn.fn[En].noConflict=function(){return yn.fn[En]=Cn,Rn._jQueryInterface},Rn),Qi=(Bn="popover",Kn="."+(qn="bs.popover"),Qn=(Un=t).fn[Bn],Yn="bs-popover",Vn=new RegExp("(^|\\s)"+Yn+"\\S+","g"),zn=l({},Ki.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'}),Gn=l({},Ki.DefaultType,{content:"(string|element|function)"}),Jn="fade",Xn=".popover-header",$n=".popover-body",ei={HIDE:"hide"+Kn,HIDDEN:"hidden"+Kn,SHOW:(Zn="show")+Kn,SHOWN:"shown"+Kn,INSERTED:"inserted"+Kn,CLICK:"click"+Kn,FOCUSIN:"focusin"+Kn,FOCUSOUT:"focusout"+Kn,MOUSEENTER:"mouseenter"+Kn,MOUSELEAVE:"mouseleave"+Kn},ti=function(e){var t,n;function i(){return e.apply(this,arguments)||this}n=e,(t=i).prototype=Object.create(n.prototype),(t.prototype.constructor=t).__proto__=n;var r=i.prototype;return 
r.isWithContent=function(){return this.getTitle()||this._getContent()},r.addAttachmentClass=function(e){Un(this.getTipElement()).addClass(Yn+"-"+e)},r.getTipElement=function(){return this.tip=this.tip||Un(this.config.template)[0],this.tip},r.setContent=function(){var e=Un(this.getTipElement());this.setElementContent(e.find(Xn),this.getTitle());var t=this._getContent();"function"==typeof t&&(t=t.call(this.element)),this.setElementContent(e.find($n),t),e.removeClass(Jn+" "+Zn)},r._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},r._cleanTipClass=function(){var e=Un(this.getTipElement()),t=e.attr("class").match(Vn);null!==t&&0<t.length&&e.removeClass(t.join(""))},i._jQueryInterface=function(n){return this.each(function(){var e=Un(this).data(qn),t="object"==typeof n?n:null;if((e||!/destroy|hide/.test(n))&&(e||(e=new i(this,t),Un(this).data(qn,e)),"string"==typeof n)){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return zn}},{key:"NAME",get:function(){return Bn}},{key:"DATA_KEY",get:function(){return qn}},{key:"Event",get:function(){return ei}},{key:"EVENT_KEY",get:function(){return Kn}},{key:"DefaultType",get:function(){return Gn}}]),i}(Ki),Un.fn[Bn]=ti._jQueryInterface,Un.fn[Bn].Constructor=ti,Un.fn[Bn].noConflict=function(){return Un.fn[Bn]=Qn,ti._jQueryInterface},ti),Yi=(ii="scrollspy",oi="."+(ri="bs.scrollspy"),si=(ni=t).fn[ii],ai={offset:10,method:"auto",target:""},li={offset:"number",method:"string",target:"(string|element)"},ci={ACTIVATE:"activate"+oi,SCROLL:"scroll"+oi,LOAD_DATA_API:"load"+oi+".data-api"},ui="dropdown-item",fi="active",hi='[data-spy="scroll"]',di=".active",pi=".nav, .list-group",mi=".nav-link",gi=".nav-item",_i=".list-group-item",vi=".dropdown",yi=".dropdown-item",Ei=".dropdown-toggle",bi="offset",wi="position",Ci=function(){function n(e,t){var n=this;this._element=e,this._scrollElement="BODY"===e.tagName?window:e,this._config=this._getConfig(t),this._selector=this._config.target+" "+mi+","+this._config.target+" "+_i+","+this._config.target+" "+yi,this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,ni(this._scrollElement).on(ci.SCROLL,function(e){return n._process(e)}),this.refresh(),this._process()}var e=n.prototype;return e.refresh=function(){var t=this,e=this._scrollElement===this._scrollElement.window?bi:wi,r="auto"===this._config.method?e:this._config.method,o=r===wi?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map(function(e){var t,n=we.getSelectorFromElement(e);if(n&&(t=document.querySelector(n)),t){var i=t.getBoundingClientRect();if(i.width||i.height)return[ni(t)[r]().top+o,n]}return null}).filter(function(e){return e}).sort(function(e,t){return e[0]-t[0]}).forEach(function(e){t._offsets.push(e[0]),t._targets.push(e[1])})},e.dispose=function(){ni.removeData(this._element,ri),ni(this._scrollElement).off(oi),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},e._getConfig=function(e){if("string"!=typeof(e=l({},ai,"object"==typeof e&&e?e:{})).target){var t=ni(e.target).attr("id");t||(t=we.getUID(ii),ni(e.target).attr("id",t)),e.target="#"+t}return we.typeCheckConfig(ii,e,li),e},e._getScrollTop=function(){return 
this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},e._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},e._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},e._process=function(){var e=this._getScrollTop()+this._config.offset,t=this._getScrollHeight(),n=this._config.offset+t-this._getOffsetHeight();if(this._scrollHeight!==t&&this.refresh(),n<=e){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&e<this._offsets[0]&&0<this._offsets[0])return this._activeTarget=null,void this._clear();for(var r=this._offsets.length;r--;){this._activeTarget!==this._targets[r]&&e>=this._offsets[r]&&("undefined"==typeof this._offsets[r+1]||e<this._offsets[r+1])&&this._activate(this._targets[r])}}},e._activate=function(t){this._activeTarget=t,this._clear();var e=this._selector.split(",");e=e.map(function(e){return e+'[data-target="'+t+'"],'+e+'[href="'+t+'"]'});var n=ni([].slice.call(document.querySelectorAll(e.join(","))));n.hasClass(ui)?(n.closest(vi).find(Ei).addClass(fi),n.addClass(fi)):(n.addClass(fi),n.parents(pi).prev(mi+", "+_i).addClass(fi),n.parents(pi).prev(gi).children(mi).addClass(fi)),ni(this._scrollElement).trigger(ci.ACTIVATE,{relatedTarget:t})},e._clear=function(){var e=[].slice.call(document.querySelectorAll(this._selector));ni(e).filter(di).removeClass(fi)},n._jQueryInterface=function(t){return this.each(function(){var e=ni(this).data(ri);if(e||(e=new n(this,"object"==typeof t&&t),ni(this).data(ri,e)),"string"==typeof t){if("undefined"==typeof e[t])throw new TypeError('No method named "'+t+'"');e[t]()}})},s(n,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return ai}}]),n}(),ni(window).on(ci.LOAD_DATA_API,function(){for(var e=[].slice.call(document.querySelectorAll(hi)),t=e.length;t--;){var n=ni(e[t]);Ci._jQueryInterface.call(n,n.data())}}),ni.fn[ii]=Ci._jQueryInterface,ni.fn[ii].Constructor=Ci,ni.fn[ii].noConflict=function(){return ni.fn[ii]=si,Ci._jQueryInterface},Ci),Vi=(Di="."+(Si="bs.tab"),Ai=(Ti=t).fn.tab,Ii={HIDE:"hide"+Di,HIDDEN:"hidden"+Di,SHOW:"show"+Di,SHOWN:"shown"+Di,CLICK_DATA_API:"click"+Di+".data-api"},Oi="dropdown-menu",Ni="active",ki="disabled",xi="fade",Pi="show",Li=".dropdown",ji=".nav, .list-group",Hi=".active",Mi="> li > .active",Fi='[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',Wi=".dropdown-toggle",Ri="> .dropdown-menu .active",Ui=function(){function i(e){this._element=e}var e=i.prototype;return e.show=function(){var n=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&Ti(this._element).hasClass(Ni)||Ti(this._element).hasClass(ki))){var e,i,t=Ti(this._element).closest(ji)[0],r=we.getSelectorFromElement(this._element);if(t){var o="UL"===t.nodeName?Mi:Hi;i=(i=Ti.makeArray(Ti(t).find(o)))[i.length-1]}var s=Ti.Event(Ii.HIDE,{relatedTarget:this._element}),a=Ti.Event(Ii.SHOW,{relatedTarget:i});if(i&&Ti(i).trigger(s),Ti(this._element).trigger(a),!a.isDefaultPrevented()&&!s.isDefaultPrevented()){r&&(e=document.querySelector(r)),this._activate(this._element,t);var l=function(){var 
e=Ti.Event(Ii.HIDDEN,{relatedTarget:n._element}),t=Ti.Event(Ii.SHOWN,{relatedTarget:i});Ti(i).trigger(e),Ti(n._element).trigger(t)};e?this._activate(e,e.parentNode,l):l()}}},e.dispose=function(){Ti.removeData(this._element,Si),this._element=null},e._activate=function(e,t,n){var i=this,r=("UL"===t.nodeName?Ti(t).find(Mi):Ti(t).children(Hi))[0],o=n&&r&&Ti(r).hasClass(xi),s=function(){return i._transitionComplete(e,r,n)};if(r&&o){var a=we.getTransitionDurationFromElement(r);Ti(r).one(we.TRANSITION_END,s).emulateTransitionEnd(a)}else s()},e._transitionComplete=function(e,t,n){if(t){Ti(t).removeClass(Pi+" "+Ni);var i=Ti(t.parentNode).find(Ri)[0];i&&Ti(i).removeClass(Ni),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!1)}if(Ti(e).addClass(Ni),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!0),we.reflow(e),Ti(e).addClass(Pi),e.parentNode&&Ti(e.parentNode).hasClass(Oi)){var r=Ti(e).closest(Li)[0];if(r){var o=[].slice.call(r.querySelectorAll(Wi));Ti(o).addClass(Ni)}e.setAttribute("aria-expanded",!0)}n&&n()},i._jQueryInterface=function(n){return this.each(function(){var e=Ti(this),t=e.data(Si);if(t||(t=new i(this),e.data(Si,t)),"string"==typeof n){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),i}(),Ti(document).on(Ii.CLICK_DATA_API,Fi,function(e){e.preventDefault(),Ui._jQueryInterface.call(Ti(this),"show")}),Ti.fn.tab=Ui._jQueryInterface,Ti.fn.tab.Constructor=Ui,Ti.fn.tab.noConflict=function(){return Ti.fn.tab=Ai,Ui._jQueryInterface},Ui);!function(e){if("undefined"==typeof e)throw new TypeError("Bootstrap's JavaScript requires jQuery. jQuery must be included before Bootstrap's JavaScript.");var t=e.fn.jquery.split(" ")[0].split(".");if(t[0]<2&&t[1]<9||1===t[0]&&9===t[1]&&t[2]<1||4<=t[0])throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}(t),e.Util=we,e.Alert=Ce,e.Button=Te,e.Carousel=Se,e.Collapse=De,e.Dropdown=Bi,e.Modal=qi,e.Popover=Qi,e.Scrollspy=Yi,e.Tab=Vi,e.Tooltip=Ki,Object.defineProperty(e,"__esModule",{value:!0})});
//# sourceMappingURL=bootstrap.bundle.min.js.map
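The bundle above registers each component on the jQuery prototype ($.fn.alert, $.fn.button, $.fn.carousel, $.fn.collapse, $.fn.dropdown, $.fn.modal, $.fn.tooltip, $.fn.popover, $.fn.scrollspy, $.fn.tab), wires up the data-api event handlers, and embeds its own copy of Popper.js, so only jQuery (>= 1.9.1 and < 4.0.0, per the version guard near the end of the bundle) needs to be loaded before it. A minimal initialization sketch follows; the selectors and option values are illustrative assumptions, while the plugin methods and option names are the public Bootstrap 4.1 API defined above.

// Minimal usage sketch for the bundled plugins. Assumes jQuery and
// bootstrap.bundle.min.js are already loaded; the selectors below are
// illustrative, not part of the bundle itself.
$(function () {
  // Tooltips and popovers are opt-in: the data-api does not enable
  // them automatically, so they must be initialized explicitly.
  $('[data-toggle="tooltip"]').tooltip({
    placement: 'top',         // default shown in the tooltip config object above
    boundary: 'scrollParent'  // forwarded to Popper's preventOverflow modifier
  });
  $('[data-toggle="popover"]').popover();

  // Modals can be driven programmatically as well as via data attributes.
  $('#example-modal').modal('show');

  // Every plugin exposes noConflict(), which restores any previous
  // $.fn binding and returns Bootstrap's jQuery interface.
  var bootstrapTooltip = $.fn.tooltip.noConflict();
});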
|
scheduler-front
|
/scheduler_front-0.0.11-py3-none-any.whl/scheduler_front/static/vendor/bootstrap/js/bootstrap.bundle.min.js
|
bootstrap.bundle.min.js
|
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],t):t(e.bootstrap={},e.jQuery)}(this,function(e,t){"use strict";function i(e,t){for(var n=0;n<t.length;n++){var i=t[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(e,i.key,i)}}function s(e,t,n){return t&&i(e.prototype,t),n&&i(e,n),e}function l(r){for(var e=1;e<arguments.length;e++){var o=null!=arguments[e]?arguments[e]:{},t=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(t=t.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),t.forEach(function(e){var t,n,i;t=r,i=o[n=e],n in t?Object.defineProperty(t,n,{value:i,enumerable:!0,configurable:!0,writable:!0}):t[n]=i})}return r}for(var r,n,o,a,c,u,f,h,d,p,m,g,_,v,y,E,b,w,C,T,S,D,A,I,O,N,k,x,P,L,j,H,M,F,W,R,U,B,q,K,Q,Y,V,z,G,J,Z,X,$,ee,te,ne,ie,re,oe,se,ae,le,ce,ue,fe,he,de,pe,me,ge,_e,ve,ye,Ee,be,we=function(i){var t="transitionend";function e(e){var t=this,n=!1;return i(this).one(l.TRANSITION_END,function(){n=!0}),setTimeout(function(){n||l.triggerTransitionEnd(t)},e),this}var l={TRANSITION_END:"bsTransitionEnd",getUID:function(e){for(;e+=~~(1e6*Math.random()),document.getElementById(e););return e},getSelectorFromElement:function(e){var t=e.getAttribute("data-target");t&&"#"!==t||(t=e.getAttribute("href")||"");try{return document.querySelector(t)?t:null}catch(e){return null}},getTransitionDurationFromElement:function(e){if(!e)return 0;var t=i(e).css("transition-duration");return parseFloat(t)?(t=t.split(",")[0],1e3*parseFloat(t)):0},reflow:function(e){return e.offsetHeight},triggerTransitionEnd:function(e){i(e).trigger(t)},supportsTransitionEnd:function(){return Boolean(t)},isElement:function(e){return(e[0]||e).nodeType},typeCheckConfig:function(e,t,n){for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var r=n[i],o=t[i],s=o&&l.isElement(o)?"element":(a=o,{}.toString.call(a).match(/\s([a-z]+)/i)[1].toLowerCase());if(!new RegExp(r).test(s))throw new Error(e.toUpperCase()+': Option "'+i+'" provided type "'+s+'" but expected type "'+r+'".')}var a}};return i.fn.emulateTransitionEnd=e,i.event.special[l.TRANSITION_END]={bindType:t,delegateType:t,handle:function(e){if(i(e.target).is(this))return e.handleObj.handler.apply(this,arguments)}},l}(t=t&&t.hasOwnProperty("default")?t.default:t),Ce=(n="alert",a="."+(o="bs.alert"),c=(r=t).fn[n],u={CLOSE:"close"+a,CLOSED:"closed"+a,CLICK_DATA_API:"click"+a+".data-api"},f="alert",h="fade",d="show",p=function(){function i(e){this._element=e}var e=i.prototype;return e.close=function(e){var t=this._element;e&&(t=this._getRootElement(e)),this._triggerCloseEvent(t).isDefaultPrevented()||this._removeElement(t)},e.dispose=function(){r.removeData(this._element,o),this._element=null},e._getRootElement=function(e){var t=we.getSelectorFromElement(e),n=!1;return t&&(n=document.querySelector(t)),n||(n=r(e).closest("."+f)[0]),n},e._triggerCloseEvent=function(e){var t=r.Event(u.CLOSE);return r(e).trigger(t),t},e._removeElement=function(t){var n=this;if(r(t).removeClass(d),r(t).hasClass(h)){var e=we.getTransitionDurationFromElement(t);r(t).one(we.TRANSITION_END,function(e){return n._destroyElement(t,e)}).emulateTransitionEnd(e)}else this._destroyElement(t)},e._destroyElement=function(e){r(e).detach().trigger(u.CLOSED).remove()},i._jQueryInterface=function(n){return this.each(function(){var e=r(this),t=e.data(o);t||(t=new 
i(this),e.data(o,t)),"close"===n&&t[n](this)})},i._handleDismiss=function(t){return function(e){e&&e.preventDefault(),t.close(this)}},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),i}(),r(document).on(u.CLICK_DATA_API,'[data-dismiss="alert"]',p._handleDismiss(new p)),r.fn[n]=p._jQueryInterface,r.fn[n].Constructor=p,r.fn[n].noConflict=function(){return r.fn[n]=c,p._jQueryInterface},p),Te=(g="button",v="."+(_="bs.button"),y=".data-api",E=(m=t).fn[g],b="active",w="btn",T='[data-toggle^="button"]',S='[data-toggle="buttons"]',D="input",A=".active",I=".btn",O={CLICK_DATA_API:"click"+v+y,FOCUS_BLUR_DATA_API:(C="focus")+v+y+" blur"+v+y},N=function(){function n(e){this._element=e}var e=n.prototype;return e.toggle=function(){var e=!0,t=!0,n=m(this._element).closest(S)[0];if(n){var i=this._element.querySelector(D);if(i){if("radio"===i.type)if(i.checked&&this._element.classList.contains(b))e=!1;else{var r=n.querySelector(A);r&&m(r).removeClass(b)}if(e){if(i.hasAttribute("disabled")||n.hasAttribute("disabled")||i.classList.contains("disabled")||n.classList.contains("disabled"))return;i.checked=!this._element.classList.contains(b),m(i).trigger("change")}i.focus(),t=!1}}t&&this._element.setAttribute("aria-pressed",!this._element.classList.contains(b)),e&&m(this._element).toggleClass(b)},e.dispose=function(){m.removeData(this._element,_),this._element=null},n._jQueryInterface=function(t){return this.each(function(){var e=m(this).data(_);e||(e=new n(this),m(this).data(_,e)),"toggle"===t&&e[t]()})},s(n,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),n}(),m(document).on(O.CLICK_DATA_API,T,function(e){e.preventDefault();var t=e.target;m(t).hasClass(w)||(t=m(t).closest(I)),N._jQueryInterface.call(m(t),"toggle")}).on(O.FOCUS_BLUR_DATA_API,T,function(e){var t=m(e.target).closest(I)[0];m(t).toggleClass(C,/^focus(in)?$/.test(e.type))}),m.fn[g]=N._jQueryInterface,m.fn[g].Constructor=N,m.fn[g].noConflict=function(){return m.fn[g]=E,N._jQueryInterface},N),Se=(x="carousel",L="."+(P="bs.carousel"),j=".data-api",H=(k=t).fn[x],M={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0},F={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean"},W="next",R="prev",U="left",B="right",q={SLIDE:"slide"+L,SLID:"slid"+L,KEYDOWN:"keydown"+L,MOUSEENTER:"mouseenter"+L,MOUSELEAVE:"mouseleave"+L,TOUCHEND:"touchend"+L,LOAD_DATA_API:"load"+L+j,CLICK_DATA_API:"click"+L+j},K="carousel",Q="active",Y="slide",V="carousel-item-right",z="carousel-item-left",G="carousel-item-next",J="carousel-item-prev",Z=".active",X=".active.carousel-item",$=".carousel-item",ee=".carousel-item-next, .carousel-item-prev",te=".carousel-indicators",ne="[data-slide], [data-slide-to]",ie='[data-ride="carousel"]',re=function(){function o(e,t){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this._config=this._getConfig(t),this._element=k(e)[0],this._indicatorsElement=this._element.querySelector(te),this._addEventListeners()}var e=o.prototype;return 
e.next=function(){this._isSliding||this._slide(W)},e.nextWhenVisible=function(){!document.hidden&&k(this._element).is(":visible")&&"hidden"!==k(this._element).css("visibility")&&this.next()},e.prev=function(){this._isSliding||this._slide(R)},e.pause=function(e){e||(this._isPaused=!0),this._element.querySelector(ee)&&(we.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},e.cycle=function(e){e||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},e.to=function(e){var t=this;this._activeElement=this._element.querySelector(X);var n=this._getItemIndex(this._activeElement);if(!(e>this._items.length-1||e<0))if(this._isSliding)k(this._element).one(q.SLID,function(){return t.to(e)});else{if(n===e)return this.pause(),void this.cycle();var i=n<e?W:R;this._slide(i,this._items[e])}},e.dispose=function(){k(this._element).off(L),k.removeData(this._element,P),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},e._getConfig=function(e){return e=l({},M,e),we.typeCheckConfig(x,e,F),e},e._addEventListeners=function(){var t=this;this._config.keyboard&&k(this._element).on(q.KEYDOWN,function(e){return t._keydown(e)}),"hover"===this._config.pause&&(k(this._element).on(q.MOUSEENTER,function(e){return t.pause(e)}).on(q.MOUSELEAVE,function(e){return t.cycle(e)}),"ontouchstart"in document.documentElement&&k(this._element).on(q.TOUCHEND,function(){t.pause(),t.touchTimeout&&clearTimeout(t.touchTimeout),t.touchTimeout=setTimeout(function(e){return t.cycle(e)},500+t._config.interval)}))},e._keydown=function(e){if(!/input|textarea/i.test(e.target.tagName))switch(e.which){case 37:e.preventDefault(),this.prev();break;case 39:e.preventDefault(),this.next()}},e._getItemIndex=function(e){return this._items=e&&e.parentNode?[].slice.call(e.parentNode.querySelectorAll($)):[],this._items.indexOf(e)},e._getItemByDirection=function(e,t){var n=e===W,i=e===R,r=this._getItemIndex(t),o=this._items.length-1;if((i&&0===r||n&&r===o)&&!this._config.wrap)return t;var s=(r+(e===R?-1:1))%this._items.length;return-1===s?this._items[this._items.length-1]:this._items[s]},e._triggerSlideEvent=function(e,t){var n=this._getItemIndex(e),i=this._getItemIndex(this._element.querySelector(X)),r=k.Event(q.SLIDE,{relatedTarget:e,direction:t,from:i,to:n});return k(this._element).trigger(r),r},e._setActiveIndicatorElement=function(e){if(this._indicatorsElement){var t=[].slice.call(this._indicatorsElement.querySelectorAll(Z));k(t).removeClass(Q);var n=this._indicatorsElement.children[this._getItemIndex(e)];n&&k(n).addClass(Q)}},e._slide=function(e,t){var n,i,r,o=this,s=this._element.querySelector(X),a=this._getItemIndex(s),l=t||s&&this._getItemByDirection(e,s),c=this._getItemIndex(l),u=Boolean(this._interval);if(e===W?(n=z,i=G,r=U):(n=V,i=J,r=B),l&&k(l).hasClass(Q))this._isSliding=!1;else if(!this._triggerSlideEvent(l,r).isDefaultPrevented()&&s&&l){this._isSliding=!0,u&&this.pause(),this._setActiveIndicatorElement(l);var f=k.Event(q.SLID,{relatedTarget:l,direction:r,from:a,to:c});if(k(this._element).hasClass(Y)){k(l).addClass(i),we.reflow(l),k(s).addClass(n),k(l).addClass(n);var h=we.getTransitionDurationFromElement(s);k(s).one(we.TRANSITION_END,function(){k(l).removeClass(n+" 
"+i).addClass(Q),k(s).removeClass(Q+" "+i+" "+n),o._isSliding=!1,setTimeout(function(){return k(o._element).trigger(f)},0)}).emulateTransitionEnd(h)}else k(s).removeClass(Q),k(l).addClass(Q),this._isSliding=!1,k(this._element).trigger(f);u&&this.cycle()}},o._jQueryInterface=function(i){return this.each(function(){var e=k(this).data(P),t=l({},M,k(this).data());"object"==typeof i&&(t=l({},t,i));var n="string"==typeof i?i:t.slide;if(e||(e=new o(this,t),k(this).data(P,e)),"number"==typeof i)e.to(i);else if("string"==typeof n){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}else t.interval&&(e.pause(),e.cycle())})},o._dataApiClickHandler=function(e){var t=we.getSelectorFromElement(this);if(t){var n=k(t)[0];if(n&&k(n).hasClass(K)){var i=l({},k(n).data(),k(this).data()),r=this.getAttribute("data-slide-to");r&&(i.interval=!1),o._jQueryInterface.call(k(n),i),r&&k(n).data(P).to(r),e.preventDefault()}}},s(o,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return M}}]),o}(),k(document).on(q.CLICK_DATA_API,ne,re._dataApiClickHandler),k(window).on(q.LOAD_DATA_API,function(){for(var e=[].slice.call(document.querySelectorAll(ie)),t=0,n=e.length;t<n;t++){var i=k(e[t]);re._jQueryInterface.call(i,i.data())}}),k.fn[x]=re._jQueryInterface,k.fn[x].Constructor=re,k.fn[x].noConflict=function(){return k.fn[x]=H,re._jQueryInterface},re),De=(se="collapse",le="."+(ae="bs.collapse"),ce=(oe=t).fn[se],ue={toggle:!0,parent:""},fe={toggle:"boolean",parent:"(string|element)"},he={SHOW:"show"+le,SHOWN:"shown"+le,HIDE:"hide"+le,HIDDEN:"hidden"+le,CLICK_DATA_API:"click"+le+".data-api"},de="show",pe="collapse",me="collapsing",ge="collapsed",_e="width",ve="height",ye=".show, .collapsing",Ee='[data-toggle="collapse"]',be=function(){function a(t,e){this._isTransitioning=!1,this._element=t,this._config=this._getConfig(e),this._triggerArray=oe.makeArray(document.querySelectorAll('[data-toggle="collapse"][href="#'+t.id+'"],[data-toggle="collapse"][data-target="#'+t.id+'"]'));for(var n=[].slice.call(document.querySelectorAll(Ee)),i=0,r=n.length;i<r;i++){var o=n[i],s=we.getSelectorFromElement(o),a=[].slice.call(document.querySelectorAll(s)).filter(function(e){return e===t});null!==s&&0<a.length&&(this._selector=s,this._triggerArray.push(o))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var e=a.prototype;return e.toggle=function(){oe(this._element).hasClass(de)?this.hide():this.show()},e.show=function(){var e,t,n=this;if(!this._isTransitioning&&!oe(this._element).hasClass(de)&&(this._parent&&0===(e=[].slice.call(this._parent.querySelectorAll(ye)).filter(function(e){return e.getAttribute("data-parent")===n._config.parent})).length&&(e=null),!(e&&(t=oe(e).not(this._selector).data(ae))&&t._isTransitioning))){var i=oe.Event(he.SHOW);if(oe(this._element).trigger(i),!i.isDefaultPrevented()){e&&(a._jQueryInterface.call(oe(e).not(this._selector),"hide"),t||oe(e).data(ae,null));var r=this._getDimension();oe(this._element).removeClass(pe).addClass(me),this._element.style[r]=0,this._triggerArray.length&&oe(this._triggerArray).removeClass(ge).attr("aria-expanded",!0),this.setTransitioning(!0);var 
o="scroll"+(r[0].toUpperCase()+r.slice(1)),s=we.getTransitionDurationFromElement(this._element);oe(this._element).one(we.TRANSITION_END,function(){oe(n._element).removeClass(me).addClass(pe).addClass(de),n._element.style[r]="",n.setTransitioning(!1),oe(n._element).trigger(he.SHOWN)}).emulateTransitionEnd(s),this._element.style[r]=this._element[o]+"px"}}},e.hide=function(){var e=this;if(!this._isTransitioning&&oe(this._element).hasClass(de)){var t=oe.Event(he.HIDE);if(oe(this._element).trigger(t),!t.isDefaultPrevented()){var n=this._getDimension();this._element.style[n]=this._element.getBoundingClientRect()[n]+"px",we.reflow(this._element),oe(this._element).addClass(me).removeClass(pe).removeClass(de);var i=this._triggerArray.length;if(0<i)for(var r=0;r<i;r++){var o=this._triggerArray[r],s=we.getSelectorFromElement(o);if(null!==s)oe([].slice.call(document.querySelectorAll(s))).hasClass(de)||oe(o).addClass(ge).attr("aria-expanded",!1)}this.setTransitioning(!0);this._element.style[n]="";var a=we.getTransitionDurationFromElement(this._element);oe(this._element).one(we.TRANSITION_END,function(){e.setTransitioning(!1),oe(e._element).removeClass(me).addClass(pe).trigger(he.HIDDEN)}).emulateTransitionEnd(a)}}},e.setTransitioning=function(e){this._isTransitioning=e},e.dispose=function(){oe.removeData(this._element,ae),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},e._getConfig=function(e){return(e=l({},ue,e)).toggle=Boolean(e.toggle),we.typeCheckConfig(se,e,fe),e},e._getDimension=function(){return oe(this._element).hasClass(_e)?_e:ve},e._getParent=function(){var n=this,e=null;we.isElement(this._config.parent)?(e=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(e=this._config.parent[0])):e=document.querySelector(this._config.parent);var t='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]',i=[].slice.call(e.querySelectorAll(t));return oe(i).each(function(e,t){n._addAriaAndCollapsedClass(a._getTargetFromElement(t),[t])}),e},e._addAriaAndCollapsedClass=function(e,t){if(e){var n=oe(e).hasClass(de);t.length&&oe(t).toggleClass(ge,!n).attr("aria-expanded",n)}},a._getTargetFromElement=function(e){var t=we.getSelectorFromElement(e);return t?document.querySelector(t):null},a._jQueryInterface=function(i){return this.each(function(){var e=oe(this),t=e.data(ae),n=l({},ue,e.data(),"object"==typeof i&&i?i:{});if(!t&&n.toggle&&/show|hide/.test(i)&&(n.toggle=!1),t||(t=new a(this,n),e.data(ae,t)),"string"==typeof i){if("undefined"==typeof t[i])throw new TypeError('No method named "'+i+'"');t[i]()}})},s(a,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return ue}}]),a}(),oe(document).on(he.CLICK_DATA_API,Ee,function(e){"A"===e.currentTarget.tagName&&e.preventDefault();var n=oe(this),t=we.getSelectorFromElement(this),i=[].slice.call(document.querySelectorAll(t));oe(i).each(function(){var e=oe(this),t=e.data(ae)?"toggle":n.data();be._jQueryInterface.call(e,t)})}),oe.fn[se]=be._jQueryInterface,oe.fn[se].Constructor=be,oe.fn[se].noConflict=function(){return oe.fn[se]=ce,be._jQueryInterface},be),Ae="undefined"!=typeof window&&"undefined"!=typeof document,Ie=["Edge","Trident","Firefox"],Oe=0,Ne=0;Ne<Ie.length;Ne+=1)if(Ae&&0<=navigator.userAgent.indexOf(Ie[Ne])){Oe=1;break}var ke=Ae&&window.Promise?function(e){var t=!1;return function(){t||(t=!0,window.Promise.resolve().then(function(){t=!1,e()}))}}:function(e){var t=!1;return function(){t||(t=!0,setTimeout(function(){t=!1,e()},Oe))}};function 
xe(e){return e&&"[object Function]"==={}.toString.call(e)}function Pe(e,t){if(1!==e.nodeType)return[];var n=getComputedStyle(e,null);return t?n[t]:n}function Le(e){return"HTML"===e.nodeName?e:e.parentNode||e.host}function je(e){if(!e)return document.body;switch(e.nodeName){case"HTML":case"BODY":return e.ownerDocument.body;case"#document":return e.body}var t=Pe(e),n=t.overflow,i=t.overflowX,r=t.overflowY;return/(auto|scroll|overlay)/.test(n+r+i)?e:je(Le(e))}var He=Ae&&!(!window.MSInputMethodContext||!document.documentMode),Me=Ae&&/MSIE 10/.test(navigator.userAgent);function Fe(e){return 11===e?He:10===e?Me:He||Me}function We(e){if(!e)return document.documentElement;for(var t=Fe(10)?document.body:null,n=e.offsetParent;n===t&&e.nextElementSibling;)n=(e=e.nextElementSibling).offsetParent;var i=n&&n.nodeName;return i&&"BODY"!==i&&"HTML"!==i?-1!==["TD","TABLE"].indexOf(n.nodeName)&&"static"===Pe(n,"position")?We(n):n:e?e.ownerDocument.documentElement:document.documentElement}function Re(e){return null!==e.parentNode?Re(e.parentNode):e}function Ue(e,t){if(!(e&&e.nodeType&&t&&t.nodeType))return document.documentElement;var n=e.compareDocumentPosition(t)&Node.DOCUMENT_POSITION_FOLLOWING,i=n?e:t,r=n?t:e,o=document.createRange();o.setStart(i,0),o.setEnd(r,0);var s,a,l=o.commonAncestorContainer;if(e!==l&&t!==l||i.contains(r))return"BODY"===(a=(s=l).nodeName)||"HTML"!==a&&We(s.firstElementChild)!==s?We(l):l;var c=Re(e);return c.host?Ue(c.host,t):Ue(e,Re(t).host)}function Be(e){var t="top"===(1<arguments.length&&void 0!==arguments[1]?arguments[1]:"top")?"scrollTop":"scrollLeft",n=e.nodeName;if("BODY"===n||"HTML"===n){var i=e.ownerDocument.documentElement;return(e.ownerDocument.scrollingElement||i)[t]}return e[t]}function qe(e,t){var n="x"===t?"Left":"Top",i="Left"===n?"Right":"Bottom";return parseFloat(e["border"+n+"Width"],10)+parseFloat(e["border"+i+"Width"],10)}function Ke(e,t,n,i){return Math.max(t["offset"+e],t["scroll"+e],n["client"+e],n["offset"+e],n["scroll"+e],Fe(10)?n["offset"+e]+i["margin"+("Height"===e?"Top":"Left")]+i["margin"+("Height"===e?"Bottom":"Right")]:0)}function Qe(){var e=document.body,t=document.documentElement,n=Fe(10)&&getComputedStyle(t);return{height:Ke("Height",e,t,n),width:Ke("Width",e,t,n)}}var Ye=function(){function i(e,t){for(var n=0;n<t.length;n++){var i=t[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(e,i.key,i)}}return function(e,t,n){return t&&i(e.prototype,t),n&&i(e,n),e}}(),Ve=function(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e},ze=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(e[i]=n[i])}return e};function Ge(e){return ze({},e,{right:e.left+e.width,bottom:e.top+e.height})}function Je(e){var t={};try{if(Fe(10)){t=e.getBoundingClientRect();var n=Be(e,"top"),i=Be(e,"left");t.top+=n,t.left+=i,t.bottom+=n,t.right+=i}else t=e.getBoundingClientRect()}catch(e){}var r={left:t.left,top:t.top,width:t.right-t.left,height:t.bottom-t.top},o="HTML"===e.nodeName?Qe():{},s=o.width||e.clientWidth||r.right-r.left,a=o.height||e.clientHeight||r.bottom-r.top,l=e.offsetWidth-s,c=e.offsetHeight-a;if(l||c){var u=Pe(e);l-=qe(u,"x"),c-=qe(u,"y"),r.width-=l,r.height-=c}return Ge(r)}function Ze(e,t){var n=2<arguments.length&&void 
0!==arguments[2]&&arguments[2],i=Fe(10),r="HTML"===t.nodeName,o=Je(e),s=Je(t),a=je(e),l=Pe(t),c=parseFloat(l.borderTopWidth,10),u=parseFloat(l.borderLeftWidth,10);n&&"HTML"===t.nodeName&&(s.top=Math.max(s.top,0),s.left=Math.max(s.left,0));var f=Ge({top:o.top-s.top-c,left:o.left-s.left-u,width:o.width,height:o.height});if(f.marginTop=0,f.marginLeft=0,!i&&r){var h=parseFloat(l.marginTop,10),d=parseFloat(l.marginLeft,10);f.top-=c-h,f.bottom-=c-h,f.left-=u-d,f.right-=u-d,f.marginTop=h,f.marginLeft=d}return(i&&!n?t.contains(a):t===a&&"BODY"!==a.nodeName)&&(f=function(e,t){var n=2<arguments.length&&void 0!==arguments[2]&&arguments[2],i=Be(t,"top"),r=Be(t,"left"),o=n?-1:1;return e.top+=i*o,e.bottom+=i*o,e.left+=r*o,e.right+=r*o,e}(f,t)),f}function Xe(e){if(!e||!e.parentElement||Fe())return document.documentElement;for(var t=e.parentElement;t&&"none"===Pe(t,"transform");)t=t.parentElement;return t||document.documentElement}function $e(e,t,n,i){var r=4<arguments.length&&void 0!==arguments[4]&&arguments[4],o={top:0,left:0},s=r?Xe(e):Ue(e,t);if("viewport"===i)o=function(e){var t=1<arguments.length&&void 0!==arguments[1]&&arguments[1],n=e.ownerDocument.documentElement,i=Ze(e,n),r=Math.max(n.clientWidth,window.innerWidth||0),o=Math.max(n.clientHeight,window.innerHeight||0),s=t?0:Be(n),a=t?0:Be(n,"left");return Ge({top:s-i.top+i.marginTop,left:a-i.left+i.marginLeft,width:r,height:o})}(s,r);else{var a=void 0;"scrollParent"===i?"BODY"===(a=je(Le(t))).nodeName&&(a=e.ownerDocument.documentElement):a="window"===i?e.ownerDocument.documentElement:i;var l=Ze(a,s,r);if("HTML"!==a.nodeName||function e(t){var n=t.nodeName;return"BODY"!==n&&"HTML"!==n&&("fixed"===Pe(t,"position")||e(Le(t)))}(s))o=l;else{var c=Qe(),u=c.height,f=c.width;o.top+=l.top-l.marginTop,o.bottom=u+l.top,o.left+=l.left-l.marginLeft,o.right=f+l.left}}return o.left+=n,o.top+=n,o.right-=n,o.bottom-=n,o}function et(e,t,i,n,r){var o=5<arguments.length&&void 0!==arguments[5]?arguments[5]:0;if(-1===e.indexOf("auto"))return e;var s=$e(i,n,o,r),a={top:{width:s.width,height:t.top-s.top},right:{width:s.right-t.right,height:s.height},bottom:{width:s.width,height:s.bottom-t.bottom},left:{width:t.left-s.left,height:s.height}},l=Object.keys(a).map(function(e){return ze({key:e},a[e],{area:(t=a[e],t.width*t.height)});var t}).sort(function(e,t){return t.area-e.area}),c=l.filter(function(e){var t=e.width,n=e.height;return t>=i.clientWidth&&n>=i.clientHeight}),u=0<c.length?c[0].key:l[0].key,f=e.split("-")[1];return u+(f?"-"+f:"")}function tt(e,t,n){var i=3<arguments.length&&void 0!==arguments[3]?arguments[3]:null;return Ze(n,i?Xe(t):Ue(t,n),i)}function nt(e){var t=getComputedStyle(e),n=parseFloat(t.marginTop)+parseFloat(t.marginBottom),i=parseFloat(t.marginLeft)+parseFloat(t.marginRight);return{width:e.offsetWidth+i,height:e.offsetHeight+n}}function it(e){var t={left:"right",right:"left",bottom:"top",top:"bottom"};return e.replace(/left|right|bottom|top/g,function(e){return t[e]})}function rt(e,t,n){n=n.split("-")[0];var i=nt(e),r={width:i.width,height:i.height},o=-1!==["right","left"].indexOf(n),s=o?"top":"left",a=o?"left":"top",l=o?"height":"width",c=o?"width":"height";return r[s]=t[s]+t[l]/2-i[l]/2,r[a]=n===a?t[a]-i[c]:t[it(a)],r}function ot(e,t){return Array.prototype.find?e.find(t):e.filter(t)[0]}function st(e,n,t){return(void 0===t?e:e.slice(0,function(e,t,n){if(Array.prototype.findIndex)return e.findIndex(function(e){return e[t]===n});var i=ot(e,function(e){return e[t]===n});return 
e.indexOf(i)}(e,"name",t))).forEach(function(e){e.function&&console.warn("`modifier.function` is deprecated, use `modifier.fn`!");var t=e.function||e.fn;e.enabled&&xe(t)&&(n.offsets.popper=Ge(n.offsets.popper),n.offsets.reference=Ge(n.offsets.reference),n=t(n,e))}),n}function at(e,n){return e.some(function(e){var t=e.name;return e.enabled&&t===n})}function lt(e){for(var t=[!1,"ms","Webkit","Moz","O"],n=e.charAt(0).toUpperCase()+e.slice(1),i=0;i<t.length;i++){var r=t[i],o=r?""+r+n:e;if("undefined"!=typeof document.body.style[o])return o}return null}function ct(e){var t=e.ownerDocument;return t?t.defaultView:window}function ut(e,t,n,i){n.updateBound=i,ct(e).addEventListener("resize",n.updateBound,{passive:!0});var r=je(e);return function e(t,n,i,r){var o="BODY"===t.nodeName,s=o?t.ownerDocument.defaultView:t;s.addEventListener(n,i,{passive:!0}),o||e(je(s.parentNode),n,i,r),r.push(s)}(r,"scroll",n.updateBound,n.scrollParents),n.scrollElement=r,n.eventsEnabled=!0,n}function ft(){var e,t;this.state.eventsEnabled&&(cancelAnimationFrame(this.scheduleUpdate),this.state=(e=this.reference,t=this.state,ct(e).removeEventListener("resize",t.updateBound),t.scrollParents.forEach(function(e){e.removeEventListener("scroll",t.updateBound)}),t.updateBound=null,t.scrollParents=[],t.scrollElement=null,t.eventsEnabled=!1,t))}function ht(e){return""!==e&&!isNaN(parseFloat(e))&&isFinite(e)}function dt(n,i){Object.keys(i).forEach(function(e){var t="";-1!==["width","height","top","right","bottom","left"].indexOf(e)&&ht(i[e])&&(t="px"),n.style[e]=i[e]+t})}function pt(e,t,n){var i=ot(e,function(e){return e.name===t}),r=!!i&&e.some(function(e){return e.name===n&&e.enabled&&e.order<i.order});if(!r){var o="`"+t+"`",s="`"+n+"`";console.warn(s+" modifier is required by "+o+" modifier in order to work, be sure to include it before "+o+"!")}return r}var mt=["auto-start","auto","auto-end","top-start","top","top-end","right-start","right","right-end","bottom-end","bottom","bottom-start","left-end","left","left-start"],gt=mt.slice(3);function _t(e){var t=1<arguments.length&&void 0!==arguments[1]&&arguments[1],n=gt.indexOf(e),i=gt.slice(n+1).concat(gt.slice(0,n));return t?i.reverse():i}var vt="flip",yt="clockwise",Et="counterclockwise";function bt(e,r,o,t){var s=[0,0],a=-1!==["right","left"].indexOf(t),n=e.split(/(\+|\-)/).map(function(e){return e.trim()}),i=n.indexOf(ot(n,function(e){return-1!==e.search(/,|\s/)}));n[i]&&-1===n[i].indexOf(",")&&console.warn("Offsets separated by white space(s) are deprecated, use a comma (,) instead.");var l=/\s*,\s*|\s+/,c=-1!==i?[n.slice(0,i).concat([n[i].split(l)[0]]),[n[i].split(l)[1]].concat(n.slice(i+1))]:[n];return(c=c.map(function(e,t){var n=(1===t?!a:a)?"height":"width",i=!1;return e.reduce(function(e,t){return""===e[e.length-1]&&-1!==["+","-"].indexOf(t)?(e[e.length-1]=t,i=!0,e):i?(e[e.length-1]+=t,i=!1,e):e.concat(t)},[]).map(function(e){return function(e,t,n,i){var r=e.match(/((?:\-|\+)?\d*\.?\d*)(.*)/),o=+r[1],s=r[2];if(!o)return e;if(0===s.indexOf("%")){var a=void 0;switch(s){case"%p":a=n;break;case"%":case"%r":default:a=i}return Ge(a)[t]/100*o}if("vh"===s||"vw"===s)return("vh"===s?Math.max(document.documentElement.clientHeight,window.innerHeight||0):Math.max(document.documentElement.clientWidth,window.innerWidth||0))/100*o;return o}(e,n,r,o)})})).forEach(function(n,i){n.forEach(function(e,t){ht(e)&&(s[i]+=e*("-"===n[t-1]?-1:1))})}),s}var 
wt={placement:"bottom",positionFixed:!1,eventsEnabled:!0,removeOnDestroy:!1,onCreate:function(){},onUpdate:function(){},modifiers:{shift:{order:100,enabled:!0,fn:function(e){var t=e.placement,n=t.split("-")[0],i=t.split("-")[1];if(i){var r=e.offsets,o=r.reference,s=r.popper,a=-1!==["bottom","top"].indexOf(n),l=a?"left":"top",c=a?"width":"height",u={start:Ve({},l,o[l]),end:Ve({},l,o[l]+o[c]-s[c])};e.offsets.popper=ze({},s,u[i])}return e}},offset:{order:200,enabled:!0,fn:function(e,t){var n=t.offset,i=e.placement,r=e.offsets,o=r.popper,s=r.reference,a=i.split("-")[0],l=void 0;return l=ht(+n)?[+n,0]:bt(n,o,s,a),"left"===a?(o.top+=l[0],o.left-=l[1]):"right"===a?(o.top+=l[0],o.left+=l[1]):"top"===a?(o.left+=l[0],o.top-=l[1]):"bottom"===a&&(o.left+=l[0],o.top+=l[1]),e.popper=o,e},offset:0},preventOverflow:{order:300,enabled:!0,fn:function(e,i){var t=i.boundariesElement||We(e.instance.popper);e.instance.reference===t&&(t=We(t));var n=lt("transform"),r=e.instance.popper.style,o=r.top,s=r.left,a=r[n];r.top="",r.left="",r[n]="";var l=$e(e.instance.popper,e.instance.reference,i.padding,t,e.positionFixed);r.top=o,r.left=s,r[n]=a,i.boundaries=l;var c=i.priority,u=e.offsets.popper,f={primary:function(e){var t=u[e];return u[e]<l[e]&&!i.escapeWithReference&&(t=Math.max(u[e],l[e])),Ve({},e,t)},secondary:function(e){var t="right"===e?"left":"top",n=u[t];return u[e]>l[e]&&!i.escapeWithReference&&(n=Math.min(u[t],l[e]-("right"===e?u.width:u.height))),Ve({},t,n)}};return c.forEach(function(e){var t=-1!==["left","top"].indexOf(e)?"primary":"secondary";u=ze({},u,f[t](e))}),e.offsets.popper=u,e},priority:["left","right","top","bottom"],padding:5,boundariesElement:"scrollParent"},keepTogether:{order:400,enabled:!0,fn:function(e){var t=e.offsets,n=t.popper,i=t.reference,r=e.placement.split("-")[0],o=Math.floor,s=-1!==["top","bottom"].indexOf(r),a=s?"right":"bottom",l=s?"left":"top",c=s?"width":"height";return n[a]<o(i[l])&&(e.offsets.popper[l]=o(i[l])-n[c]),n[l]>o(i[a])&&(e.offsets.popper[l]=o(i[a])),e}},arrow:{order:500,enabled:!0,fn:function(e,t){var n;if(!pt(e.instance.modifiers,"arrow","keepTogether"))return e;var i=t.element;if("string"==typeof i){if(!(i=e.instance.popper.querySelector(i)))return e}else if(!e.instance.popper.contains(i))return console.warn("WARNING: `arrow.element` must be child of its popper element!"),e;var r=e.placement.split("-")[0],o=e.offsets,s=o.popper,a=o.reference,l=-1!==["left","right"].indexOf(r),c=l?"height":"width",u=l?"Top":"Left",f=u.toLowerCase(),h=l?"left":"top",d=l?"bottom":"right",p=nt(i)[c];a[d]-p<s[f]&&(e.offsets.popper[f]-=s[f]-(a[d]-p)),a[f]+p>s[d]&&(e.offsets.popper[f]+=a[f]+p-s[d]),e.offsets.popper=Ge(e.offsets.popper);var m=a[f]+a[c]/2-p/2,g=Pe(e.instance.popper),_=parseFloat(g["margin"+u],10),v=parseFloat(g["border"+u+"Width"],10),y=m-e.offsets.popper[f]-_-v;return y=Math.max(Math.min(s[c]-p,y),0),e.arrowElement=i,e.offsets.arrow=(Ve(n={},f,Math.round(y)),Ve(n,h,""),n),e},element:"[x-arrow]"},flip:{order:600,enabled:!0,fn:function(p,m){if(at(p.instance.modifiers,"inner"))return p;if(p.flipped&&p.placement===p.originalPlacement)return p;var g=$e(p.instance.popper,p.instance.reference,m.padding,m.boundariesElement,p.positionFixed),_=p.placement.split("-")[0],v=it(_),y=p.placement.split("-")[1]||"",E=[];switch(m.behavior){case vt:E=[_,v];break;case yt:E=_t(_);break;case Et:E=_t(_,!0);break;default:E=m.behavior}return E.forEach(function(e,t){if(_!==e||E.length===t+1)return p;_=p.placement.split("-")[0],v=it(_);var 
n,i=p.offsets.popper,r=p.offsets.reference,o=Math.floor,s="left"===_&&o(i.right)>o(r.left)||"right"===_&&o(i.left)<o(r.right)||"top"===_&&o(i.bottom)>o(r.top)||"bottom"===_&&o(i.top)<o(r.bottom),a=o(i.left)<o(g.left),l=o(i.right)>o(g.right),c=o(i.top)<o(g.top),u=o(i.bottom)>o(g.bottom),f="left"===_&&a||"right"===_&&l||"top"===_&&c||"bottom"===_&&u,h=-1!==["top","bottom"].indexOf(_),d=!!m.flipVariations&&(h&&"start"===y&&a||h&&"end"===y&&l||!h&&"start"===y&&c||!h&&"end"===y&&u);(s||f||d)&&(p.flipped=!0,(s||f)&&(_=E[t+1]),d&&(y="end"===(n=y)?"start":"start"===n?"end":n),p.placement=_+(y?"-"+y:""),p.offsets.popper=ze({},p.offsets.popper,rt(p.instance.popper,p.offsets.reference,p.placement)),p=st(p.instance.modifiers,p,"flip"))}),p},behavior:"flip",padding:5,boundariesElement:"viewport"},inner:{order:700,enabled:!1,fn:function(e){var t=e.placement,n=t.split("-")[0],i=e.offsets,r=i.popper,o=i.reference,s=-1!==["left","right"].indexOf(n),a=-1===["top","left"].indexOf(n);return r[s?"left":"top"]=o[n]-(a?r[s?"width":"height"]:0),e.placement=it(t),e.offsets.popper=Ge(r),e}},hide:{order:800,enabled:!0,fn:function(e){if(!pt(e.instance.modifiers,"hide","preventOverflow"))return e;var t=e.offsets.reference,n=ot(e.instance.modifiers,function(e){return"preventOverflow"===e.name}).boundaries;if(t.bottom<n.top||t.left>n.right||t.top>n.bottom||t.right<n.left){if(!0===e.hide)return e;e.hide=!0,e.attributes["x-out-of-boundaries"]=""}else{if(!1===e.hide)return e;e.hide=!1,e.attributes["x-out-of-boundaries"]=!1}return e}},computeStyle:{order:850,enabled:!0,fn:function(e,t){var n=t.x,i=t.y,r=e.offsets.popper,o=ot(e.instance.modifiers,function(e){return"applyStyle"===e.name}).gpuAcceleration;void 0!==o&&console.warn("WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!");var s=void 0!==o?o:t.gpuAcceleration,a=Je(We(e.instance.popper)),l={position:r.position},c={left:Math.floor(r.left),top:Math.round(r.top),bottom:Math.round(r.bottom),right:Math.floor(r.right)},u="bottom"===n?"top":"bottom",f="right"===i?"left":"right",h=lt("transform"),d=void 0,p=void 0;if(p="bottom"===u?-a.height+c.bottom:c.top,d="right"===f?-a.width+c.right:c.left,s&&h)l[h]="translate3d("+d+"px, "+p+"px, 0)",l[u]=0,l[f]=0,l.willChange="transform";else{var m="bottom"===u?-1:1,g="right"===f?-1:1;l[u]=p*m,l[f]=d*g,l.willChange=u+", "+f}var _={"x-placement":e.placement};return e.attributes=ze({},_,e.attributes),e.styles=ze({},l,e.styles),e.arrowStyles=ze({},e.offsets.arrow,e.arrowStyles),e},gpuAcceleration:!0,x:"bottom",y:"right"},applyStyle:{order:900,enabled:!0,fn:function(e){var t,n;return dt(e.instance.popper,e.styles),t=e.instance.popper,n=e.attributes,Object.keys(n).forEach(function(e){!1!==n[e]?t.setAttribute(e,n[e]):t.removeAttribute(e)}),e.arrowElement&&Object.keys(e.arrowStyles).length&&dt(e.arrowElement,e.arrowStyles),e},onLoad:function(e,t,n,i,r){var o=tt(r,t,e,n.positionFixed),s=et(n.placement,o,t,e,n.modifiers.flip.boundariesElement,n.modifiers.flip.padding);return t.setAttribute("x-placement",s),dt(t,{position:n.positionFixed?"fixed":"absolute"}),n},gpuAcceleration:void 0}}},Ct=function(){function o(e,t){var n=this,i=2<arguments.length&&void 0!==arguments[2]?arguments[2]:{};!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o),this.scheduleUpdate=function(){return 
requestAnimationFrame(n.update)},this.update=ke(this.update.bind(this)),this.options=ze({},o.Defaults,i),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=e&&e.jquery?e[0]:e,this.popper=t&&t.jquery?t[0]:t,this.options.modifiers={},Object.keys(ze({},o.Defaults.modifiers,i.modifiers)).forEach(function(e){n.options.modifiers[e]=ze({},o.Defaults.modifiers[e]||{},i.modifiers?i.modifiers[e]:{})}),this.modifiers=Object.keys(this.options.modifiers).map(function(e){return ze({name:e},n.options.modifiers[e])}).sort(function(e,t){return e.order-t.order}),this.modifiers.forEach(function(e){e.enabled&&xe(e.onLoad)&&e.onLoad(n.reference,n.popper,n.options,e,n.state)}),this.update();var r=this.options.eventsEnabled;r&&this.enableEventListeners(),this.state.eventsEnabled=r}return Ye(o,[{key:"update",value:function(){return function(){if(!this.state.isDestroyed){var e={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};e.offsets.reference=tt(this.state,this.popper,this.reference,this.options.positionFixed),e.placement=et(this.options.placement,e.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),e.originalPlacement=e.placement,e.positionFixed=this.options.positionFixed,e.offsets.popper=rt(this.popper,e.offsets.reference,e.placement),e.offsets.popper.position=this.options.positionFixed?"fixed":"absolute",e=st(this.modifiers,e),this.state.isCreated?this.options.onUpdate(e):(this.state.isCreated=!0,this.options.onCreate(e))}}.call(this)}},{key:"destroy",value:function(){return function(){return this.state.isDestroyed=!0,at(this.modifiers,"applyStyle")&&(this.popper.removeAttribute("x-placement"),this.popper.style.position="",this.popper.style.top="",this.popper.style.left="",this.popper.style.right="",this.popper.style.bottom="",this.popper.style.willChange="",this.popper.style[lt("transform")]=""),this.disableEventListeners(),this.options.removeOnDestroy&&this.popper.parentNode.removeChild(this.popper),this}.call(this)}},{key:"enableEventListeners",value:function(){return function(){this.state.eventsEnabled||(this.state=ut(this.reference,this.options,this.state,this.scheduleUpdate))}.call(this)}},{key:"disableEventListeners",value:function(){return ft.call(this)}}]),o}();Ct.Utils=("undefined"!=typeof window?window:global).PopperUtils,Ct.placements=mt,Ct.Defaults=wt;var Tt,St,Dt,At,It,Ot,Nt,kt,xt,Pt,Lt,jt,Ht,Mt,Ft,Wt,Rt,Ut,Bt,qt,Kt,Qt,Yt,Vt,zt,Gt,Jt,Zt,Xt,$t,en,tn,nn,rn,on,sn,an,ln,cn,un,fn,hn,dn,pn,mn,gn,_n,vn,yn,En,bn,wn,Cn,Tn,Sn,Dn,An,In,On,Nn,kn,xn,Pn,Ln,jn,Hn,Mn,Fn,Wn,Rn,Un,Bn,qn,Kn,Qn,Yn,Vn,zn,Gn,Jn,Zn,Xn,$n,ei,ti,ni,ii,ri,oi,si,ai,li,ci,ui,fi,hi,di,pi,mi,gi,_i,vi,yi,Ei,bi,wi,Ci,Ti,Si,Di,Ai,Ii,Oi,Ni,ki,xi,Pi,Li,ji,Hi,Mi,Fi,Wi,Ri,Ui,Bi=(St="dropdown",At="."+(Dt="bs.dropdown"),It=".data-api",Ot=(Tt=t).fn[St],Nt=new RegExp("38|40|27"),kt={HIDE:"hide"+At,HIDDEN:"hidden"+At,SHOW:"show"+At,SHOWN:"shown"+At,CLICK:"click"+At,CLICK_DATA_API:"click"+At+It,KEYDOWN_DATA_API:"keydown"+At+It,KEYUP_DATA_API:"keyup"+At+It},xt="disabled",Pt="show",Lt="dropup",jt="dropright",Ht="dropleft",Mt="dropdown-menu-right",Ft="position-static",Wt='[data-toggle="dropdown"]',Rt=".dropdown form",Ut=".dropdown-menu",Bt=".navbar-nav",qt=".dropdown-menu 
.dropdown-item:not(.disabled):not(:disabled)",Kt="top-start",Qt="top-end",Yt="bottom-start",Vt="bottom-end",zt="right-start",Gt="left-start",Jt={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic"},Zt={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string"},Xt=function(){function c(e,t){this._element=e,this._popper=null,this._config=this._getConfig(t),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var e=c.prototype;return e.toggle=function(){if(!this._element.disabled&&!Tt(this._element).hasClass(xt)){var e=c._getParentFromElement(this._element),t=Tt(this._menu).hasClass(Pt);if(c._clearMenus(),!t){var n={relatedTarget:this._element},i=Tt.Event(kt.SHOW,n);if(Tt(e).trigger(i),!i.isDefaultPrevented()){if(!this._inNavbar){if("undefined"==typeof Ct)throw new TypeError("Bootstrap dropdown require Popper.js (https://popper.js.org)");var r=this._element;"parent"===this._config.reference?r=e:we.isElement(this._config.reference)&&(r=this._config.reference,"undefined"!=typeof this._config.reference.jquery&&(r=this._config.reference[0])),"scrollParent"!==this._config.boundary&&Tt(e).addClass(Ft),this._popper=new Ct(r,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===Tt(e).closest(Bt).length&&Tt(document.body).children().on("mouseover",null,Tt.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),Tt(this._menu).toggleClass(Pt),Tt(e).toggleClass(Pt).trigger(Tt.Event(kt.SHOWN,n))}}}},e.dispose=function(){Tt.removeData(this._element,Dt),Tt(this._element).off(At),this._element=null,(this._menu=null)!==this._popper&&(this._popper.destroy(),this._popper=null)},e.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},e._addEventListeners=function(){var t=this;Tt(this._element).on(kt.CLICK,function(e){e.preventDefault(),e.stopPropagation(),t.toggle()})},e._getConfig=function(e){return e=l({},this.constructor.Default,Tt(this._element).data(),e),we.typeCheckConfig(St,e,this.constructor.DefaultType),e},e._getMenuElement=function(){if(!this._menu){var e=c._getParentFromElement(this._element);e&&(this._menu=e.querySelector(Ut))}return this._menu},e._getPlacement=function(){var e=Tt(this._element.parentNode),t=Yt;return e.hasClass(Lt)?(t=Kt,Tt(this._menu).hasClass(Mt)&&(t=Qt)):e.hasClass(jt)?t=zt:e.hasClass(Ht)?t=Gt:Tt(this._menu).hasClass(Mt)&&(t=Vt),t},e._detectNavbar=function(){return 0<Tt(this._element).closest(".navbar").length},e._getPopperConfig=function(){var t=this,e={};"function"==typeof this._config.offset?e.fn=function(e){return e.offsets=l({},e.offsets,t._config.offset(e.offsets)||{}),e}:e.offset=this._config.offset;var n={placement:this._getPlacement(),modifiers:{offset:e,flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return"static"===this._config.display&&(n.modifiers.applyStyle={enabled:!1}),n},c._jQueryInterface=function(t){return this.each(function(){var e=Tt(this).data(Dt);if(e||(e=new c(this,"object"==typeof t?t:null),Tt(this).data(Dt,e)),"string"==typeof t){if("undefined"==typeof e[t])throw new TypeError('No method named "'+t+'"');e[t]()}})},c._clearMenus=function(e){if(!e||3!==e.which&&("keyup"!==e.type||9===e.which))for(var t=[].slice.call(document.querySelectorAll(Wt)),n=0,i=t.length;n<i;n++){var 
r=c._getParentFromElement(t[n]),o=Tt(t[n]).data(Dt),s={relatedTarget:t[n]};if(e&&"click"===e.type&&(s.clickEvent=e),o){var a=o._menu;if(Tt(r).hasClass(Pt)&&!(e&&("click"===e.type&&/input|textarea/i.test(e.target.tagName)||"keyup"===e.type&&9===e.which)&&Tt.contains(r,e.target))){var l=Tt.Event(kt.HIDE,s);Tt(r).trigger(l),l.isDefaultPrevented()||("ontouchstart"in document.documentElement&&Tt(document.body).children().off("mouseover",null,Tt.noop),t[n].setAttribute("aria-expanded","false"),Tt(a).removeClass(Pt),Tt(r).removeClass(Pt).trigger(Tt.Event(kt.HIDDEN,s)))}}}},c._getParentFromElement=function(e){var t,n=we.getSelectorFromElement(e);return n&&(t=document.querySelector(n)),t||e.parentNode},c._dataApiKeydownHandler=function(e){if((/input|textarea/i.test(e.target.tagName)?!(32===e.which||27!==e.which&&(40!==e.which&&38!==e.which||Tt(e.target).closest(Ut).length)):Nt.test(e.which))&&(e.preventDefault(),e.stopPropagation(),!this.disabled&&!Tt(this).hasClass(xt))){var t=c._getParentFromElement(this),n=Tt(t).hasClass(Pt);if((n||27===e.which&&32===e.which)&&(!n||27!==e.which&&32!==e.which)){var i=[].slice.call(t.querySelectorAll(qt));if(0!==i.length){var r=i.indexOf(e.target);38===e.which&&0<r&&r--,40===e.which&&r<i.length-1&&r++,r<0&&(r=0),i[r].focus()}}else{if(27===e.which){var o=t.querySelector(Wt);Tt(o).trigger("focus")}Tt(this).trigger("click")}}},s(c,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return Jt}},{key:"DefaultType",get:function(){return Zt}}]),c}(),Tt(document).on(kt.KEYDOWN_DATA_API,Wt,Xt._dataApiKeydownHandler).on(kt.KEYDOWN_DATA_API,Ut,Xt._dataApiKeydownHandler).on(kt.CLICK_DATA_API+" "+kt.KEYUP_DATA_API,Xt._clearMenus).on(kt.CLICK_DATA_API,Wt,function(e){e.preventDefault(),e.stopPropagation(),Xt._jQueryInterface.call(Tt(this),"toggle")}).on(kt.CLICK_DATA_API,Rt,function(e){e.stopPropagation()}),Tt.fn[St]=Xt._jQueryInterface,Tt.fn[St].Constructor=Xt,Tt.fn[St].noConflict=function(){return Tt.fn[St]=Ot,Xt._jQueryInterface},Xt),qi=(en="modal",nn="."+(tn="bs.modal"),rn=($t=t).fn[en],on={backdrop:!0,keyboard:!0,focus:!0,show:!0},sn={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},an={HIDE:"hide"+nn,HIDDEN:"hidden"+nn,SHOW:"show"+nn,SHOWN:"shown"+nn,FOCUSIN:"focusin"+nn,RESIZE:"resize"+nn,CLICK_DISMISS:"click.dismiss"+nn,KEYDOWN_DISMISS:"keydown.dismiss"+nn,MOUSEUP_DISMISS:"mouseup.dismiss"+nn,MOUSEDOWN_DISMISS:"mousedown.dismiss"+nn,CLICK_DATA_API:"click"+nn+".data-api"},ln="modal-scrollbar-measure",cn="modal-backdrop",un="modal-open",fn="fade",hn="show",dn=".modal-dialog",pn='[data-toggle="modal"]',mn='[data-dismiss="modal"]',gn=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",_n=".sticky-top",vn=function(){function r(e,t){this._config=this._getConfig(t),this._element=e,this._dialog=e.querySelector(dn),this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._scrollbarWidth=0}var e=r.prototype;return e.toggle=function(e){return this._isShown?this.hide():this.show(e)},e.show=function(e){var t=this;if(!this._isTransitioning&&!this._isShown){$t(this._element).hasClass(fn)&&(this._isTransitioning=!0);var n=$t.Event(an.SHOW,{relatedTarget:e});$t(this._element).trigger(n),this._isShown||n.isDefaultPrevented()||(this._isShown=!0,this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),$t(document.body).addClass(un),this._setEscapeEvent(),this._setResizeEvent(),$t(this._element).on(an.CLICK_DISMISS,mn,function(e){return 
t.hide(e)}),$t(this._dialog).on(an.MOUSEDOWN_DISMISS,function(){$t(t._element).one(an.MOUSEUP_DISMISS,function(e){$t(e.target).is(t._element)&&(t._ignoreBackdropClick=!0)})}),this._showBackdrop(function(){return t._showElement(e)}))}},e.hide=function(e){var t=this;if(e&&e.preventDefault(),!this._isTransitioning&&this._isShown){var n=$t.Event(an.HIDE);if($t(this._element).trigger(n),this._isShown&&!n.isDefaultPrevented()){this._isShown=!1;var i=$t(this._element).hasClass(fn);if(i&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),$t(document).off(an.FOCUSIN),$t(this._element).removeClass(hn),$t(this._element).off(an.CLICK_DISMISS),$t(this._dialog).off(an.MOUSEDOWN_DISMISS),i){var r=we.getTransitionDurationFromElement(this._element);$t(this._element).one(we.TRANSITION_END,function(e){return t._hideModal(e)}).emulateTransitionEnd(r)}else this._hideModal()}}},e.dispose=function(){$t.removeData(this._element,tn),$t(window,document,this._element,this._backdrop).off(nn),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._scrollbarWidth=null},e.handleUpdate=function(){this._adjustDialog()},e._getConfig=function(e){return e=l({},on,e),we.typeCheckConfig(en,e,sn),e},e._showElement=function(e){var t=this,n=$t(this._element).hasClass(fn);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.scrollTop=0,n&&we.reflow(this._element),$t(this._element).addClass(hn),this._config.focus&&this._enforceFocus();var i=$t.Event(an.SHOWN,{relatedTarget:e}),r=function(){t._config.focus&&t._element.focus(),t._isTransitioning=!1,$t(t._element).trigger(i)};if(n){var o=we.getTransitionDurationFromElement(this._element);$t(this._dialog).one(we.TRANSITION_END,r).emulateTransitionEnd(o)}else r()},e._enforceFocus=function(){var t=this;$t(document).off(an.FOCUSIN).on(an.FOCUSIN,function(e){document!==e.target&&t._element!==e.target&&0===$t(t._element).has(e.target).length&&t._element.focus()})},e._setEscapeEvent=function(){var t=this;this._isShown&&this._config.keyboard?$t(this._element).on(an.KEYDOWN_DISMISS,function(e){27===e.which&&(e.preventDefault(),t.hide())}):this._isShown||$t(this._element).off(an.KEYDOWN_DISMISS)},e._setResizeEvent=function(){var t=this;this._isShown?$t(window).on(an.RESIZE,function(e){return t.handleUpdate(e)}):$t(window).off(an.RESIZE)},e._hideModal=function(){var e=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._isTransitioning=!1,this._showBackdrop(function(){$t(document.body).removeClass(un),e._resetAdjustments(),e._resetScrollbar(),$t(e._element).trigger(an.HIDDEN)})},e._removeBackdrop=function(){this._backdrop&&($t(this._backdrop).remove(),this._backdrop=null)},e._showBackdrop=function(e){var t=this,n=$t(this._element).hasClass(fn)?fn:"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className=cn,n&&this._backdrop.classList.add(n),$t(this._backdrop).appendTo(document.body),$t(this._element).on(an.CLICK_DISMISS,function(e){t._ignoreBackdropClick?t._ignoreBackdropClick=!1:e.target===e.currentTarget&&("static"===t._config.backdrop?t._element.focus():t.hide())}),n&&we.reflow(this._backdrop),$t(this._backdrop).addClass(hn),!e)return;if(!n)return void e();var 
i=we.getTransitionDurationFromElement(this._backdrop);$t(this._backdrop).one(we.TRANSITION_END,e).emulateTransitionEnd(i)}else if(!this._isShown&&this._backdrop){$t(this._backdrop).removeClass(hn);var r=function(){t._removeBackdrop(),e&&e()};if($t(this._element).hasClass(fn)){var o=we.getTransitionDurationFromElement(this._backdrop);$t(this._backdrop).one(we.TRANSITION_END,r).emulateTransitionEnd(o)}else r()}else e&&e()},e._adjustDialog=function(){var e=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&e&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!e&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var e=document.body.getBoundingClientRect();this._isBodyOverflowing=e.left+e.right<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},e._setScrollbar=function(){var r=this;if(this._isBodyOverflowing){var e=[].slice.call(document.querySelectorAll(gn)),t=[].slice.call(document.querySelectorAll(_n));$t(e).each(function(e,t){var n=t.style.paddingRight,i=$t(t).css("padding-right");$t(t).data("padding-right",n).css("padding-right",parseFloat(i)+r._scrollbarWidth+"px")}),$t(t).each(function(e,t){var n=t.style.marginRight,i=$t(t).css("margin-right");$t(t).data("margin-right",n).css("margin-right",parseFloat(i)-r._scrollbarWidth+"px")});var n=document.body.style.paddingRight,i=$t(document.body).css("padding-right");$t(document.body).data("padding-right",n).css("padding-right",parseFloat(i)+this._scrollbarWidth+"px")}},e._resetScrollbar=function(){var e=[].slice.call(document.querySelectorAll(gn));$t(e).each(function(e,t){var n=$t(t).data("padding-right");$t(t).removeData("padding-right"),t.style.paddingRight=n||""});var t=[].slice.call(document.querySelectorAll(""+_n));$t(t).each(function(e,t){var n=$t(t).data("margin-right");"undefined"!=typeof n&&$t(t).css("margin-right",n).removeData("margin-right")});var n=$t(document.body).data("padding-right");$t(document.body).removeData("padding-right"),document.body.style.paddingRight=n||""},e._getScrollbarWidth=function(){var e=document.createElement("div");e.className=ln,document.body.appendChild(e);var t=e.getBoundingClientRect().width-e.clientWidth;return document.body.removeChild(e),t},r._jQueryInterface=function(n,i){return this.each(function(){var e=$t(this).data(tn),t=l({},on,$t(this).data(),"object"==typeof n&&n?n:{});if(e||(e=new r(this,t),$t(this).data(tn,e)),"string"==typeof n){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n](i)}else t.show&&e.show(i)})},s(r,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return on}}]),r}(),$t(document).on(an.CLICK_DATA_API,pn,function(e){var t,n=this,i=we.getSelectorFromElement(this);i&&(t=document.querySelector(i));var r=$t(t).data(tn)?"toggle":l({},$t(t).data(),$t(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||e.preventDefault();var o=$t(t).one(an.SHOW,function(e){e.isDefaultPrevented()||o.one(an.HIDDEN,function(){$t(n).is(":visible")&&n.focus()})});vn._jQueryInterface.call($t(t),r,this)}),$t.fn[en]=vn._jQueryInterface,$t.fn[en].Constructor=vn,$t.fn[en].noConflict=function(){return $t.fn[en]=rn,vn._jQueryInterface},vn),Ki=(En="tooltip",wn="."+(bn="bs.tooltip"),Cn=(yn=t).fn[En],Tn="bs-tooltip",Sn=new RegExp("(^|\\s)"+Tn+"\\S+","g"),In={animation:!0,template:'<div class="tooltip" role="tooltip"><div 
class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!(An={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"}),selector:!(Dn={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)"}),placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent"},Nn="out",kn={HIDE:"hide"+wn,HIDDEN:"hidden"+wn,SHOW:(On="show")+wn,SHOWN:"shown"+wn,INSERTED:"inserted"+wn,CLICK:"click"+wn,FOCUSIN:"focusin"+wn,FOCUSOUT:"focusout"+wn,MOUSEENTER:"mouseenter"+wn,MOUSELEAVE:"mouseleave"+wn},xn="fade",Pn="show",Ln=".tooltip-inner",jn=".arrow",Hn="hover",Mn="focus",Fn="click",Wn="manual",Rn=function(){function i(e,t){if("undefined"==typeof Ct)throw new TypeError("Bootstrap tooltips require Popper.js (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=e,this.config=this._getConfig(t),this.tip=null,this._setListeners()}var e=i.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(e){if(this._isEnabled)if(e){var t=this.constructor.DATA_KEY,n=yn(e.currentTarget).data(t);n||(n=new this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(t,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(yn(this.getTipElement()).hasClass(Pn))return void this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),yn.removeData(this.element,this.constructor.DATA_KEY),yn(this.element).off(this.constructor.EVENT_KEY),yn(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&yn(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,(this._activeTrigger=null)!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===yn(this.element).css("display"))throw new Error("Please use show on visible elements");var e=yn.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){yn(this.element).trigger(e);var n=yn.contains(this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!n)return;var i=this.getTipElement(),r=we.getUID(this.constructor.NAME);i.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&yn(i).addClass(xn);var o="function"==typeof this.config.placement?this.config.placement.call(this,i,this.element):this.config.placement,s=this._getAttachment(o);this.addAttachmentClass(s);var a=!1===this.config.container?document.body:yn(document).find(this.config.container);yn(i).data(this.constructor.DATA_KEY,this),yn.contains(this.element.ownerDocument.documentElement,this.tip)||yn(i).appendTo(a),yn(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new 
Ct(this.element,i,{placement:s,modifiers:{offset:{offset:this.config.offset},flip:{behavior:this.config.fallbackPlacement},arrow:{element:jn},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(e){e.originalPlacement!==e.placement&&t._handlePopperPlacementChange(e)},onUpdate:function(e){t._handlePopperPlacementChange(e)}}),yn(i).addClass(Pn),"ontouchstart"in document.documentElement&&yn(document.body).children().on("mouseover",null,yn.noop);var l=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,yn(t.element).trigger(t.constructor.Event.SHOWN),e===Nn&&t._leave(null,t)};if(yn(this.tip).hasClass(xn)){var c=we.getTransitionDurationFromElement(this.tip);yn(this.tip).one(we.TRANSITION_END,l).emulateTransitionEnd(c)}else l()}},e.hide=function(e){var t=this,n=this.getTipElement(),i=yn.Event(this.constructor.Event.HIDE),r=function(){t._hoverState!==On&&n.parentNode&&n.parentNode.removeChild(n),t._cleanTipClass(),t.element.removeAttribute("aria-describedby"),yn(t.element).trigger(t.constructor.Event.HIDDEN),null!==t._popper&&t._popper.destroy(),e&&e()};if(yn(this.element).trigger(i),!i.isDefaultPrevented()){if(yn(n).removeClass(Pn),"ontouchstart"in document.documentElement&&yn(document.body).children().off("mouseover",null,yn.noop),this._activeTrigger[Fn]=!1,this._activeTrigger[Mn]=!1,this._activeTrigger[Hn]=!1,yn(this.tip).hasClass(xn)){var o=we.getTransitionDurationFromElement(n);yn(n).one(we.TRANSITION_END,r).emulateTransitionEnd(o)}else r();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(e){yn(this.getTipElement()).addClass(Tn+"-"+e)},e.getTipElement=function(){return this.tip=this.tip||yn(this.config.template)[0],this.tip},e.setContent=function(){var e=this.getTipElement();this.setElementContent(yn(e.querySelectorAll(Ln)),this.getTitle()),yn(e).removeClass(xn+" "+Pn)},e.setElementContent=function(e,t){var n=this.config.html;"object"==typeof t&&(t.nodeType||t.jquery)?n?yn(t).parent().is(e)||e.empty().append(t):e.text(yn(t).text()):e[n?"html":"text"](t)},e.getTitle=function(){var e=this.element.getAttribute("data-original-title");return e||(e="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),e},e._getAttachment=function(e){return An[e.toUpperCase()]},e._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(e){if("click"===e)yn(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(e){return i.toggle(e)});else if(e!==Wn){var t=e===Hn?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=e===Hn?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;yn(i.element).on(t,i.config.selector,function(e){return i._enter(e)}).on(n,i.config.selector,function(e){return i._leave(e)})}yn(i.element).closest(".modal").on("hide.bs.modal",function(){return i.hide()})}),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var e=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==e)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(e,t){var n=this.constructor.DATA_KEY;(t=t||yn(e.currentTarget).data(n))||(t=new 
this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusin"===e.type?Mn:Hn]=!0),yn(t.getTipElement()).hasClass(Pn)||t._hoverState===On?t._hoverState=On:(clearTimeout(t._timeout),t._hoverState=On,t.config.delay&&t.config.delay.show?t._timeout=setTimeout(function(){t._hoverState===On&&t.show()},t.config.delay.show):t.show())},e._leave=function(e,t){var n=this.constructor.DATA_KEY;(t=t||yn(e.currentTarget).data(n))||(t=new this.constructor(e.currentTarget,this._getDelegateConfig()),yn(e.currentTarget).data(n,t)),e&&(t._activeTrigger["focusout"===e.type?Mn:Hn]=!1),t._isWithActiveTrigger()||(clearTimeout(t._timeout),t._hoverState=Nn,t.config.delay&&t.config.delay.hide?t._timeout=setTimeout(function(){t._hoverState===Nn&&t.hide()},t.config.delay.hide):t.hide())},e._isWithActiveTrigger=function(){for(var e in this._activeTrigger)if(this._activeTrigger[e])return!0;return!1},e._getConfig=function(e){return"number"==typeof(e=l({},this.constructor.Default,yn(this.element).data(),"object"==typeof e&&e?e:{})).delay&&(e.delay={show:e.delay,hide:e.delay}),"number"==typeof e.title&&(e.title=e.title.toString()),"number"==typeof e.content&&(e.content=e.content.toString()),we.typeCheckConfig(En,e,this.constructor.DefaultType),e},e._getDelegateConfig=function(){var e={};if(this.config)for(var t in this.config)this.constructor.Default[t]!==this.config[t]&&(e[t]=this.config[t]);return e},e._cleanTipClass=function(){var e=yn(this.getTipElement()),t=e.attr("class").match(Sn);null!==t&&t.length&&e.removeClass(t.join(""))},e._handlePopperPlacementChange=function(e){var t=e.instance;this.tip=t.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(e.placement))},e._fixTransition=function(){var e=this.getTipElement(),t=this.config.animation;null===e.getAttribute("x-placement")&&(yn(e).removeClass(xn),this.config.animation=!1,this.hide(),this.show(),this.config.animation=t)},i._jQueryInterface=function(n){return this.each(function(){var e=yn(this).data(bn),t="object"==typeof n&&n;if((e||!/dispose|hide/.test(n))&&(e||(e=new i(this,t),yn(this).data(bn,e)),"string"==typeof n)){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return In}},{key:"NAME",get:function(){return En}},{key:"DATA_KEY",get:function(){return bn}},{key:"Event",get:function(){return kn}},{key:"EVENT_KEY",get:function(){return wn}},{key:"DefaultType",get:function(){return Dn}}]),i}(),yn.fn[En]=Rn._jQueryInterface,yn.fn[En].Constructor=Rn,yn.fn[En].noConflict=function(){return yn.fn[En]=Cn,Rn._jQueryInterface},Rn),Qi=(Bn="popover",Kn="."+(qn="bs.popover"),Qn=(Un=t).fn[Bn],Yn="bs-popover",Vn=new RegExp("(^|\\s)"+Yn+"\\S+","g"),zn=l({},Ki.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'}),Gn=l({},Ki.DefaultType,{content:"(string|element|function)"}),Jn="fade",Xn=".popover-header",$n=".popover-body",ei={HIDE:"hide"+Kn,HIDDEN:"hidden"+Kn,SHOW:(Zn="show")+Kn,SHOWN:"shown"+Kn,INSERTED:"inserted"+Kn,CLICK:"click"+Kn,FOCUSIN:"focusin"+Kn,FOCUSOUT:"focusout"+Kn,MOUSEENTER:"mouseenter"+Kn,MOUSELEAVE:"mouseleave"+Kn},ti=function(e){var t,n;function i(){return e.apply(this,arguments)||this}n=e,(t=i).prototype=Object.create(n.prototype),(t.prototype.constructor=t).__proto__=n;var r=i.prototype;return 
r.isWithContent=function(){return this.getTitle()||this._getContent()},r.addAttachmentClass=function(e){Un(this.getTipElement()).addClass(Yn+"-"+e)},r.getTipElement=function(){return this.tip=this.tip||Un(this.config.template)[0],this.tip},r.setContent=function(){var e=Un(this.getTipElement());this.setElementContent(e.find(Xn),this.getTitle());var t=this._getContent();"function"==typeof t&&(t=t.call(this.element)),this.setElementContent(e.find($n),t),e.removeClass(Jn+" "+Zn)},r._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},r._cleanTipClass=function(){var e=Un(this.getTipElement()),t=e.attr("class").match(Vn);null!==t&&0<t.length&&e.removeClass(t.join(""))},i._jQueryInterface=function(n){return this.each(function(){var e=Un(this).data(qn),t="object"==typeof n?n:null;if((e||!/destroy|hide/.test(n))&&(e||(e=new i(this,t),Un(this).data(qn,e)),"string"==typeof n)){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return zn}},{key:"NAME",get:function(){return Bn}},{key:"DATA_KEY",get:function(){return qn}},{key:"Event",get:function(){return ei}},{key:"EVENT_KEY",get:function(){return Kn}},{key:"DefaultType",get:function(){return Gn}}]),i}(Ki),Un.fn[Bn]=ti._jQueryInterface,Un.fn[Bn].Constructor=ti,Un.fn[Bn].noConflict=function(){return Un.fn[Bn]=Qn,ti._jQueryInterface},ti),Yi=(ii="scrollspy",oi="."+(ri="bs.scrollspy"),si=(ni=t).fn[ii],ai={offset:10,method:"auto",target:""},li={offset:"number",method:"string",target:"(string|element)"},ci={ACTIVATE:"activate"+oi,SCROLL:"scroll"+oi,LOAD_DATA_API:"load"+oi+".data-api"},ui="dropdown-item",fi="active",hi='[data-spy="scroll"]',di=".active",pi=".nav, .list-group",mi=".nav-link",gi=".nav-item",_i=".list-group-item",vi=".dropdown",yi=".dropdown-item",Ei=".dropdown-toggle",bi="offset",wi="position",Ci=function(){function n(e,t){var n=this;this._element=e,this._scrollElement="BODY"===e.tagName?window:e,this._config=this._getConfig(t),this._selector=this._config.target+" "+mi+","+this._config.target+" "+_i+","+this._config.target+" "+yi,this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,ni(this._scrollElement).on(ci.SCROLL,function(e){return n._process(e)}),this.refresh(),this._process()}var e=n.prototype;return e.refresh=function(){var t=this,e=this._scrollElement===this._scrollElement.window?bi:wi,r="auto"===this._config.method?e:this._config.method,o=r===wi?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map(function(e){var t,n=we.getSelectorFromElement(e);if(n&&(t=document.querySelector(n)),t){var i=t.getBoundingClientRect();if(i.width||i.height)return[ni(t)[r]().top+o,n]}return null}).filter(function(e){return e}).sort(function(e,t){return e[0]-t[0]}).forEach(function(e){t._offsets.push(e[0]),t._targets.push(e[1])})},e.dispose=function(){ni.removeData(this._element,ri),ni(this._scrollElement).off(oi),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},e._getConfig=function(e){if("string"!=typeof(e=l({},ai,"object"==typeof e&&e?e:{})).target){var t=ni(e.target).attr("id");t||(t=we.getUID(ii),ni(e.target).attr("id",t)),e.target="#"+t}return we.typeCheckConfig(ii,e,li),e},e._getScrollTop=function(){return 
this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},e._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},e._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},e._process=function(){var e=this._getScrollTop()+this._config.offset,t=this._getScrollHeight(),n=this._config.offset+t-this._getOffsetHeight();if(this._scrollHeight!==t&&this.refresh(),n<=e){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&e<this._offsets[0]&&0<this._offsets[0])return this._activeTarget=null,void this._clear();for(var r=this._offsets.length;r--;){this._activeTarget!==this._targets[r]&&e>=this._offsets[r]&&("undefined"==typeof this._offsets[r+1]||e<this._offsets[r+1])&&this._activate(this._targets[r])}}},e._activate=function(t){this._activeTarget=t,this._clear();var e=this._selector.split(",");e=e.map(function(e){return e+'[data-target="'+t+'"],'+e+'[href="'+t+'"]'});var n=ni([].slice.call(document.querySelectorAll(e.join(","))));n.hasClass(ui)?(n.closest(vi).find(Ei).addClass(fi),n.addClass(fi)):(n.addClass(fi),n.parents(pi).prev(mi+", "+_i).addClass(fi),n.parents(pi).prev(gi).children(mi).addClass(fi)),ni(this._scrollElement).trigger(ci.ACTIVATE,{relatedTarget:t})},e._clear=function(){var e=[].slice.call(document.querySelectorAll(this._selector));ni(e).filter(di).removeClass(fi)},n._jQueryInterface=function(t){return this.each(function(){var e=ni(this).data(ri);if(e||(e=new n(this,"object"==typeof t&&t),ni(this).data(ri,e)),"string"==typeof t){if("undefined"==typeof e[t])throw new TypeError('No method named "'+t+'"');e[t]()}})},s(n,null,[{key:"VERSION",get:function(){return"4.1.3"}},{key:"Default",get:function(){return ai}}]),n}(),ni(window).on(ci.LOAD_DATA_API,function(){for(var e=[].slice.call(document.querySelectorAll(hi)),t=e.length;t--;){var n=ni(e[t]);Ci._jQueryInterface.call(n,n.data())}}),ni.fn[ii]=Ci._jQueryInterface,ni.fn[ii].Constructor=Ci,ni.fn[ii].noConflict=function(){return ni.fn[ii]=si,Ci._jQueryInterface},Ci),Vi=(Di="."+(Si="bs.tab"),Ai=(Ti=t).fn.tab,Ii={HIDE:"hide"+Di,HIDDEN:"hidden"+Di,SHOW:"show"+Di,SHOWN:"shown"+Di,CLICK_DATA_API:"click"+Di+".data-api"},Oi="dropdown-menu",Ni="active",ki="disabled",xi="fade",Pi="show",Li=".dropdown",ji=".nav, .list-group",Hi=".active",Mi="> li > .active",Fi='[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',Wi=".dropdown-toggle",Ri="> .dropdown-menu .active",Ui=function(){function i(e){this._element=e}var e=i.prototype;return e.show=function(){var n=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&Ti(this._element).hasClass(Ni)||Ti(this._element).hasClass(ki))){var e,i,t=Ti(this._element).closest(ji)[0],r=we.getSelectorFromElement(this._element);if(t){var o="UL"===t.nodeName?Mi:Hi;i=(i=Ti.makeArray(Ti(t).find(o)))[i.length-1]}var s=Ti.Event(Ii.HIDE,{relatedTarget:this._element}),a=Ti.Event(Ii.SHOW,{relatedTarget:i});if(i&&Ti(i).trigger(s),Ti(this._element).trigger(a),!a.isDefaultPrevented()&&!s.isDefaultPrevented()){r&&(e=document.querySelector(r)),this._activate(this._element,t);var l=function(){var 
e=Ti.Event(Ii.HIDDEN,{relatedTarget:n._element}),t=Ti.Event(Ii.SHOWN,{relatedTarget:i});Ti(i).trigger(e),Ti(n._element).trigger(t)};e?this._activate(e,e.parentNode,l):l()}}},e.dispose=function(){Ti.removeData(this._element,Si),this._element=null},e._activate=function(e,t,n){var i=this,r=("UL"===t.nodeName?Ti(t).find(Mi):Ti(t).children(Hi))[0],o=n&&r&&Ti(r).hasClass(xi),s=function(){return i._transitionComplete(e,r,n)};if(r&&o){var a=we.getTransitionDurationFromElement(r);Ti(r).one(we.TRANSITION_END,s).emulateTransitionEnd(a)}else s()},e._transitionComplete=function(e,t,n){if(t){Ti(t).removeClass(Pi+" "+Ni);var i=Ti(t.parentNode).find(Ri)[0];i&&Ti(i).removeClass(Ni),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!1)}if(Ti(e).addClass(Ni),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!0),we.reflow(e),Ti(e).addClass(Pi),e.parentNode&&Ti(e.parentNode).hasClass(Oi)){var r=Ti(e).closest(Li)[0];if(r){var o=[].slice.call(r.querySelectorAll(Wi));Ti(o).addClass(Ni)}e.setAttribute("aria-expanded",!0)}n&&n()},i._jQueryInterface=function(n){return this.each(function(){var e=Ti(this),t=e.data(Si);if(t||(t=new i(this),e.data(Si,t)),"string"==typeof n){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.1.3"}}]),i}(),Ti(document).on(Ii.CLICK_DATA_API,Fi,function(e){e.preventDefault(),Ui._jQueryInterface.call(Ti(this),"show")}),Ti.fn.tab=Ui._jQueryInterface,Ti.fn.tab.Constructor=Ui,Ti.fn.tab.noConflict=function(){return Ti.fn.tab=Ai,Ui._jQueryInterface},Ui);!function(e){if("undefined"==typeof e)throw new TypeError("Bootstrap's JavaScript requires jQuery. jQuery must be included before Bootstrap's JavaScript.");var t=e.fn.jquery.split(" ")[0].split(".");if(t[0]<2&&t[1]<9||1===t[0]&&9===t[1]&&t[2]<1||4<=t[0])throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}(t),e.Util=we,e.Alert=Ce,e.Button=Te,e.Carousel=Se,e.Collapse=De,e.Dropdown=Bi,e.Modal=qi,e.Popover=Qi,e.Scrollspy=Yi,e.Tab=Vi,e.Tooltip=Ki,Object.defineProperty(e,"__esModule",{value:!0})});
//# sourceMappingURL=bootstrap.bundle.min.js.map
| 0.019653 | 0.217483 |
(function(factory){if(typeof define==="function"&&define.amd){define(["jquery"],function($){return factory($)})}else if(typeof module==="object"&&typeof module.exports==="object"){exports=factory(require("jquery"))}else{factory(jQuery)}})(function($){$.easing.jswing=$.easing.swing;var pow=Math.pow,sqrt=Math.sqrt,sin=Math.sin,cos=Math.cos,PI=Math.PI,c1=1.70158,c2=c1*1.525,c3=c1+1,c4=2*PI/3,c5=2*PI/4.5;function bounceOut(x){var n1=7.5625,d1=2.75;if(x<1/d1){return n1*x*x}else if(x<2/d1){return n1*(x-=1.5/d1)*x+.75}else if(x<2.5/d1){return n1*(x-=2.25/d1)*x+.9375}else{return n1*(x-=2.625/d1)*x+.984375}}$.extend($.easing,{def:"easeOutQuad",swing:function(x){return $.easing[$.easing.def](x)},easeInQuad:function(x){return x*x},easeOutQuad:function(x){return 1-(1-x)*(1-x)},easeInOutQuad:function(x){return x<.5?2*x*x:1-pow(-2*x+2,2)/2},easeInCubic:function(x){return x*x*x},easeOutCubic:function(x){return 1-pow(1-x,3)},easeInOutCubic:function(x){return x<.5?4*x*x*x:1-pow(-2*x+2,3)/2},easeInQuart:function(x){return x*x*x*x},easeOutQuart:function(x){return 1-pow(1-x,4)},easeInOutQuart:function(x){return x<.5?8*x*x*x*x:1-pow(-2*x+2,4)/2},easeInQuint:function(x){return x*x*x*x*x},easeOutQuint:function(x){return 1-pow(1-x,5)},easeInOutQuint:function(x){return x<.5?16*x*x*x*x*x:1-pow(-2*x+2,5)/2},easeInSine:function(x){return 1-cos(x*PI/2)},easeOutSine:function(x){return sin(x*PI/2)},easeInOutSine:function(x){return-(cos(PI*x)-1)/2},easeInExpo:function(x){return x===0?0:pow(2,10*x-10)},easeOutExpo:function(x){return x===1?1:1-pow(2,-10*x)},easeInOutExpo:function(x){return x===0?0:x===1?1:x<.5?pow(2,20*x-10)/2:(2-pow(2,-20*x+10))/2},easeInCirc:function(x){return 1-sqrt(1-pow(x,2))},easeOutCirc:function(x){return sqrt(1-pow(x-1,2))},easeInOutCirc:function(x){return x<.5?(1-sqrt(1-pow(2*x,2)))/2:(sqrt(1-pow(-2*x+2,2))+1)/2},easeInElastic:function(x){return x===0?0:x===1?1:-pow(2,10*x-10)*sin((x*10-10.75)*c4)},easeOutElastic:function(x){return x===0?0:x===1?1:pow(2,-10*x)*sin((x*10-.75)*c4)+1},easeInOutElastic:function(x){return x===0?0:x===1?1:x<.5?-(pow(2,20*x-10)*sin((20*x-11.125)*c5))/2:pow(2,-20*x+10)*sin((20*x-11.125)*c5)/2+1},easeInBack:function(x){return c3*x*x*x-c1*x*x},easeOutBack:function(x){return 1+c3*pow(x-1,3)+c1*pow(x-1,2)},easeInOutBack:function(x){return x<.5?pow(2*x,2)*((c2+1)*2*x-c2)/2:(pow(2*x-2,2)*((c2+1)*(x*2-2)+c2)+2)/2},easeInBounce:function(x){return 1-bounceOut(1-x)},easeOutBounce:bounceOut,easeInOutBounce:function(x){return x<.5?(1-bounceOut(1-2*x))/2:(1+bounceOut(2*x-1))/2}})});
|
scheduler-front
|
/scheduler_front-0.0.11-py3-none-any.whl/scheduler_front/static/vendor/jquery-easing/jquery.easing.min.js
|
jquery.easing.min.js
|
| 0.030459 | 0.490907 |
(function (factory) {
if (typeof define === "function" && define.amd) {
define(['jquery'], function ($) {
return factory($);
});
} else if (typeof module === "object" && typeof module.exports === "object") {
exports = factory(require('jquery'));
} else {
factory(jQuery);
}
})(function($){
// Preserve the original jQuery "swing" easing as "jswing"
$.easing.jswing = $.easing.swing;
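// Constants shared by the "back" and "elastic" easings: c1 is the classic
// overshoot amount for the back curves, c2 rescales it for the in-out
// variant, c3 = c1 + 1, and c4/c5 set the oscillation period of the
// elastic curves.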
var pow = Math.pow,
sqrt = Math.sqrt,
sin = Math.sin,
cos = Math.cos,
PI = Math.PI,
c1 = 1.70158,
c2 = c1 * 1.525,
c3 = c1 + 1,
c4 = ( 2 * PI ) / 3,
c5 = ( 2 * PI ) / 4.5;
// x is the fraction of animation progress, in the range 0..1
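// bounceOut pieces together four parabolic arcs (bounces of decaying height),
// with breakpoints at x = 1/2.75, 2/2.75 and 2.5/2.75.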
function bounceOut(x) {
var n1 = 7.5625,
d1 = 2.75;
if ( x < 1/d1 ) {
return n1*x*x;
} else if ( x < 2/d1 ) {
return n1*(x-=(1.5/d1))*x + 0.75;
} else if ( x < 2.5/d1 ) {
return n1*(x-=(2.25/d1))*x + 0.9375;
} else {
return n1*(x-=(2.625/d1))*x + 0.984375;
}
}
$.extend( $.easing,
{
def: 'easeOutQuad',
swing: function (x) {
return $.easing[$.easing.def](x);
},
easeInQuad: function (x) {
return x * x;
},
easeOutQuad: function (x) {
return 1 - ( 1 - x ) * ( 1 - x );
},
easeInOutQuad: function (x) {
return x < 0.5 ?
2 * x * x :
1 - pow( -2 * x + 2, 2 ) / 2;
},
easeInCubic: function (x) {
return x * x * x;
},
easeOutCubic: function (x) {
return 1 - pow( 1 - x, 3 );
},
easeInOutCubic: function (x) {
return x < 0.5 ?
4 * x * x * x :
1 - pow( -2 * x + 2, 3 ) / 2;
},
easeInQuart: function (x) {
return x * x * x * x;
},
easeOutQuart: function (x) {
return 1 - pow( 1 - x, 4 );
},
easeInOutQuart: function (x) {
return x < 0.5 ?
8 * x * x * x * x :
1 - pow( -2 * x + 2, 4 ) / 2;
},
easeInQuint: function (x) {
return x * x * x * x * x;
},
easeOutQuint: function (x) {
return 1 - pow( 1 - x, 5 );
},
easeInOutQuint: function (x) {
return x < 0.5 ?
16 * x * x * x * x * x :
1 - pow( -2 * x + 2, 5 ) / 2;
},
easeInSine: function (x) {
return 1 - cos( x * PI/2 );
},
easeOutSine: function (x) {
return sin( x * PI/2 );
},
easeInOutSine: function (x) {
return -( cos( PI * x ) - 1 ) / 2;
},
easeInExpo: function (x) {
return x === 0 ? 0 : pow( 2, 10 * x - 10 );
},
easeOutExpo: function (x) {
return x === 1 ? 1 : 1 - pow( 2, -10 * x );
},
easeInOutExpo: function (x) {
return x === 0 ? 0 : x === 1 ? 1 : x < 0.5 ?
pow( 2, 20 * x - 10 ) / 2 :
( 2 - pow( 2, -20 * x + 10 ) ) / 2;
},
easeInCirc: function (x) {
return 1 - sqrt( 1 - pow( x, 2 ) );
},
easeOutCirc: function (x) {
return sqrt( 1 - pow( x - 1, 2 ) );
},
easeInOutCirc: function (x) {
return x < 0.5 ?
( 1 - sqrt( 1 - pow( 2 * x, 2 ) ) ) / 2 :
( sqrt( 1 - pow( -2 * x + 2, 2 ) ) + 1 ) / 2;
},
easeInElastic: function (x) {
return x === 0 ? 0 : x === 1 ? 1 :
-pow( 2, 10 * x - 10 ) * sin( ( x * 10 - 10.75 ) * c4 );
},
easeOutElastic: function (x) {
return x === 0 ? 0 : x === 1 ? 1 :
pow( 2, -10 * x ) * sin( ( x * 10 - 0.75 ) * c4 ) + 1;
},
easeInOutElastic: function (x) {
return x === 0 ? 0 : x === 1 ? 1 : x < 0.5 ?
-( pow( 2, 20 * x - 10 ) * sin( ( 20 * x - 11.125 ) * c5 )) / 2 :
pow( 2, -20 * x + 10 ) * sin( ( 20 * x - 11.125 ) * c5 ) / 2 + 1;
},
easeInBack: function (x) {
return c3 * x * x * x - c1 * x * x;
},
easeOutBack: function (x) {
return 1 + c3 * pow( x - 1, 3 ) + c1 * pow( x - 1, 2 );
},
easeInOutBack: function (x) {
return x < 0.5 ?
( pow( 2 * x, 2 ) * ( ( c2 + 1 ) * 2 * x - c2 ) ) / 2 :
( pow( 2 * x - 2, 2 ) *( ( c2 + 1 ) * ( x * 2 - 2 ) + c2 ) + 2 ) / 2;
},
easeInBounce: function (x) {
return 1 - bounceOut( 1 - x );
},
easeOutBounce: bounceOut,
easeInOutBounce: function (x) {
return x < 0.5 ?
( 1 - bounceOut( 1 - 2 * x ) ) / 2 :
( 1 + bounceOut( 2 * x - 1 ) ) / 2;
}
});
});
|
scheduler-front
|
/scheduler_front-0.0.11-py3-none-any.whl/scheduler_front/static/vendor/jquery-easing/jquery.easing.js
|
jquery.easing.js
|
| 0.303422 | 0.498596 |
# Contributing
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
## Get Started!
Ready to contribute? Here's how to set up `scheduler_tools` for local development.
* Fork the `scheduler_tools` repo on GitHub.
* Clone your fork locally:
```
$ git clone --recurse-submodules [email protected]:{your_name_here}/scheduler_tools.git
```
* Install the project in editable mode. (It is also recommended to work in a virtualenv or anaconda environment):
```
$ cd scheduler_tools/
$ pip install -e .[dev]
```
* Create a branch for local development:
```
$ git checkout -b {your_development_type}/short-description
```
Ex: feature/read-tiff-files or bugfix/handle-file-not-found<br>
Now you can make your changes locally.<br>
* When you're done making changes, check that your changes pass linting and tests, including testing other Python
versions with make:
```
$ make build
```
* Commit your changes and push your branch to GitHub:
```
$ git add .
$ git commit -m "Resolves gh-###. Your detailed description of your changes."
$ git push origin {your_development_type}/short-description
```
* Submit a pull request through the GitHub website.
## Deploying
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed.
Then run:
```
$ bumpversion patch # possible: major / minor / patch
$ git push
$ git push --tags
```
Make and merge a PR to the `stable` branch; once it is merged, GitHub will deploy to PyPI.
|
scheduler-tools
|
/scheduler_tools-0.1.5.tar.gz/scheduler_tools-0.1.5/CONTRIBUTING.md
|
CONTRIBUTING.md
|
$ git clone --recurse-submodules [email protected]:{your_name_here}/scheduler_tools.git
$ cd scheduler_tools/
$ pip install -e .[dev]
$ git checkout -b {your_development_type}/short-description
$ make build
$ git add .
$ git commit -m "Resolves gh-###. Your detailed description of your changes."
$ git push origin {your_development_type}/short-description
$ bumpversion patch # possible: major / minor / patch
$ git push
$ git push --tags
| 0.331444 | 0.727709 |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install scheduler_tools, run this command in your terminal:
.. code-block:: console
$ pip install scheduler_tools
This is the preferred method to install scheduler_tools, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for scheduler_tools can be downloaded from the `GitHub repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/AllenCellModeling/scheduler_tools
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/AllenCellModeling/scheduler_tools/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _GitHub repo: https://github.com/AllenCellModeling/scheduler_tools
.. _tarball: https://github.com/AllenCellModeling/scheduler_tools/tarball/master
|
scheduler-tools
|
/scheduler_tools-0.1.5.tar.gz/scheduler_tools-0.1.5/docs/installation.rst
|
installation.rst
|
| 0.722527 | 0.215433 |
from pathlib import Path
from typing import Optional

from scheduler_tools.types import PrefDict
class PrefectPreferences:
"""
This class handles reading of a ~/.prefect/ssh.json file. This file has settings for the
name of the gateway, the username to authenticate with, the path to the local ssh identity
file.
"""
    def __init__(self, prefs: PrefDict):
        """
        :param prefs: preference dictionary with the gateway settings, the
            local working folder, and the local port numbers.
        """
        p_localfolder = Path(prefs['localfolder'])
        if not p_localfolder.exists():
            p_localfolder.mkdir(parents=True)
        self._path = p_localfolder
        self._data = prefs
def default_path(self) -> Path:
return self._path
@property
def gateway_url(self):
return self._data['gateway']['url']
@property
def username(self):
return self._data['gateway']['user']
@property
def identity_file(self):
return self._data['gateway']['identityfile']
@property
def known_hosts(self):
return Path('~/.ssh/known_hosts').expanduser()
def write_ssh_pid(self, pid):
with open(str(self.ssh_pid_path()), 'w') as fp:
fp.write(str(pid))
    def read_ssh_pid(self) -> Optional[str]:
        # ssh_pid_path() already expands '~', so the path can be used as-is
        pid = None
        if self.ssh_pid_path().exists():
            with open(str(self.ssh_pid_path()), 'r') as fp:
                pid = fp.read()
        return pid
def remove_ssh_pid(self):
if self.ssh_pid_path().exists():
self.ssh_pid_path().unlink()
def ssh_pid_path(self):
return self.default_path().expanduser() / "ssh_pid.txt"
def cluster_job_id_path(self):
return self.default_path().expanduser() / "cluster_job_id.txt"
    def read_prefect_job_id(self) -> Optional[str]:
        job_id = None
        if self.cluster_job_id_path().exists():
            with open(str(self.cluster_job_id_path()), 'r') as fp:
                job_id = fp.read()
        return job_id
    def write_prefect_job_id(self, job_id):
        with open(str(self.cluster_job_id_path()), 'w') as fp:
            fp.write(str(job_id))
def remove_prefect_job_id(self):
if self.cluster_job_id_path().exists():
self.cluster_job_id_path().unlink()
def cluster_pid_path(self):
# this needs to be made dynamic
return self.default_path().relative_to(Path().home()) / "pidfile"
@property
def local_dask_port(self):
return self._data['dask_port']
@property
def local_dashboard_port(self):
return self._data['dashboard_port']
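# A minimal usage sketch for the class above. The prefs layout mirrors the
# accessors in the class, but the gateway host, username, identity file, and
# port values are invented placeholders.
if __name__ == '__main__':
    demo_prefs = {
        'localfolder': str(Path.home() / '.prefect'),  # pid/job-id files go here
        'gateway': {
            'url': 'gateway.example.org',      # placeholder gateway host
            'user': 'jdoe',                    # placeholder username
            'identityfile': '~/.ssh/id_rsa',   # placeholder identity file
        },
        'dask_port': 8786,
        'dashboard_port': 8787,
    }
    demo = PrefectPreferences(demo_prefs)
    demo.write_ssh_pid(12345)
    assert demo.read_ssh_pid() == '12345'
    demo.remove_ssh_pid()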
|
scheduler-tools
|
/scheduler_tools-0.1.5.tar.gz/scheduler_tools-0.1.5/scheduler_tools/PrefectPreferences.py
|
PrefectPreferences.py
|
| 0.722918 | 0.268027 |
from datetime import datetime
from pathlib import Path
from time import sleep
from aicsdaemon import Daemon
from .load_custom_obj import load_custom_object
from .types import Pathlike
class AicsPrefectDaemon(Daemon):
def __init__(self, slurm_prefs: dict, pidfile: Pathlike, stdin: Pathlike = None, stdout: Pathlike = None,
stderr: Pathlike = None, foreground: bool = False):
super(AicsPrefectDaemon, self).__init__(pidfile=pidfile,
stdout=stdout,
stdin=stdin,
stderr=stderr,
foreground=foreground)
print("AicsPrefectDaemon: pidfile: ", pidfile)
print(f"prefs {slurm_prefs}")
open(stdout, 'w').close() if stdout else None
localdir = Path(pidfile).parent / "logs" / datetime.now().strftime("%Y%m%d_%H:%M:%S")
self._cluster_obj_name = slurm_prefs['cluster_obj_name']
self._cluster_prefs = slurm_prefs['host_conf'].copy()
self._cluster_prefs['local_directory'] = localdir
self._cluster_prefs['log_directory'] = localdir
self._adapt_prefs = slurm_prefs['adapt_conf'].copy()
def run(self):
object_kwargs = self._cluster_prefs
if self._adapt_prefs is None:
object_kwargs = {}
cluster = load_custom_object(module_path=self._cluster_obj_name['module'],
object_name=self._cluster_obj_name['object'],
object_kwargs=object_kwargs)
if self._adapt_prefs is not None:
cluster.adapt(**self._adapt_prefs)
print(f"{cluster.scheduler_info['address']}")
print(f"{cluster.scheduler_info['services']['dashboard']}", flush=True)
# this enables us to put something in stdin and if so this loop will
# exit. Thus using the stdin="filepath" argument gives us a clean way to
# shutdown the process
mtime = self.stdin.stat().st_mtime
while mtime == self.stdin.stat().st_mtime:
sleep(5)
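# A sketch of the shutdown path implied by the loop above. The helper name is
# invented; the mechanism (bumping the stdin file's mtime so the while
# condition goes false) is the one the comment in run() describes.
def request_daemon_shutdown(stdin_path: Pathlike) -> None:
    # Touching the file updates st_mtime; run() notices on its next
    # five-second poll and returns, letting the daemon exit cleanly.
    Path(stdin_path).touch()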
|
scheduler-tools
|
/scheduler_tools-0.1.5.tar.gz/scheduler_tools-0.1.5/scheduler_tools/aics_prefect_daemon.py
|
aics_prefect_daemon.py
|
| 0.54577 | 0.107297 |
from .pbstools import submit_jobs
import os
import string
import json
import shutil
def scatter(ndat, prefs, template_str):
verbose = prefs["verbose"]
# Get the template string
template = string.Template(prefs[template_str]["template"])
myjob = dict()
save_files = list()
if verbose:
print("Printing jobs scatter jobs for " + template_str)
    # Slurm maxes out at ~32000 task array size.
    # This changes how rows are selected by multiplying by tasks_per_job
    # instead of creating a range using it.
    tasks_per_job = prefs["job_prefs"]["tasks_per_job"]
    # Ceiling division: avoids submitting a trailing empty job when ndat
    # is an exact multiple of tasks_per_job.
    reduced_range = (ndat + tasks_per_job - 1) // tasks_per_job
    # For every chunk of rows
    for i in range(reduced_range):
        job_top = min((i + 1) * tasks_per_job, ndat)
        job_range = range(i * tasks_per_job, job_top)
# Get the info required to build that job from the data spreadsheet
myjob["prefs_path"] = prefs["my_path"]
myjob["save_log_path"] = prefs["save_log_path"]
myjob["row_index"] = " ".join([str(job_id) + " " for job_id in job_range])
# Apply job info to the template
outstr = prefs["prefix"] + "\n" + template.substitute(myjob)
save_file = (
prefs["job_prefs"]["script_dir"]
+ os.sep
+ template_str
+ "_"
+ str(i)
+ ".sh"
)
text_file = open(save_file, "w")
text_file.write(outstr)
text_file.close()
save_files.append(save_file)
tmp_file = "tmp_" + template_str + ".sh"
return submit_jobs(save_files, prefs["job_prefs"], tmp_file_name=tmp_file)
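# Worked example of the chunking above (values invented): with ndat = 10 and
# tasks_per_job = 4, reduced_range = (10 + 3) // 4 = 3, and the submitted row
# ranges are range(0, 4), range(4, 8), and range(8, 10).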
def gather(prefs, template_str):
verbose = prefs["verbose"]
# Get the template string
template = string.Template(prefs[template_str]["template"])
if verbose:
print("Printing " + template_str)
# Get the info required to build that job from the data spreadsheet
myjob = dict()
myjob["prefs_path"] = prefs["my_path"]
myjob["save_log_path"] = prefs["save_log_path"]
# Apply job info to the template
outstr = prefs["prefix"] + "\n" + template.substitute(myjob)
save_file = prefs["job_prefs"]["script_dir"] + os.sep + template_str + ".sh"
    with open(save_file, "w") as text_file:
        text_file.write(outstr)
tmp_file = "tmp_" + template_str + ".sh"
return submit_jobs(save_file, prefs["job_prefs"], tmp_file_name=tmp_file)
def setup_prefs(json_path):
with open(json_path) as f:
prefs = json.load(f)
# pdb.set_trace()
if "save_parent" not in prefs["global_vars"]:
prefs["global_vars"]["save_parent"] = os.getcwd()
prefs["save_parent"] = os.path.abspath(prefs["global_vars"]["save_parent"])
with open(json_path, "w") as f:
json.dump(prefs, f, indent=4, separators=(",", ": "))
    # make the parent directory if it doesn't exist
if not os.path.exists(prefs["save_parent"]):
os.makedirs(prefs["save_parent"])
json_path_local = prefs["save_parent"] + os.sep + "prefs.json"
if not os.path.exists(json_path_local):
# make a copy of the json object in the parent directory
shutil.copyfile(json_path, json_path_local)
else:
# use the local copy
print("Local copy of preference file already exists at " + json_path_local)
with open(json_path_local) as f:
prefs = json.load(f)
# record the location of the json object
prefs["my_path"] = json_path_local
# record the location of the data object
prefs["save_log_path"] = prefs["save_parent"] + os.sep + prefs["data_log_name"]
prefs["job_prefs"]["script_dir"] = (
prefs["save_parent"] + os.sep + prefs["script_dir"]
)
if not os.path.exists(prefs["job_prefs"]["script_dir"]):
os.makedirs(prefs["job_prefs"]["script_dir"])
prefs["verbose"] = prefs["global_vars"]["verbose"]
return prefs
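# --- Usage sketch (illustrative; key names mirror their use above, values
# are placeholders) ---
#
# prefs = setup_prefs("prefs.json")
# # prefs must provide, among others:
# #   prefs["prefix"]                      - shell preamble (e.g. "#!/bin/bash")
# #   prefs[template_str]["template"]      - string.Template body using
# #                                          ${prefs_path}, ${save_log_path}, ${row_index}
# #   prefs["job_prefs"]["tasks_per_job"]  - rows handled per submitted script
# job_ids = scatter(ndat=100, prefs=prefs, template_str="mytask")
# gather(prefs, template_str="collect")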
|
scheduler-tools
|
/scheduler_tools-0.1.5.tar.gz/scheduler_tools-0.1.5/scheduler_tools/pipelinetools.py
|
pipelinetools.py
|
<p align="center">
<a href="https://gitlab.com/DigonIO/scheduler"><img alt="scheduler" src="https://gitlab.com/DigonIO/scheduler/-/raw/master/doc/_assets/logo_name.svg" width="60%"></a>
</p>
<p>A simple in-process Python scheduler library with asyncio, threading and timezone support.
Schedule tasks by their time cycles, fixed times, weekdays, dates, weights, offsets and execution
counts, and automate Jobs.</p>
[](https://gitlab.com/DigonIO/scheduler)
[](https://github.com/DigonIO/scheduler)
[](https://gitlab.com/DigonIO/scheduler/-/blob/master/LICENSE)
[](https://gitlab.com/DigonIO/scheduler/-/pipelines)
[](https://gitlab.com/DigonIO/scheduler/-/pipelines)
[](https://github.com/psf/black)
[](https://pycqa.github.io/isort/)
[](https://pypi.org/project/scheduler/)
[](https://pypi.org/project/scheduler/)
[](https://pepy.tech/project/scheduler)
[](https://pepy.tech/project/scheduler)
[](https://digon.io/hyd/project/scheduler/t/master)
---
## Features
* Easy and user-friendly in-process Job scheduling
[(Quick Start)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/quick_start.html)
* Asyncio scheduler [(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/asyncio.html)
* Threading scheduler [(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/threading.html)
* Timezone compatibility [(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/timezones.html)
* Passing of parameters
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/parameters.html)
* Job prioritization
* Default linear prioritization
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/job_prioritization.html)
* User definable prioritization functions
[(Guide)](https://digon.io/hyd/project/scheduler/t/master/pages/guides/custom_prioritization.html)
* Job tagging
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/tags.html)
* Job batching
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/job_batching.html)
* Job metadata
[(Example)](https://digon.io/hyd/project/scheduler/t/master/pages/examples/metrics.html)
* Lightweight
* High test coverage
* [Online documentation](https://digon.io/hyd/project/scheduler/t/master/readme.html)
## Installation
### pip
`scheduler` can be installed directly from the PyPI repositories with:
```bash
pip install scheduler
```
Alternatively install `scheduler` from the `git`
[repository](https://gitlab.com/DigonIO/scheduler) with:
```bash
git clone https://gitlab.com/DigonIO/scheduler.git
cd scheduler
pip install .
```
### Arch Linux
The `PKGBUILD` file can be utilized from the
[Arch Build System](https://wiki.archlinux.org/title/Arch_Build_System).
Download the `PKGBUILD` file and from within the containing folder run
```console
makepkg -i
```
## Example: *How to schedule Jobs*
The following example shows how the `Scheduler` is instantiated and how basic `Job`s are created.
For advanced scheduling examples please visit the online
[documentation](https://digon.io/hyd/project/scheduler/t/master/examples.html).
[//]: # (This example is not directly included in the testing environment. Make sure to also update the corresponding test in tests/test_readme.py when updating the following example.)
```py
import datetime as dt
from scheduler import Scheduler
from scheduler.trigger import Monday, Tuesday
def foo():
print("foo")
schedule = Scheduler()
schedule.cyclic(dt.timedelta(minutes=10), foo)
schedule.minutely(dt.time(second=15), foo)
schedule.hourly(dt.time(minute=30, second=15), foo)
schedule.daily(dt.time(hour=16, minute=30), foo)
schedule.weekly(Monday(), foo)
schedule.weekly(Monday(dt.time(hour=16, minute=30)), foo)
schedule.once(dt.timedelta(minutes=10), foo)
schedule.once(Tuesday(), foo)
schedule.once(dt.datetime(year=2022, month=2, day=15, minute=45), foo)
```
A human-readable overview of the scheduled jobs can be created with a simple `print` statement:
```py
print(schedule)
```
```text
max_exec=inf, tzinfo=None, priority_function=linear_priority_function, #jobs=9
type function / alias due at due in attempts weight
-------- ---------------- ------------------- --------- ------------- ------
MINUTELY foo() 2021-05-26 03:55:15 0:00:14 0/inf 1
CYCLIC foo() 2021-05-26 04:05:00 0:09:59 0/inf 1
ONCE foo() 2021-05-26 04:05:00 0:09:59 0/1 1
HOURLY foo() 2021-05-26 04:30:15 0:35:14 0/inf 1
DAILY foo() 2021-05-26 16:30:00 12:34:59 0/inf 1
WEEKLY foo() 2021-05-31 00:00:00 4 days 0/inf 1
WEEKLY foo() 2021-05-31 16:30:00 5 days 0/inf 1
ONCE foo() 2021-06-01 00:00:00 5 days 0/1 1
ONCE foo() 2022-02-15 00:45:00 264 days 0/1 1
```
Executing pending `Job`s periodically can be achieved with a simple loop:
```py
import time
while True:
schedule.exec_jobs()
time.sleep(1)
```
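Jobs can also be selected and cancelled in groups via the tagging feature listed above. A minimal
sketch follows; the `tags` keyword and the `get_jobs`/`delete_jobs` calls are assumptions based on
the linked tagging example and should be verified there:
```py
import datetime as dt

from scheduler import Scheduler

def backup():
    print("backup")

schedule = Scheduler()

# attach tags when scheduling
schedule.cyclic(dt.timedelta(minutes=10), backup, tags={"maintenance"})

# later: inspect or cancel all jobs carrying a tag
tagged = schedule.get_jobs(tags={"maintenance"})
schedule.delete_jobs(tags={"maintenance"})
```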
## Documentation
View the API documentation [online](https://digon.io/hyd/project/scheduler/t/master/readme.html).
## Sponsor
<br>
<div align="center">
<a href="https://digon.io">
<img alt="Digon.IO GmbH - IT Dienstleister Wuppertal Softwareentwicklung und Datenwissenschaften" src="https://digon.io/static/landing/img/digon_name_right_grey.svg" width="50%">
</a>
</div>
<br>
<div align="center">
We would like to thank Digon.IO for sponsoring the development of this library.
Digon.IO is building bridges between data science and software development.
They enable companies to automate and accelerate their data-driven processes.
Please visit their website: <a href="https://digon.io/">digon.io</a>
</div>
## License
This free and open source software (FOSS) is published under the [LGPLv3 license](https://www.gnu.org/licenses/lgpl-3.0.en.html).
|
scheduler
|
/scheduler-0.8.4.tar.gz/scheduler-0.8.4/README.md
|
README.md
|
from timeit import default_timer
__all__ = ["Scheduler"]
class Scheduler(object):
def __init__(self):
self.remove_all()
def add(self, interval, count, callback, *args, **kwargs):
"""
Append a task to the scheduler and return the assigned ID.
Arguments:
interval -- Interval in which the callback will be executed (in seconds).
count -- Maximum number of times the callback will be executed.
The task will be removed after, at least, interval*count seconds.
        If count is 0 the callback will be executed indefinitely.
callback -- The function to be executed (with *args and **kwargs if any).
"""
if count < 0:
raise ValueError("count must be greater than or equal to 0.")
task = [0, interval, count, callback, args, kwargs]
self._tasks.append(task)
return id(task)
def remove(self, *tasks):
"""
        Remove one or more tasks from the scheduler by ID.
        Attempting to remove a nonexistent task does nothing.
Example -- Scheduler.remove(task_id_1, task_id_2, ...)
"""
for task in self._tasks:
if id(task) in tasks:
self._tasks.remove(task)
def remove_all(self):
"""Remove all tasks from the scheduler."""
self._tasks = []
def run(self):
completed_tasks = []
for i, task in enumerate(self._tasks):
prev_ticks, interval, count, callback, args, kwargs = task
if default_timer() - prev_ticks >= interval:
callback(*args, **kwargs)
if count > 0:
count -= 1
if count == 0:
# Do not change indices until all tasks
# have been executed.
completed_tasks.append(id(task))
continue
else:
self._tasks[i][2] = count
# Might take a while to execute the callback,
# so get ticks again.
self._tasks[i][0] = default_timer()
self.remove(*completed_tasks)
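if __name__ == "__main__":
    # Minimal usage sketch (illustrative): run a callback five times,
    # roughly once per second, using only the public API above.
    import time
    def tick():
        print("tick")
    sched = Scheduler()
    sched.add(interval=1.0, count=5, callback=tick)
    for _ in range(70):
        sched.run()  # fires tasks whose interval has elapsed (the first fires immediately)
        time.sleep(0.1)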
|
scheduler2
|
/scheduler2-1.0.2a1.zip/scheduler-1.0.0a1/scheduler/scheduler.py
|
scheduler.py
|
example:
1. `export SERVER_IP=xxx` and `export SERVER_PORT=xxx` (these environment variables are read by the ZooKeeper client)
2. schedulerplus_client.conf
```
[schedulerplus_client]
zk_url = localhost:2181
schedulerplus_url = localhost:8888
job_group = demogroup
```
3. demo_executor.py
```python
from schedulerplus.client.job_executor import JobExecutor
from schedulerplus.client.job_code_messages import *
class DemoExecutor(JobExecutor):
def execute(self, external_data):
        print(external_data)
return SUCCESS_CODE, SUCCESS_MESSAGE
```
4. web_main.py
```python
import cherrypy
from schedulerplus.client.config import Config
Config.instance().load("xxx")
from schedulerplus.client.job_dispatcher import JobDispatcher
from schedulerplus.client.job_register import JobRegister
from demo.demo_executor import DemoExecutor
class JobDispatchController(object):
job_dispatcher = JobDispatcher()
@cherrypy.expose
@cherrypy.tools.json_in()
def dispatch(self):
form = cherrypy.request.json
self.job_dispatcher.dispatch(form)
if __name__ == "__main__":
JobRegister.instance().register("demo", DemoExecutor())
cherrypy.quickstart(JobDispatchController())
```
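Once the web app is running, the scheduler server POSTs a JSON form to `/dispatch`. A minimal
sketch for triggering it by hand during local testing; the field names mirror
`JobExecutor.execute_job`, while the URL, port and values are placeholders:
```python
import requests

form = {
    "jobName": "demo",
    "jobGroup": "demogroup",
    "jobScheduleId": 1,
    "oriScheduleId": 1,
    "retryTimes": 0,
    "externalData": "hello",
    "concurrency": 1,
    "globalConcurrency": 1,
}
requests.post("http://localhost:8080/dispatch", json=form)
```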
|
schedulerplus-client
|
/schedulerplus-client-0.0.16.tar.gz/schedulerplus-client-0.0.16/README.rst
|
README.rst
|
import logging
from abc import abstractmethod
from datetime import datetime
import time
import requests
from .job_zk_client import JobZkClient, JobZkClientException
from .job_code_messages import *
from .config import Config
class JobExecutor:
def __init__(self):
self.zk_client = JobZkClient.instance()
self.scheduler_plus_url = Config.instance().SCHEDULERPLUS_URL
self.LOG_RESULT_URL = "/logResult"
@abstractmethod
def execute(self, external_data):
"""
Job execute logic, needs be implemented by subclasses.
:param external_data: param given when triggered on web.
:return: r, m; boolean execute result and message
"""
pass
def execute_job(self, form):
job_name = form.get("jobName")
job_group = form.get("jobGroup")
job_schedule_id = form.get("jobScheduleId")
ori_schedule_id = form.get("oriScheduleId")
retry_times = form.get("retryTimes", 0)
external_data = form.get("externalData")
concurrency = form.get("concurrency", 1)
global_concurrency = form.get("globalConcurrency", 1)
try:
self.zk_client.prepare_execution(job_name, concurrency, global_concurrency)
except Exception as e:
logging.error("{send result failed {}".format(e))
self.send_result(None, None, PREPARE_EXEC_ERROR_CODE, e.message, job_schedule_id,
ori_schedule_id, retry_times, job_name, job_group, external_data)
return
start = datetime.now()
try:
code, message = self.execute(external_data)
except Exception as e:
code = EXECUTE_JOB_ERROR_CODE
            message = str(e)
finally:
            interval = int((datetime.now() - start).total_seconds() * 1000)  # elapsed ms; .microseconds alone drops whole seconds
try:
start = int(time.mktime(start.timetuple()) * 1000) # alibaba fast json serialize date as timestamp
self.send_result(start, interval, code, message, job_schedule_id, ori_schedule_id,
retry_times, job_name, job_group, external_data)
except SendResultException as e:
logging.error("{}".format(e))
try:
self.zk_client.report_finished(job_name)
except JobZkClientException as e:
logging.error("{}".format(e))
def send_result(self, start, interval, code, message, job_schedule_id, ori_schedule_id,
retry_times, job_name, job_group, external_data):
d = {'start': start, 'interval': interval, 'code': code, 'message': message, 'jobScheduleId': job_schedule_id,
'oriScheduleId': ori_schedule_id, 'retryTimes': retry_times, 'jobName': job_name, 'jobGroup': job_group,
'externalData': external_data}
try:
requests.post(self.scheduler_plus_url + self.LOG_RESULT_URL, json=d)
except BaseException as e:
logging.error("send result error: {}".format(e))
            raise SendResultException(str(e))
class SendResultException(Exception):
pass
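# --- Minimal usage sketch (illustrative; assumes Config and ZooKeeper are
# already set up, and that the codes come from job_code_messages) ---
#
# class EchoExecutor(JobExecutor):
#     def execute(self, external_data):
#         print(external_data)
#         return SUCCESS_CODE, SUCCESS_MESSAGE
#
# EchoExecutor().execute_job({
#     "jobName": "echo", "jobGroup": "demogroup",
#     "jobScheduleId": 1, "oriScheduleId": 1,
#     "externalData": "hello",
# })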
|
schedulerplus-client
|
/schedulerplus-client-0.0.16.tar.gz/schedulerplus-client-0.0.16/client/job_executor.py
|
job_executor.py
|
from kazoo.client import KazooClient
import os
from .config import Config
from .singleton import Singleton
from .job_running_num import JobRunningNum
import logging
logging.basicConfig()
def join(*args):
    # ZooKeeper paths always use '/', independent of the local OS separator
    return "/".join(args)
class JobZkClient(object):
__metaclass__ = Singleton
def __init__(self):
self._zk_url = Config.instance().ZK_URL
self._zk_client = KazooClient(hosts=self._zk_url)
self._zk_client.start(5)
self.SERVER_IP = "SERVER_IP"
self.SERVER_PORT = "SERVER_PORT"
self.job_root = "/job"
self.job_lock_root = '/job_lock'
self.lock_path = "lock"
self.job_group = Config.instance().JOB_GROUP
    def prepare_execution(self, job_name, concurrency, global_concurrency):
        lock_path = join(self.job_lock_root, self.job_group, job_name)
        lock = self._zk_client.Lock(lock_path)
        with lock:
            self.check_running_job_num(job_name, concurrency, global_concurrency)
            n = JobRunningNum.instance().modify_job_running_num(job_name, 1)
            # mirror report_finished(): the running count is read from this
            # executor's address node under job_root, not from the lock path
            self._zk_client.set(join(self.job_root, self.job_group, job_name, self.get_address()), str(n))
def check_running_job_num(self, job_name, concurrency, global_concurrency):
job_name_path = self.get_job_name_path(job_name)
c_list = self._zk_client.get_children(job_name_path)
global_running_num = 0
for c in c_list:
if self.lock_path == c:
continue
if self.get_address() == c:
num = JobRunningNum.instance().get_job_running_num(job_name)
else:
v, s = self._zk_client.get(join(job_name_path, c))
num = int(v)
global_running_num = global_running_num + num
if JobRunningNum.instance().get_job_running_num(job_name) >= concurrency or \
global_running_num >= global_concurrency:
raise JobZkClientException("No available Job Executor for job [" + job_name + "]")
def get_job_name_path(self, job_name):
return join(self.job_root, self.job_group, job_name)
def report_finished(self, job_name):
path = join(self.job_root, self.job_group, job_name, self.get_address())
n = JobRunningNum.instance().modify_job_running_num(job_name, -1)
self._zk_client.set(path, str(n))
def register_job(self, job_name):
lock_path = join(self.job_lock_root, self.job_group, job_name)
lock = self._zk_client.Lock(lock_path)
with lock:
self._create_path_if_not_exist(job_name)
self._recreate_address_path(job_name)
def _create_path_if_not_exist(self, job_name):
path = join(self.job_root, self.job_group, job_name)
if not self._zk_client.exists(path):
self._zk_client.create(path, makepath=True)
def _recreate_address_path(self, job_name):
path = join(self.job_root, self.job_group, job_name, self.get_address())
if self._zk_client.exists(path):
self._zk_client.delete(path)
n = JobRunningNum.instance().get_job_running_num(job_name)
self._zk_client.create(path, str(n), ephemeral=True, makepath=True)
def get_address(self):
ip = os.getenv(self.SERVER_IP)
port = os.getenv(self.SERVER_PORT) if os.getenv(self.SERVER_PORT) else "8001"
print "{}:{}".format(ip, port)
return "{}:{}".format(ip, port)
def add_listener(self, listener):
self._zk_client.add_listener(listener)
@staticmethod
def instance():
return JobZkClient()
class JobZkClientException(Exception):
pass
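# --- Usage sketch (illustrative; assumes SERVER_IP/SERVER_PORT are exported
# and Config points at a reachable ZooKeeper ensemble) ---
#
# client = JobZkClient.instance()
# client.register_job("demo")
# try:
#     client.prepare_execution("demo", concurrency=1, global_concurrency=2)
#     ...  # run the job
# finally:
#     client.report_finished("demo")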
|
schedulerplus-client
|
/schedulerplus-client-0.0.16.tar.gz/schedulerplus-client-0.0.16/client/job_zk_client.py
|
job_zk_client.py
|
import argparse
from datetime import datetime
import json
import logging
from schedules_tools import jsondate, discovery
from schedules_tools.converter import ScheduleConverter
from schedules_tools.models import Task, Schedule
import sys
log = logging.getLogger(__name__)
REPORT_NO_CHANGE = ''
REPORT_ADDED = '_added_'
REPORT_REMOVED = '_removed_'
REPORT_CHANGED = '_changed_'
REPORT_PREFIX_MAP = {
REPORT_ADDED: '[+]',
REPORT_REMOVED: '[-]',
REPORT_CHANGED: '[M]',
REPORT_NO_CHANGE: 3 * ' ',
}
NAME_SIM_THRESHOLD = 0.8
TASK_SCORE_THRESHOLD = 0.45
NAME_SIM_WEIGHT = 0.5
TASK_POS_WEIGHT = 0.5
def strings_similarity(str1, str2, winkler=True, scaling=0.1):
"""
Find the Jaro-Winkler distance of 2 strings.
https://en.wikipedia.org/wiki/Jaro-Winkler_distance
:param winkler: add winkler adjustment to the Jaro distance
:param scaling: constant scaling factor for how much the score is adjusted
upwards for having common prefixes. Should not exceed 0.25
"""
if str1 == str2:
return 1.0
def num_of_char_matches(s1, len1, s2, len2):
count = 0
transpositions = 0 # number of matching chars w/ different sequence order
limit = int(max(len1, len2) / 2 - 1)
for i in range(len1):
start = i - limit
if start < 0:
start = 0
end = i + limit + 1
if end > len2:
end = len2
index = s2.find(s1[i], start, end)
if index > -1: # found common char
count += 1
if index != i:
transpositions += 1
return count, transpositions
len1 = len(str1)
len2 = len(str2)
num_of_matches, transpositions = num_of_char_matches(str1, len1, str2, len2)
if num_of_matches == 0:
return 0.0
m = float(num_of_matches)
t = transpositions / 2.0
dj = (m / float(len1) + m / float(len2) + (m - t) / m) / 3.0
if winkler:
length = 0
# length of common prefix at the start of the string (max = 4)
max_length = min(
len1,
len2,
4
)
while length < max_length and str1[length] == str2[length]:
length += 1
return dj + (length * scaling * (1.0 - dj))
return dj
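# Quick illustration (values are approximate, not asserted):
#   strings_similarity("schedule", "schedule")  -> 1.0
#   strings_similarity("schedule", "scheduler") -> close to 1.0 (the shared prefix boosts the Winkler score)
#   strings_similarity("alpha", "omega")        -> much lower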
class ScheduleDiff(object):
result = []
hierarchy_attr = 'tasks'
subtree_hash_attr_name = 'subtree_hash'
""" Default list of attributes used to compare 2 tasks. """
default_tasks_match_attrs = ['name', 'dStart', 'dFinish']
def __init__(self, schedule_a, schedule_b, trim_time=False, extra_compare_attributes=None):
self.schedule_a = schedule_a
self.schedule_b = schedule_b
self.trim_time = trim_time
self.attributes_to_compare = self.default_tasks_match_attrs
if extra_compare_attributes:
# avoid using += to not modify class-level list
self.attributes_to_compare = self.attributes_to_compare + list(extra_compare_attributes)
self.result = self._diff()
def __str__(self):
return self.result_to_str()
def _get_subtree(self, item):
return getattr(item, self.hierarchy_attr)
def result_to_str(self, items=None, level=0):
""" Textual representation of the diff. """
res = ''
if items is None:
items = self.result
schedule = Schedule()
for item in items:
subtree = item['subtree']
state = item['item_state']
if state in [REPORT_CHANGED, REPORT_ADDED]:
task = item['right']
elif state is REPORT_REMOVED:
task = item['left']
else:
task = item['both']
task_obj = Task.load_from_dict(task, schedule)
res += '{} {}{}\n'.format(REPORT_PREFIX_MAP[state], level * ' ', str(task_obj))
if subtree:
res += self.result_to_str(subtree, level + 2)
return res
    def _create_report(self,
                       item_state,
                       left=None,
                       right=None,
                       both=None,
                       subtree=None,
                       changed_attrs=None):
        """
        Returns a dictionary representing a possible change.
        {
            left: Task or None,
            right: Task or None,
            both: used instead of left and right, when the tasks are equal,
            subtree: List of reports from the child Tasks,
            changed_attrs: List of changed attributes,
            item_state: Type of change
        }
        """
        # use None defaults to avoid the shared-mutable-default pitfall
        if subtree is None:
            subtree = []
        if changed_attrs is None:
            changed_attrs = []
        if both:
report = {
'both': both.dump_as_dict(recursive=False),
'subtree': subtree,
'changed_attrs': changed_attrs,
'item_state': item_state
}
else:
# No need to keep the whole structure,
# child tasks will be placed in report['tasks']
if left is not None:
left = left.dump_as_dict(recursive=False)
if right is not None:
right = right.dump_as_dict(recursive=False)
report = {
'left': left,
'right': right,
'subtree': subtree,
'changed_attrs': changed_attrs,
'item_state': item_state,
}
return report
def _set_subtree_items_state(self, items, state):
"""
Set the given state recursively on the subtree items
"""
def create_report(item):
kwargs = {
'subtree': self._set_subtree_items_state(self._get_subtree(item), state)
}
if state == REPORT_NO_CHANGE:
kwargs['both'] = item
elif state == REPORT_ADDED:
kwargs['right'] = item
elif state == REPORT_REMOVED:
kwargs['left'] = item
return self._create_report(state, **kwargs)
return [create_report(item) for item in items]
    def get_changed_attrs(self, task_a, task_b):
        """
        Compare two tasks.
        Uses the attributes defined in `self.attributes_to_compare` plus the
        subtree hash, and returns a list of attributes that don't match.
        """
changed_attributes = [attr for attr in self.attributes_to_compare
if not self._compare_tasks_attributes(task_a, task_b, attr)]
if task_a.get_subtree_hash(self.attributes_to_compare) \
!= task_b.get_subtree_hash(self.attributes_to_compare):
changed_attributes.append(self.subtree_hash_attr_name)
return changed_attributes
def _compare_tasks_attributes(self, task_a, task_b, attr_name):
"""
Compares tasks attributes.
Trims time from datetime objects if self.trim_time is set.
"""
attribute_a = getattr(task_a, attr_name)
attribute_b = getattr(task_b, attr_name)
# no need to compare empty values strictly
if not attribute_a and not attribute_b:
return True
if self.trim_time:
if isinstance(attribute_a, datetime):
attribute_a = attribute_a.date()
if isinstance(attribute_b, datetime):
attribute_b = attribute_b.date()
if isinstance(attribute_a, list):
attribute_a = sorted(attribute_a)
if isinstance(attribute_b, list):
attribute_b = sorted(attribute_b)
return attribute_a == attribute_b
def find_best_match(self, t1, possible_matches, start_at_index=0):
"""
Finds the best match for the given task in the list of possible matches.
Returns the index of the best match and a dict
with a state suggestion and list of changed attrs.
"""
match_index = None
best_match = {
'state': REPORT_REMOVED,
'changes': [],
'name_score': 0,
'score': TASK_SCORE_THRESHOLD
}
if start_at_index > 0:
possible_matches = possible_matches[start_at_index:]
for i, t2 in enumerate(possible_matches, start_at_index):
res = self.eval_tasks(t1, t2, i, name_threshold=best_match['name_score'])
if (res['state'] is REPORT_CHANGED
and res['score'] > best_match['score']):
match_index = i
best_match = res
if res['state'] is REPORT_NO_CHANGE:
match_index = i
best_match = res
break
return match_index, best_match
def _task_position_score(self, index):
return 1.0 / (2 * (index + 1))
def _task_score(self, name_score, position_score):
weight_sum = NAME_SIM_WEIGHT + TASK_POS_WEIGHT
name_score *= NAME_SIM_WEIGHT
position_score *= TASK_POS_WEIGHT
return (name_score + position_score) / weight_sum
def eval_tasks(self, t1, t2, t2_index, name_threshold=NAME_SIM_THRESHOLD):
name_score = 0.0
position_score = 0.0
changed_attrs = self.get_changed_attrs(t1, t2)
# different names
if 'name' in changed_attrs:
t1_subtree = t1.get_subtree_hash(self.attributes_to_compare)
t2_subtree = t2.get_subtree_hash(self.attributes_to_compare)
if t1_subtree and t2_subtree:
if t1_subtree == t2_subtree:
state = REPORT_CHANGED
position_score = 1.0
else:
name_score = strings_similarity(t1.name, t2.name)
if (name_score > name_threshold
and len(changed_attrs) < len(self.attributes_to_compare)):
state = REPORT_CHANGED
position_score = self._task_position_score(t2_index)
else:
state = REPORT_REMOVED
# no subtrees
else:
name_score = strings_similarity(t1.name, t2.name, winkler=False)
if name_score > name_threshold:
state = REPORT_CHANGED
position_score = self._task_position_score(t2_index)
else:
state = REPORT_REMOVED
# names are equal
else:
name_score = 1.0
if (not changed_attrs
or (len(changed_attrs) == 1
and self.subtree_hash_attr_name in changed_attrs)):
state = REPORT_NO_CHANGE
else:
state = REPORT_CHANGED
position_score = 1.0
return {
'state': state,
'changes': changed_attrs,
'name_score': name_score,
'position_score': position_score,
'score': self._task_score(name_score, position_score)
}
def _diff(self, tasks_a=None, tasks_b=None):
if tasks_a is None:
tasks_a = self.schedule_a.tasks
if tasks_b is None:
tasks_b = self.schedule_b.tasks
res = []
last_b_index = 0
# shortcut to create a report for an added task
def report_task_added(index, recursive=True):
task = tasks_b[index]
subtree = self._get_subtree(task)
if recursive:
subtree = self._set_subtree_items_state(subtree, REPORT_ADDED)
return self._create_report(REPORT_ADDED, right=task, subtree=subtree)
for task in tasks_a:
match_index, match = self.find_best_match(task, tasks_b, start_at_index=last_b_index)
report = {}
if match_index is None:
subtree = self._set_subtree_items_state(self._get_subtree(task), REPORT_REMOVED)
report = self._create_report(REPORT_REMOVED, left=task, subtree=subtree)
else:
# ALL elements between last_b_index and match_index => ADDED
res.extend([report_task_added(k) for k in range(last_b_index, match_index)])
# exact match => NO CHANGE
if not match['changes']:
subtree = self._set_subtree_items_state(self._get_subtree(task), match['state'])
report_kwargs = {'both': task, 'subtree': subtree}
# structural change => CHANGED / NO CHANGE
elif self.subtree_hash_attr_name in match['changes']:
# process child tasks
subtree = self._diff(
self._get_subtree(task),
self._get_subtree(tasks_b[match_index])
)
if len(match['changes']) > 1:
report_kwargs = {
'left': task,
'right': tasks_b[match_index],
'subtree': subtree
}
else:
report_kwargs = {
'both': task,
'subtree': subtree
}
# no structural changes => CHANGED
else:
subtree = self._set_subtree_items_state(
self._get_subtree(tasks_b[match_index]), REPORT_NO_CHANGE)
report_kwargs = {
'left': task,
'right': tasks_b[match_index],
'subtree': subtree
}
report = self._create_report(match['state'],
changed_attrs=match['changes'],
**report_kwargs)
last_b_index = match_index + 1
res.append(report)
# remaining tasks => ADDED
res.extend([report_task_added(k) for k in range(last_b_index, len(tasks_b))])
return res
def dump_json(self, **kwargs):
def _encoder(obj):
if isinstance(obj, Task):
return obj.dump_as_dict()
return jsondate._datetime_encoder(obj)
kwargs['default'] = _encoder
return json.dumps(self.result, **kwargs)
def setup_logging(level):
log_format = '%(name)-10s %(levelname)7s: %(message)s'
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(level)
formatter = logging.Formatter(log_format)
sh.setFormatter(formatter)
# setup root logger
inst = logging.getLogger('')
inst.setLevel(level)
inst.addHandler(sh)
def main():
setup_logging(logging.INFO)
parser = argparse.ArgumentParser(
description='Tool to show differences between two schedules.')
parser.add_argument('--simple-diff',
help='Simple comparison between two schedules.',
action='store_true',
default=False)
parser.add_argument(
'--handlers-path',
help='Add python-dot-notation path to discover handlers (needs to '
'be python module), can be called several times '
             '(conflicting names will be overridden - the last '
'implementation will be used)',
action='append',
default=[])
parser.add_argument('--whole-days',
help='Compare just date part of timestamp (will '
'ignore differences in time)',
action='store_true',
default=False)
parser.add_argument('left')
parser.add_argument('right')
args = parser.parse_args()
for path in args.handlers_path:
discovery.search_paths.append(path)
left = ScheduleConverter()
left.import_schedule(args.left)
right = ScheduleConverter()
right.import_schedule(args.right)
if args.simple_diff:
diff_res = left.schedule.diff(right.schedule, whole_days=args.whole_days)
else:
diff_res = ScheduleDiff(left.schedule, right.schedule)
if diff_res:
print(diff_res)
sys.exit(1)
if __name__ == '__main__':
main()
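# --- Programmatic usage sketch (illustrative; the schedule file formats
# depend on the handlers available via schedules_tools.discovery) ---
#
# left = ScheduleConverter()
# left.import_schedule("old_schedule.tjx")
# right = ScheduleConverter()
# right.import_schedule("new_schedule.tjx")
# diff = ScheduleDiff(left.schedule, right.schedule, trim_time=True)
# print(diff)              # textual [+]/[-]/[M] report
# print(diff.dump_json())  # machine-readable report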
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/diff.py
|
diff.py
|
# Test as "python -m schedules_tools.batches.schedule_batch"
import argparse
import re
from schedules_tools.batches.utils import initialize_ss_handler, load_template
from schedules_tools.models import Task
from smartsheet.models import Row, Cell, ObjectValue, PredecessorList, Predecessor, Duration
GA_NAME_REGEX = re.compile(r'^GA( Release)?$')
BATCH_NAME_REGEX = re.compile(r'^Batch update ([0-9]+)')
class BatchError(Exception):
pass
def construct_task(name, duration=None):
"""
makes an instance of schedule_tools Task
"""
task = Task()
task.name = name
task.dStart = None
task.dFinish = None
    if duration:
        task.duration = duration
    elif duration == 0:
        # an explicit zero duration marks the task as a milestone
        task.milestone = True
    return task
def build_columns_map(columns):
result = {}
for column in columns:
if not column.title:
continue
column_name = column.title.lower()
if column_name in ('task', 'task name'):
result['name'] = column.index
elif column_name in ('start', 'start date'):
result['start'] = column.index
elif column_name in ('finish', 'due', 'end date'):
result['finish'] = column.index
missing_columns = {'name', 'start', 'finish'} - set(result.keys())
if missing_columns:
raise BatchError(f'Couldn\'t locate required columns: {missing_columns}')
return result
def add_batch(handle, template):
handler = initialize_ss_handler(handle)
columns_map = build_columns_map(handler.sheet.columns)
parsed_rows = list(
map(
lambda x: parse_row(x, columns_map),
handler.sheet.rows
)
)
# finding relevant rows
parent_row = find_parent_row(parsed_rows, template['parent'])
if not parent_row:
raise BatchError(f'Parent row "{template["parent"]}" not found.')
if template.get('first'):
predecessor_row = find_ga_row(parsed_rows)
batch_number = 1
batch_task_export_kwargs = {'to_top': True}
else:
latest_batch_row, latest_batch_number = find_latest_batch_row(
parsed_rows,
parent_row['id']
)
predecessor_row = find_predecessor_row_from_batch(
parsed_rows,
latest_batch_row['id'],
template['predecessor-task-name']
)
batch_number = latest_batch_number + 1
batch_task_export_kwargs = {'sibling_id': latest_batch_row['id']}
batch_name = 'Batch update %d' % batch_number
if 'type' in template:
batch_name = '%s %s' % (batch_name, template['type'])
# adding main batch task
batch_task = construct_task(batch_name)
batch_row_id = handler.export_task(
batch_task,
parent_id=parent_row['id'],
**batch_task_export_kwargs
).id
# exporting batch tasks and mapping them to set dependencies later
# can't set dependencies right away because task
# dependency might not be in the schedule yet
task_id_to_row = {}
for task_id, task_data in template['tasks'].items():
st_task = construct_task(task_data['name'], duration=task_data['duration'])
task_export_row = handler.export_task(st_task, batch_row_id)
task_id_to_row[task_id] = parse_row(task_export_row, columns_map)
# setting dependencies
for task_id, task_data in template['tasks'].items():
if 'dependency' not in task_data:
continue
pred_list = PredecessorList()
pred = Predecessor()
dependency_dict = task_data['dependency']
if dependency_dict['to'] == 'predecessor':
pred.row_id = predecessor_row['id']
else:
pred.row_id = task_id_to_row[int(dependency_dict['to'])]['id']
pred.type = dependency_dict.get('type') or 'FS'
if dependency_dict['lag_amount']:
lag_duration = Duration()
lag_duration.negative = dependency_dict['lag_sign'] == '-'
lag_amount = int(dependency_dict['lag_amount'])
if dependency_dict['lag_type'] == 'd':
lag_duration.days = lag_amount
else:
lag_duration.weeks = lag_amount
pred.lag = lag_duration
pred_list.predecessors = [pred]
dependency_cell = Cell()
dependency_cell.column_id = handler._sheet_columns['predecessors']
dependency_cell.object_value = ObjectValue()
dependency_cell.object_value.object_type = "PREDECESSOR_LIST"
dependency_cell.object_value = pred_list
task_row = task_id_to_row[task_id]
task_update_row = Row()
task_update_row.id = task_row['id']
task_update_row.cells.append(dependency_cell)
handler.client.Sheets.update_rows(
handler.handle,
[task_update_row]
)
def parse_row(row, columns_map):
"""
converts smartsheet row into a dict
"""
row_dict = row.to_dict()
cells = row_dict['cells']
result = {
'id': row_dict['id'],
'row_number': row_dict['rowNumber'],
'parent_id': row_dict.get('parentId'),
'name': cells[columns_map['name']].get('value'),
'date_start': cells[columns_map['start']].get('value'),
'date_finish': cells[columns_map['finish']].get('value'),
}
return result
def find_parent_row(parsed_rows, parent_name):
"""
finds a parent row by a given name
"""
for row in parsed_rows:
task_name = row['name']
if not task_name:
continue
if task_name == parent_name:
return row
return None
def find_latest_batch_row(parsed_rows, batch_parent_row_id):
"""
finds latest batch in the schedule
"""
children_rows = filter(
lambda x: x['parent_id'] == batch_parent_row_id,
parsed_rows
)
latest_batch_row = None
latest_batch_number = None
for row in children_rows:
batch_regex_match = BATCH_NAME_REGEX.match(row['name'])
if batch_regex_match:
batch_number = int(batch_regex_match.groups()[0])
if not latest_batch_number or batch_number > latest_batch_number:
latest_batch_row = row
latest_batch_number = batch_number
return latest_batch_row, latest_batch_number
def find_predecessor_row_from_batch(parsed_rows, batch_row_id, predecessor_name):
"""
finds a relevant predecessor row in a batch
"""
batch_rows = filter(
lambda x: x['parent_id'] == batch_row_id,
parsed_rows
)
for row in batch_rows:
if row['name'] == predecessor_name:
return row
return None
def find_ga_row(parsed_rows):
"""
finds GA in the schedule
"""
for row in parsed_rows:
if GA_NAME_REGEX.match(row['name']):
return row
def main():
parser = argparse.ArgumentParser(
description='Add a batch to SmartSheet schedule',
epilog="""
Requires SmartSheet API token in SMARTSHEET_API_TOKEN env variable.
It's possible to use custom batch templates by specifying BATCHES_TEMPLATE_DIR env variable.
""",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('template',
                        help='template name',
type=str,)
parser.add_argument('handle',
help='SmartSheet handle (URL)',
type=str,)
args = parser.parse_args()
template = load_template(args.template)
add_batch(args.handle, template)
if __name__ == '__main__':
main()
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/batches/schedule_batch.py
|
schedule_batch.py
|
# Test as "python -m schedules_tools.batches.schedule_batch"
import argparse
import re
from schedules_tools.batches.utils import initialize_ss_handler, load_template
from schedules_tools.models import Task
from smartsheet.models import Row, Cell, PredecessorList, Predecessor, Duration
GA_NAME_REGEX = re.compile(r'^GA( Release)?$')
BATCH_NAME_REGEX = re.compile(r'^Batch update ([0-9]+)')
class BatchError(Exception):
pass
def construct_task(name, duration=None):
"""
makes an instance of schedule_tools Task
"""
task = Task()
task.name = name
task.dStart = None
task.dFinish = None
if duration:
task.duration = duration
elif duration == 0:
task.milestone = True
return task
def build_columns_map(columns):
result = {}
for column in columns:
if not column.title:
continue
column_name = column.title.lower()
if column_name in ('task', 'task name'):
result['name'] = column.index
elif column_name in ('start', 'start date'):
result['start'] = column.index
elif column_name in ('finish', 'due', 'end date'):
result['finish'] = column.index
missing_columns = {'name', 'start', 'finish'} - set(result.keys())
if missing_columns:
raise BatchError(f'Couldn\'t locate required columns: {missing_columns}')
return result
def add_batch(handle, template):
handler = initialize_ss_handler(handle)
columns_map = build_columns_map(handler.sheet.columns)
parsed_rows = list(
map(
lambda x: parse_row(x, columns_map),
handler.sheet.rows
)
)
# finding relevant rows
parent_row = find_parent_row(parsed_rows, template['parent'])
if not parent_row:
raise BatchError(f'Parent row "{template["parent"]}" not found.')
if template.get('first'):
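        # first batch: anchor on the GA row and pin the batch to the top of the parent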
predecessor_row = find_ga_row(parsed_rows)
batch_number = 1
batch_task_export_kwargs = {'to_top': True}
else:
latest_batch_row, latest_batch_number = find_latest_batch_row(
parsed_rows,
parent_row['id']
)
predecessor_row = find_predecessor_row_from_batch(
parsed_rows,
latest_batch_row['id'],
template['predecessor-task-name']
)
batch_number = latest_batch_number + 1
batch_task_export_kwargs = {'sibling_id': latest_batch_row['id']}
batch_name = 'Batch update %d' % batch_number
if 'type' in template:
batch_name = '%s %s' % (batch_name, template['type'])
# adding main batch task
batch_task = construct_task(batch_name)
batch_row_id = handler.export_task(
batch_task,
parent_id=parent_row['id'],
**batch_task_export_kwargs
).id
# exporting batch tasks and mapping them to set dependencies later
# can't set dependencies right away because task
# dependency might not be in the schedule yet
task_id_to_row = {}
for task_id, task_data in template['tasks'].items():
st_task = construct_task(task_data['name'], duration=task_data['duration'])
task_export_row = handler.export_task(st_task, batch_row_id)
task_id_to_row[task_id] = parse_row(task_export_row, columns_map)
# setting dependencies
for task_id, task_data in template['tasks'].items():
if 'dependency' not in task_data:
continue
pred_list = PredecessorList()
pred = Predecessor()
dependency_dict = task_data['dependency']
if dependency_dict['to'] == 'predecessor':
pred.row_id = predecessor_row['id']
else:
pred.row_id = task_id_to_row[int(dependency_dict['to'])]['id']
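        # default to finish-to-start when the template omits a dependency type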
pred.type = dependency_dict.get('type') or 'FS'
if dependency_dict['lag_amount']:
lag_duration = Duration()
lag_duration.negative = dependency_dict['lag_sign'] == '-'
lag_amount = int(dependency_dict['lag_amount'])
if dependency_dict['lag_type'] == 'd':
lag_duration.days = lag_amount
else:
lag_duration.weeks = lag_amount
pred.lag = lag_duration
pred_list.predecessors = [pred]
dependency_cell = Cell()
dependency_cell.column_id = handler._sheet_columns['predecessors']
        # PredecessorList is itself an ObjectValue (object_type PREDECESSOR_LIST),
        # so it can be assigned to the cell directly
        dependency_cell.object_value = pred_list
task_row = task_id_to_row[task_id]
task_update_row = Row()
task_update_row.id = task_row['id']
task_update_row.cells.append(dependency_cell)
handler.client.Sheets.update_rows(
handler.handle,
[task_update_row]
)
def parse_row(row, columns_map):
"""
converts smartsheet row into a dict
"""
row_dict = row.to_dict()
cells = row_dict['cells']
result = {
'id': row_dict['id'],
'row_number': row_dict['rowNumber'],
'parent_id': row_dict.get('parentId'),
'name': cells[columns_map['name']].get('value'),
'date_start': cells[columns_map['start']].get('value'),
'date_finish': cells[columns_map['finish']].get('value'),
}
return result
def find_parent_row(parsed_rows, parent_name):
"""
finds a parent row by a given name
"""
for row in parsed_rows:
task_name = row['name']
if not task_name:
continue
if task_name == parent_name:
return row
return None
def find_latest_batch_row(parsed_rows, batch_parent_row_id):
"""
finds latest batch in the schedule
"""
children_rows = filter(
lambda x: x['parent_id'] == batch_parent_row_id,
parsed_rows
)
latest_batch_row = None
latest_batch_number = None
for row in children_rows:
batch_regex_match = BATCH_NAME_REGEX.match(row['name'])
if batch_regex_match:
batch_number = int(batch_regex_match.groups()[0])
if not latest_batch_number or batch_number > latest_batch_number:
latest_batch_row = row
latest_batch_number = batch_number
return latest_batch_row, latest_batch_number
def find_predecessor_row_from_batch(parsed_rows, batch_row_id, predecessor_name):
"""
finds a relevant predecessor row in a batch
"""
batch_rows = filter(
lambda x: x['parent_id'] == batch_row_id,
parsed_rows
)
for row in batch_rows:
if row['name'] == predecessor_name:
return row
return None
def find_ga_row(parsed_rows):
"""
finds GA in the schedule
"""
for row in parsed_rows:
if GA_NAME_REGEX.match(row['name']):
return row
def main():
parser = argparse.ArgumentParser(
description='Add a batch to SmartSheet schedule',
epilog="""
Requires SmartSheet API token in SMARTSHEET_API_TOKEN env variable.
It's possible to use custom batch templates by specifying BATCHES_TEMPLATE_DIR env variable.
""",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('template',
                        help='template name',
type=str,)
parser.add_argument('handle',
help='SmartSheet handle (URL)',
type=str,)
args = parser.parse_args()
template = load_template(args.template)
add_batch(args.handle, template)
if __name__ == '__main__':
main()
| 0.667256 | 0.295014 |
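For orientation, a minimal sketch of the template shape `add_batch` above consumes, inferred from the keys the code reads; every value below is hypothetical.

```python
# Hypothetical batch template; field names mirror the keys read by add_batch(),
# the values are made up for illustration.
template = {
    'parent': 'Batch updates',           # name of the row the batch nests under
    'first': False,                      # True anchors the batch to the GA row
    'type': 'security',                  # optional suffix appended to the batch name
    'predecessor-task-name': 'Release',  # row looked up inside the previous batch
    'tasks': {
        1: {'name': 'Build', 'duration': 2},
        2: {
            'name': 'Release',
            'duration': 0,               # zero duration marks a milestone
            'dependency': {
                'to': 1,                 # another task id, or 'predecessor'
                'type': 'FS',            # falls back to 'FS' when omitted
                'lag_amount': '1',
                'lag_sign': '+',
                'lag_type': 'd',         # 'd' for days, anything else for weeks
            },
        },
    },
}
```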
import datetime
import logging
from schedules_tools import SchedulesToolsException
log = logging.getLogger(__name__)
try:
import redis
redis_available = True
except ImportError:
log.info('Redis unavailable - will not use exclusive access to storage')
redis_available = False
class AcquireLockException(SchedulesToolsException):
pass
class StorageBase(object):
handle = None
options = {}
tmp_root = None
provide_changelog = False
provide_mtime = False
exclusive_access = False
exclusive_access_option = 'exclusive_access'
redis = None
_shared_lock = None # shared copy lock
lock_acquired = None
lock_timeout = 120
lock_sleep = 0.5 # seconds
lock_max_workers = 10 # max number of workers waiting
    def __init__(self, handle=None, options=None):
        options = options if options is not None else {}  # avoid a shared mutable default
        self.handle = handle  # 'handle' is source/target of schedule in general
        self.options = options
self.exclusive_access = redis_available and options.get(self.exclusive_access_option,
self.exclusive_access)
if self.exclusive_access:
redis_url = options.get('redis_url', '')
if redis_url:
self.redis = redis.StrictRedis.from_url(redis_url)
else:
self.redis = redis.StrictRedis()
self._shared_lock = self.redis.lock(
name=self.redis_key,
timeout=self.lock_timeout - 10, # max life time for lock
sleep=self.lock_sleep,
blocking_timeout=self.lock_timeout * self.lock_max_workers
)
@property
def redis_key(self):
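        # one lock key per storage class, so every instance of a handler type shares it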
return '_'.join(['schedules_tools', self.__class__.__name__])
def acquire_shared_lock(self, msg=''):
if self.exclusive_access:
log.debug('Waiting for {} shared lock..'.format(self.__class__.__name__))
self.lock_acquired = self._shared_lock.acquire()
if not self.lock_acquired:
raise AcquireLockException(
'Unable to acquire lock {}'.format(msg),
source=self
)
else:
log.debug('{} shared lock ACQUIRED'.format(self.__class__.__name__))
def release_shared_lock(self):
if self.lock_acquired:
self._shared_lock.release()
log.debug('{} shared lock RELEASED'.format(self.__class__.__name__))
def get_local_handle(self, revision=None, datetime=None):
"""
Get specific version of handle (usually file) based on revision
        or datetime. If both are specified, revision takes precedence.
Args:
revision: checkout specific revision
datetime: checkout regarding to specific date
Returns:
"""
raise NotImplementedError
def clean_local_handle(self):
raise NotImplementedError
def push(self):
raise NotImplementedError
def get_handle_mtime(self):
raise NotImplementedError
def handle_modified_since(self, mtime):
# Return False only when able to tell
if isinstance(mtime, datetime.datetime):
try:
handle_mtime = self.get_handle_mtime()
except NotImplementedError:
return True
if handle_mtime and handle_mtime <= mtime:
return False
return True
def get_handle_changelog(self):
raise NotImplementedError
def sync_nfs(local, remote, path=None):
pass
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/storage_handlers/__init__.py
|
__init__.py
|
import datetime
import logging
from schedules_tools import SchedulesToolsException
log = logging.getLogger(__name__)
try:
import redis
redis_available = True
except ImportError:
log.info('Redis unavailable - will not use exclusive access to storage')
redis_available = False
class AcquireLockException(SchedulesToolsException):
pass
class StorageBase(object):
handle = None
options = {}
tmp_root = None
provide_changelog = False
provide_mtime = False
exclusive_access = False
exclusive_access_option = 'exclusive_access'
redis = None
_shared_lock = None # shared copy lock
lock_acquired = None
lock_timeout = 120
lock_sleep = 0.5 # seconds
lock_max_workers = 10 # max number of workers waiting
    def __init__(self, handle=None, options=None):
        options = options if options is not None else {}  # avoid a shared mutable default
        self.handle = handle  # 'handle' is source/target of schedule in general
        self.options = options
self.exclusive_access = redis_available and options.get(self.exclusive_access_option,
self.exclusive_access)
if self.exclusive_access:
redis_url = options.get('redis_url', '')
if redis_url:
self.redis = redis.StrictRedis.from_url(redis_url)
else:
self.redis = redis.StrictRedis()
self._shared_lock = self.redis.lock(
name=self.redis_key,
timeout=self.lock_timeout - 10, # max life time for lock
sleep=self.lock_sleep,
blocking_timeout=self.lock_timeout * self.lock_max_workers
)
@property
def redis_key(self):
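        # one lock key per storage class, so every instance of a handler type shares it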
return '_'.join(['schedules_tools', self.__class__.__name__])
def acquire_shared_lock(self, msg=''):
if self.exclusive_access:
log.debug('Waiting for {} shared lock..'.format(self.__class__.__name__))
self.lock_acquired = self._shared_lock.acquire()
if not self.lock_acquired:
raise AcquireLockException(
'Unable to acquire lock {}'.format(msg),
source=self
)
else:
log.debug('{} shared lock ACQUIRED'.format(self.__class__.__name__))
def release_shared_lock(self):
if self.lock_acquired:
self._shared_lock.release()
log.debug('{} shared lock RELEASED'.format(self.__class__.__name__))
def get_local_handle(self, revision=None, datetime=None):
"""
Get specific version of handle (usually file) based on revision
        or datetime. If both are specified, revision takes precedence.
Args:
revision: checkout specific revision
datetime: checkout regarding to specific date
Returns:
"""
raise NotImplementedError
def clean_local_handle(self):
raise NotImplementedError
def push(self):
raise NotImplementedError
def get_handle_mtime(self):
raise NotImplementedError
def handle_modified_since(self, mtime):
# Return False only when able to tell
if isinstance(mtime, datetime.datetime):
try:
handle_mtime = self.get_handle_mtime()
except NotImplementedError:
return True
if handle_mtime and handle_mtime <= mtime:
return False
return True
def get_handle_changelog(self):
raise NotImplementedError
def sync_nfs(local, remote, path=None):
pass
| 0.476336 | 0.060114 |
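A minimal sketch of a concrete handler on top of StorageBase above, showing the intended lock pattern; the class, its name, and its method bodies are hypothetical.

```python
import datetime
import os

# Hypothetical storage handler; a real implementation would check out a copy
# of the handle instead of returning it directly.
class LocalDirStorage(StorageBase):
    provide_mtime = True

    def get_local_handle(self, revision=None, datetime=None):
        self.acquire_shared_lock(msg='while preparing local handle')
        try:
            return self.handle
        finally:
            self.release_shared_lock()

    def get_handle_mtime(self):
        # TZ-naive UTC, matching what handle_modified_since() compares against
        ts = os.path.getmtime(self.handle)
        return datetime.datetime.utcfromtimestamp(ts)
```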
from schedules_tools.schedule_handlers import ScheduleHandlerBase
import logging
from lxml.html import etree
log = logging.getLogger(__name__)
css = """
a[href=""] {display:none}
table.schedule {
border-collapse: collapse;
}
table.schedule th, table.schedule td {
border: 2px solid black;
padding: 3px 5px;
}
table.schedule th {
background-color: #a5c2ff;
}
table.schedule td {
background-color: #f3ebae;
}
table.schedule td.parent-task {
font-weight: bold;
}
table.schedule td.date {
font-size: 90%;
white-space: nowrap;
text-align: right;
}
table.schedule td.duration {
text-align: right;
}
table.schedule td div.note {
font-size: 80%;
}
"""
class ScheduleHandler_html(ScheduleHandlerBase):
provide_export = True
handle_deps_satisfied = True
default_export_ext = 'html'
def __init__(self, *args, **kwargs):
super(ScheduleHandler_html, self).__init__(*args, **kwargs)
if not self.options.get('date_format', False):
self.options['date_format'] = '%a %Y-%m-%d'
@classmethod
def is_valid_source(cls, handle=None):
return False
    def _export_task(self, e_table, task, hierarchy_parent='',
                     hierarchy_index=''):
        e_tr = etree.SubElement(e_table, 'tr')
        e_td = etree.SubElement(e_tr, 'td')
        curr_hierarchy_index = str(hierarchy_parent)
        if hierarchy_index:
            curr_hierarchy_index += '.' + str(hierarchy_index)
        e_td.text = curr_hierarchy_index
padding = (task.level - 1) * float(self.options.get('html_level_indent', 1))
e_td = etree.SubElement(e_tr, 'td',
style='padding-left: {}em'.format(padding))
e_td.text = task.name
if len(task.tasks):
e_td.attrib['class'] = 'parent-task'
if task.note:
e_note = etree.SubElement(e_td, 'div')
e_note.attrib['class'] = 'note'
e_note.text = task.note
if task.link:
e_div = etree.SubElement(e_td, 'div')
e_link = etree.SubElement(e_div, 'a')
e_link.attrib['href'] = task.link
e_link.text = task.link
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'date'
e_td.text = str(task.dStart.strftime(self.options['date_format']))
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'date'
e_td.text = str(task.dFinish.strftime(self.options['date_format']))
duration = task.dFinish - task.dStart
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'duration'
e_td.text = str(duration.days)
        for index, subtask in enumerate(task.tasks):
            self._export_task(e_table, subtask, curr_hierarchy_index, index + 1)
# Schedule
def export_schedule(self, out_file=None):
e_html = etree.Element('html')
e_head = etree.SubElement(e_html, 'head')
etree.SubElement(e_head, 'meta', charset="utf-8")
if self.options.get('html_title', False):
title = self.options['html_title']
else:
title = self.schedule.name
e_title = etree.SubElement(e_head, 'title')
e_title.text = title
if self.options.get('html_css_href', False):
etree.SubElement(e_head,
'link',
type='text/css',
rel='stylesheet',
href=self.options['html_css_href']
)
else:
e_style = etree.SubElement(e_head, 'style', type='text/css')
e_style.text = css
e_body = etree.SubElement(e_html, 'body')
e_h1 = etree.SubElement(e_body, 'h1')
e_h1.text = title
if self.options.get('html_table_header', False):
e_body.append(etree.fromstring(self.options['html_table_header']))
e_table = etree.SubElement(e_body, 'table', attrib={'align': 'center',
'class': 'schedule'})
e_tr_head = etree.SubElement(e_table, 'tr')
        head_columns = ['Hierarchy index', 'Name', 'Start', 'End', 'Duration']
for column in head_columns:
e_th_head = etree.SubElement(e_tr_head, 'th')
e_th_head.text = column
for index, task in enumerate(self.schedule.tasks):
self._export_task(e_table, task, index + 1)
if self.options.get('html_table_footer', False):
e_body.append(etree.fromstring(self.options['html_table_footer']))
etree_return = etree.ElementTree(e_html)
if out_file:
etree_return.write(out_file, pretty_print=True, encoding="utf-8",
xml_declaration=False)
        # str() on an ElementTree yields its repr, not markup - serialize instead
        return etree.tostring(etree_return, pretty_print=True, encoding='unicode')
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/schedule_handlers/html.py
|
html.py
|
from schedules_tools.schedule_handlers import ScheduleHandlerBase
import logging
from lxml.html import etree
log = logging.getLogger(__name__)
css = """
a[href=""] {display:none}
table.schedule {
border-collapse: collapse;
}
table.schedule th, table.schedule td {
border: 2px solid black;
padding: 3px 5px;
}
table.schedule th {
background-color: #a5c2ff;
}
table.schedule td {
background-color: #f3ebae;
}
table.schedule td.parent-task {
font-weight: bold;
}
table.schedule td.date {
font-size: 90%;
white-space: nowrap;
text-align: right;
}
table.schedule td.duration {
text-align: right;
}
table.schedule td div.note {
font-size: 80%;
}
"""
class ScheduleHandler_html(ScheduleHandlerBase):
provide_export = True
handle_deps_satisfied = True
default_export_ext = 'html'
def __init__(self, *args, **kwargs):
super(ScheduleHandler_html, self).__init__(*args, **kwargs)
if not self.options.get('date_format', False):
self.options['date_format'] = '%a %Y-%m-%d'
@classmethod
def is_valid_source(cls, handle=None):
return False
    def _export_task(self, e_table, task, hierarchy_parent='',
                     hierarchy_index=''):
        e_tr = etree.SubElement(e_table, 'tr')
        e_td = etree.SubElement(e_tr, 'td')
        curr_hierarchy_index = str(hierarchy_parent)
        if hierarchy_index:
            curr_hierarchy_index += '.' + str(hierarchy_index)
        e_td.text = curr_hierarchy_index
padding = (task.level - 1) * float(self.options.get('html_level_indent', 1))
e_td = etree.SubElement(e_tr, 'td',
style='padding-left: {}em'.format(padding))
e_td.text = task.name
if len(task.tasks):
e_td.attrib['class'] = 'parent-task'
if task.note:
e_note = etree.SubElement(e_td, 'div')
e_note.attrib['class'] = 'note'
e_note.text = task.note
if task.link:
e_div = etree.SubElement(e_td, 'div')
e_link = etree.SubElement(e_div, 'a')
e_link.attrib['href'] = task.link
e_link.text = task.link
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'date'
e_td.text = str(task.dStart.strftime(self.options['date_format']))
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'date'
e_td.text = str(task.dFinish.strftime(self.options['date_format']))
duration = task.dFinish - task.dStart
e_td = etree.SubElement(e_tr, 'td')
e_td.attrib['class'] = 'duration'
e_td.text = str(duration.days)
        for index, subtask in enumerate(task.tasks):
            self._export_task(e_table, subtask, curr_hierarchy_index, index + 1)
# Schedule
def export_schedule(self, out_file=None):
e_html = etree.Element('html')
e_head = etree.SubElement(e_html, 'head')
etree.SubElement(e_head, 'meta', charset="utf-8")
if self.options.get('html_title', False):
title = self.options['html_title']
else:
title = self.schedule.name
e_title = etree.SubElement(e_head, 'title')
e_title.text = title
if self.options.get('html_css_href', False):
etree.SubElement(e_head,
'link',
type='text/css',
rel='stylesheet',
href=self.options['html_css_href']
)
else:
e_style = etree.SubElement(e_head, 'style', type='text/css')
e_style.text = css
e_body = etree.SubElement(e_html, 'body')
e_h1 = etree.SubElement(e_body, 'h1')
e_h1.text = title
if self.options.get('html_table_header', False):
e_body.append(etree.fromstring(self.options['html_table_header']))
e_table = etree.SubElement(e_body, 'table', attrib={'align': 'center',
'class': 'schedule'})
e_tr_head = etree.SubElement(e_table, 'tr')
        head_columns = ['Hierarchy index', 'Name', 'Start', 'End', 'Duration']
for column in head_columns:
e_th_head = etree.SubElement(e_tr_head, 'th')
e_th_head.text = column
for index, task in enumerate(self.schedule.tasks):
self._export_task(e_table, task, index + 1)
if self.options.get('html_table_footer', False):
e_body.append(etree.fromstring(self.options['html_table_footer']))
etree_return = etree.ElementTree(e_html)
if out_file:
etree_return.write(out_file, pretty_print=True, encoding="utf-8",
xml_declaration=False)
        # str() on an ElementTree yields its repr, not markup - serialize instead
        return etree.tostring(etree_return, pretty_print=True, encoding='unicode')
| 0.548432 | 0.169097 |
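The HTML handler is driven entirely by its options dict; a hypothetical invocation, with the option keys taken from the lookups in export_schedule above (`schedule` is assumed to be a populated models.Schedule).

```python
# Hypothetical usage of ScheduleHandler_html; all option keys appear in the
# handler code above, the values are illustrative.
handler = ScheduleHandler_html(
    schedule=schedule,
    options={
        'html_title': 'Release schedule',   # defaults to schedule.name
        'date_format': '%Y-%m-%d',          # defaults to '%a %Y-%m-%d'
        'html_level_indent': 1.5,           # em of left padding per task level
        'html_css_href': 'https://example.com/schedule.css',  # replaces the inline CSS
    },
)
handler.export_schedule('schedule.html')
```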
from schedules_tools.schedule_handlers import ScheduleHandlerBase
from schedules_tools import models
import sys
import datetime
import os
import logging
from datetime import timedelta
log = logging.getLogger(__name__)
try:
    from pyral import Rally, rallyWorkset
    additional_deps_satisfied = True
except ImportError:
    additional_deps_satisfied = False
class ScheduleHandler_rally(ScheduleHandlerBase):
provide_export = False
    handle_deps_satisfied = additional_deps_satisfied
@classmethod
def is_valid_source(cls, handle=None):
if not handle:
handle = cls.handle
if os.stat(handle).st_size < 1024:
            with open(handle) as fd:
                file_cont = fd.read()
if 'WORKSPACE' in file_cont and 'PROJECT' in file_cont:
return True
return False
def import_schedule(self):
self.schedule = models.Schedule()
        start_time = None
        end_time = None  # stays None if the iteration query below finds nothing
options = ['--config=%s' % self.handle]
server, user, password, apikey, workspace, project = rallyWorkset(options)
rally = Rally(server, user, password, apikey, workspace=workspace, project=project)
rally_iter = self.options['rally_iter']
self.schedule.name = rally_iter.strip()
query_criteria = 'Iteration.Name = "%s"' % rally_iter
response = rally.get('Iteration', fetch=True,
query='Name = "%s"' % rally_iter)
if response.errors:
sys.stdout.write("\n".join(response.errors))
sys.exit(1)
for iteration in response:
print('Iteration: %s (starts %s ends %s)' % (
iteration.Name, iteration.StartDate[:10], iteration.EndDate[:10]))
start_time = datetime.datetime.combine(
datetime.datetime.strptime(iteration.StartDate[:10], '%Y-%m-%d'),
datetime.time(8))
end_time = datetime.datetime.combine(
datetime.datetime.strptime(iteration.EndDate[:10], '%Y-%m-%d'),
datetime.time(8)) - timedelta(days=1)
break
response = rally.get('UserStory', fetch=True, query=query_criteria, order="Rank")
if response.errors:
sys.stdout.write("\n".join(response.errors))
sys.exit(1)
index = 1
if not start_time:
start_time = datetime.datetime.combine(datetime.date.today(), datetime.time(8))
self.schedule.dStart = start_time
self.schedule.dFinish = end_time
for story in response:
print(story.Name)
t = models.Task(self.schedule, level=1)
t.index = index
index += 1
t.name = story.Name.strip()
t.dStart = start_time
max_st_end_time = start_time
story.Tasks.sort(key=lambda x: x.TaskIndex)
for task in story.Tasks:
print('-- %s | %sh | %s' % (task.Name, task.Estimate, task.Owner.Name))
t_in = models.Task(self.schedule, level=2)
t_in.index = index
index += 1
t_in.name = task.Name.strip()
t_in.dStart = start_time
t_in.dFinish = start_time + datetime.timedelta(hours=float(task.Estimate))
                max_st_end_time = max(max_st_end_time, t_in.dFinish)
# look for resource
resource_id = None
for r_id, resource in self.schedule.resources.items():
if resource == task.Owner.Name:
resource_id = r_id
break
if not resource_id:
resource_id = len(self.schedule.resources) + 1
self.schedule.resources[resource_id] = str(task.Owner.Name)
t_in.resource = resource_id
t_in.user = task.Owner.UserName.split('@')[0]
t.tasks.append(t_in)
print('')
t.dFinish = max_st_end_time
self.schedule.tasks.append(t)
return self.schedule
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/schedule_handlers/rally.py
|
rally.py
|
from schedules_tools.schedule_handlers import ScheduleHandlerBase
from schedules_tools import models
import sys
import datetime
import os
import logging
from datetime import timedelta
log = logging.getLogger(__name__)
try:
    from pyral import Rally, rallyWorkset
    additional_deps_satisfied = True
except ImportError:
    additional_deps_satisfied = False
class ScheduleHandler_rally(ScheduleHandlerBase):
provide_export = False
    handle_deps_satisfied = additional_deps_satisfied
@classmethod
def is_valid_source(cls, handle=None):
if not handle:
handle = cls.handle
if os.stat(handle).st_size < 1024:
            with open(handle) as fd:
                file_cont = fd.read()
if 'WORKSPACE' in file_cont and 'PROJECT' in file_cont:
return True
return False
def import_schedule(self):
self.schedule = models.Schedule()
        start_time = None
        end_time = None  # stays None if the iteration query below finds nothing
options = ['--config=%s' % self.handle]
server, user, password, apikey, workspace, project = rallyWorkset(options)
rally = Rally(server, user, password, apikey, workspace=workspace, project=project)
rally_iter = self.options['rally_iter']
self.schedule.name = rally_iter.strip()
query_criteria = 'Iteration.Name = "%s"' % rally_iter
response = rally.get('Iteration', fetch=True,
query='Name = "%s"' % rally_iter)
if response.errors:
sys.stdout.write("\n".join(response.errors))
sys.exit(1)
for iteration in response:
print('Iteration: %s (starts %s ends %s)' % (
iteration.Name, iteration.StartDate[:10], iteration.EndDate[:10]))
start_time = datetime.datetime.combine(
datetime.datetime.strptime(iteration.StartDate[:10], '%Y-%m-%d'),
datetime.time(8))
end_time = datetime.datetime.combine(
datetime.datetime.strptime(iteration.EndDate[:10], '%Y-%m-%d'),
datetime.time(8)) - timedelta(days=1)
break
response = rally.get('UserStory', fetch=True, query=query_criteria, order="Rank")
if response.errors:
sys.stdout.write("\n".join(response.errors))
sys.exit(1)
index = 1
if not start_time:
start_time = datetime.datetime.combine(datetime.date.today(), datetime.time(8))
self.schedule.dStart = start_time
self.schedule.dFinish = end_time
for story in response:
print(story.Name)
t = models.Task(self.schedule, level=1)
t.index = index
index += 1
t.name = story.Name.strip()
t.dStart = start_time
max_st_end_time = start_time
story.Tasks.sort(key=lambda x: x.TaskIndex)
for task in story.Tasks:
print('-- %s | %sh | %s' % (task.Name, task.Estimate, task.Owner.Name))
t_in = models.Task(self.schedule, level=2)
t_in.index = index
index += 1
t_in.name = task.Name.strip()
t_in.dStart = start_time
t_in.dFinish = start_time + datetime.timedelta(hours=float(task.Estimate))
                max_st_end_time = max(max_st_end_time, t_in.dFinish)
# look for resource
resource_id = None
for r_id, resource in self.schedule.resources.items():
if resource == task.Owner.Name:
resource_id = r_id
break
if not resource_id:
resource_id = len(self.schedule.resources) + 1
self.schedule.resources[resource_id] = str(task.Owner.Name)
t_in.resource = resource_id
t_in.user = task.Owner.UserName.split('@')[0]
t.tasks.append(t_in)
print('')
t.dFinish = max_st_end_time
self.schedule.tasks.append(t)
return self.schedule
| 0.205456 | 0.188903 |
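is_valid_source above accepts small files (under 1 KiB) that mention WORKSPACE and PROJECT; a hypothetical pyral-style config handle, with made-up values:

```
SERVER    = rally1.rallydev.com
USER      = [email protected]
APIKEY    = _abc123def456
WORKSPACE = Sample Workspace
PROJECT   = Sample Project
```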
import datetime
import json
import logging
import os
from schedules_tools.schedule_handlers import ScheduleHandlerBase
from schedules_tools.models import Schedule, Task
log = logging.getLogger(__name__)
class ScheduleHandler_json(ScheduleHandlerBase):
provide_export = True
handle_deps_satisfied = True
default_export_ext = 'json'
changelog_date_format = '%Y-%m-%d'
@classmethod
def is_valid_source(cls, handle=None):
if not handle:
handle = cls.handle
file_ext = os.path.splitext(handle)[1]
if file_ext != '.json':
return False
try:
with open(handle) as fd:
json.load(fd)
except ValueError:
return False
return True
@staticmethod
def _parse_timestamp(timestamp):
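        # timestamps arrive as Unix epoch seconds serialized as strings
        # (the export side writes them with strftime('%s'))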
number = int(timestamp)
return datetime.datetime.fromtimestamp(number)
def import_schedule(self):
with open(self.handle) as fd:
jsonobj = json.load(fd)
schedule = Schedule()
schedule.dStart = self._parse_timestamp(jsonobj['start'])
schedule.dFinish = self._parse_timestamp(jsonobj['end'])
schedule.slug = jsonobj['slug']
schedule.name = jsonobj['name']
if jsonobj.get('mtime', None):
schedule.mtime = self._parse_timestamp(jsonobj['mtime'])
schedule.ext_attr = jsonobj.get('ext_attr', {})
schedule.resources = jsonobj.get('resources', {})
schedule.flags_attr_id = jsonobj.get('flags_attr_id', None)
# schedule.id_reg is built during parsing tasks
if 'changelog' in jsonobj:
changelogs = {}
for rev, record in jsonobj['changelog'].items():
record_date = datetime.datetime.strptime(
record['date'], self.changelog_date_format)
item = {
'user': record['user'],
'date': record_date,
'msg': record['msg']
}
changelogs[rev] = item
schedule.changelog = changelogs
# We don't parse phases here, because we are collecting them
# during parsing tasks itself
for subtaskobj in jsonobj['tasks']:
task = self.import_task_from_json(schedule, subtaskobj, None)
schedule.tasks.append(task)
return schedule
def import_task_from_json(self, schedule, jsonobj, parenttask):
task = Task(schedule)
task.index = jsonobj['index']
task.level = jsonobj['_level']
task.name = jsonobj['name']
task.slug = jsonobj['slug']
schedule.id_reg.add(task.slug)
task.priority = jsonobj['priority']
task.p_complete = jsonobj['complete']
task.milestone = False
if jsonobj['type'] == 'Milestone':
task.milestone = True
task.flags = jsonobj['flags']
if 'link' in jsonobj:
task.link = jsonobj['link']
if 'note' in jsonobj:
task.note = jsonobj['note']
task.dStart = self._parse_timestamp(jsonobj['start'])
task.dFinish = self._parse_timestamp(jsonobj['end'])
schedule.used_flags |= set(task.flags)
if 'tasks' in jsonobj:
for subtaskobj in jsonobj['tasks']:
subtask = self.import_task_from_json(schedule, subtaskobj, task)
task.tasks.append(subtask)
return task
def export_schedule(self, out_file):
json_schedule = self.export_schedule_as_dict()
content = json.dumps(json_schedule,
sort_keys=True,
indent=4,
separators=(',', ': '))
self._write_to_file(content, out_file)
return content
def export_schedule_as_dict(self):
schedule_dict = dict()
schedule_dict['slug'] = self.schedule.slug
schedule_dict['name'] = self.schedule.name
schedule_dict['start'] = self.schedule.dStart.strftime('%s')
schedule_dict['end'] = self.schedule.dFinish.strftime('%s')
if self.schedule.mtime:
schedule_dict['mtime'] = self.schedule.mtime.strftime('%s')
schedule_dict['resources'] = self.schedule.resources
schedule_dict['used_flags'] = sorted(list(self.schedule.used_flags))
schedule_dict['ext_attr'] = self.schedule.ext_attr
schedule_dict['flags_attr_id'] = self.schedule.flags_attr_id
# We intentionally don't export id_reg attribute here - it's collected
# during import
        # serialize a copy so the schedule's own changelog keeps datetime objects
        schedule_dict['changelog'] = {}
        for rev, record in self.schedule.changelog.items():
            record = dict(record)
            record['date'] = record['date'].strftime(self.changelog_date_format)
            schedule_dict['changelog'][rev] = record
schedule_dict['tasks'] = []
self.schedule.task_id_reg = set()
for task in self.schedule.tasks:
schedule_dict['tasks'].append(self.export_task_as_dict(task))
return schedule_dict
def export_task_as_dict(self, task, parent_slug=''):
task_export = {}
task_export['slug'] = task.slug
task_export['index'] = task.index
task_export['_level'] = task.level
task_export['name'] = task.name
task_export['priority'] = task.priority
task_export['complete'] = task.p_complete
task_export['type'] = task.get_type()
task_export['flags'] = task.flags
if task.note:
task_export['note'] = task.note
if task.link:
task_export['link'] = task.link
task_export['parentTask'] = parent_slug
task_export['start'] = task.dStart.strftime('%s')
task_export['end'] = task.dFinish.strftime('%s')
if task.tasks: # task has subtasks
# prepare tasklist
task_export['tasks'] = []
for subtask in task.tasks:
task_export['tasks'].append(self.export_task_as_dict(subtask, task.slug))
return task_export
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/schedule_handlers/jsonstruct.py
|
jsonstruct.py
|
import datetime
import json
import logging
import os
from schedules_tools.schedule_handlers import ScheduleHandlerBase
from schedules_tools.models import Schedule, Task
log = logging.getLogger(__name__)
class ScheduleHandler_json(ScheduleHandlerBase):
provide_export = True
handle_deps_satisfied = True
default_export_ext = 'json'
changelog_date_format = '%Y-%m-%d'
@classmethod
def is_valid_source(cls, handle=None):
if not handle:
handle = cls.handle
file_ext = os.path.splitext(handle)[1]
if file_ext != '.json':
return False
try:
with open(handle) as fd:
json.load(fd)
except ValueError:
return False
return True
@staticmethod
def _parse_timestamp(timestamp):
number = int(timestamp)
return datetime.datetime.fromtimestamp(number)
def import_schedule(self):
with open(self.handle) as fd:
jsonobj = json.load(fd)
schedule = Schedule()
schedule.dStart = self._parse_timestamp(jsonobj['start'])
schedule.dFinish = self._parse_timestamp(jsonobj['end'])
schedule.slug = jsonobj['slug']
schedule.name = jsonobj['name']
if jsonobj.get('mtime', None):
schedule.mtime = self._parse_timestamp(jsonobj['mtime'])
schedule.ext_attr = jsonobj.get('ext_attr', {})
schedule.resources = jsonobj.get('resources', {})
schedule.flags_attr_id = jsonobj.get('flags_attr_id', None)
# schedule.id_reg is built during parsing tasks
if 'changelog' in jsonobj:
changelogs = {}
for rev, record in jsonobj['changelog'].items():
record_date = datetime.datetime.strptime(
record['date'], self.changelog_date_format)
item = {
'user': record['user'],
'date': record_date,
'msg': record['msg']
}
changelogs[rev] = item
schedule.changelog = changelogs
# We don't parse phases here, because we are collecting them
# during parsing tasks itself
for subtaskobj in jsonobj['tasks']:
task = self.import_task_from_json(schedule, subtaskobj, None)
schedule.tasks.append(task)
return schedule
def import_task_from_json(self, schedule, jsonobj, parenttask):
task = Task(schedule)
task.index = jsonobj['index']
task.level = jsonobj['_level']
task.name = jsonobj['name']
task.slug = jsonobj['slug']
schedule.id_reg.add(task.slug)
task.priority = jsonobj['priority']
task.p_complete = jsonobj['complete']
task.milestone = False
if jsonobj['type'] == 'Milestone':
task.milestone = True
task.flags = jsonobj['flags']
if 'link' in jsonobj:
task.link = jsonobj['link']
if 'note' in jsonobj:
task.note = jsonobj['note']
task.dStart = self._parse_timestamp(jsonobj['start'])
task.dFinish = self._parse_timestamp(jsonobj['end'])
schedule.used_flags |= set(task.flags)
if 'tasks' in jsonobj:
for subtaskobj in jsonobj['tasks']:
subtask = self.import_task_from_json(schedule, subtaskobj, task)
task.tasks.append(subtask)
return task
def export_schedule(self, out_file):
json_schedule = self.export_schedule_as_dict()
content = json.dumps(json_schedule,
sort_keys=True,
indent=4,
separators=(',', ': '))
self._write_to_file(content, out_file)
return content
def export_schedule_as_dict(self):
schedule_dict = dict()
schedule_dict['slug'] = self.schedule.slug
schedule_dict['name'] = self.schedule.name
schedule_dict['start'] = self.schedule.dStart.strftime('%s')
schedule_dict['end'] = self.schedule.dFinish.strftime('%s')
if self.schedule.mtime:
schedule_dict['mtime'] = self.schedule.mtime.strftime('%s')
schedule_dict['resources'] = self.schedule.resources
schedule_dict['used_flags'] = sorted(list(self.schedule.used_flags))
schedule_dict['ext_attr'] = self.schedule.ext_attr
schedule_dict['flags_attr_id'] = self.schedule.flags_attr_id
# We intentionally don't export id_reg attribute here - it's collected
# during import
        # serialize a copy so the schedule's own changelog keeps datetime objects
        schedule_dict['changelog'] = {}
        for rev, record in self.schedule.changelog.items():
            record = dict(record)
            record['date'] = record['date'].strftime(self.changelog_date_format)
            schedule_dict['changelog'][rev] = record
schedule_dict['tasks'] = []
self.schedule.task_id_reg = set()
for task in self.schedule.tasks:
schedule_dict['tasks'].append(self.export_task_as_dict(task))
return schedule_dict
def export_task_as_dict(self, task, parent_slug=''):
task_export = {}
task_export['slug'] = task.slug
task_export['index'] = task.index
task_export['_level'] = task.level
task_export['name'] = task.name
task_export['priority'] = task.priority
task_export['complete'] = task.p_complete
task_export['type'] = task.get_type()
task_export['flags'] = task.flags
if task.note:
task_export['note'] = task.note
if task.link:
task_export['link'] = task.link
task_export['parentTask'] = parent_slug
task_export['start'] = task.dStart.strftime('%s')
task_export['end'] = task.dFinish.strftime('%s')
if task.tasks: # task has subtasks
# prepare tasklist
task_export['tasks'] = []
for subtask in task.tasks:
task_export['tasks'].append(self.export_task_as_dict(subtask, task.slug))
return task_export
| 0.289975 | 0.086439 |
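A minimal document accepted by import_schedule above, reduced to the keys the importer requires; timestamps are epoch seconds serialized as strings, and every value is illustrative.

```json
{
    "slug": "sample-1-0",
    "name": "Sample 1.0",
    "start": "1700000000",
    "end": "1710000000",
    "tasks": [
        {
            "slug": "sample-1-0.ga",
            "index": 1,
            "_level": 1,
            "name": "GA",
            "priority": 500,
            "complete": 100,
            "type": "Milestone",
            "flags": [],
            "start": "1710000000",
            "end": "1710000000"
        }
    ]
}
```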
import logging
from lxml import etree
from schedules_tools.schedule_handlers import ScheduleHandlerBase
log = logging.getLogger(__name__)
class ScheduleHandler_confluencehtml(ScheduleHandlerBase):
provide_export = True
handle_deps_satisfied = True
default_export_ext = 'html'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.options.get('date_format', False):
self.options['date_format'] = '%a %Y-%m-%d'
@classmethod
def is_valid_source(cls, handle=None):
return False
def _export_task(self, e_parent, task, top=True):
if top: # top level - make p
e_p = etree.SubElement(e_parent, 'p')
e_strong = etree.SubElement(e_p, 'strong')
e_strong.text = task.name
else:
e_li = etree.SubElement(e_parent, 'li')
e_li.text = f'{task.name}'
e_li.attrib['role'] = 'checkbox'
if hasattr(task, 'user'):
e_user = etree.SubElement(e_li, 'em')
e_user.text = f' [{task.user}]'
if len(task.tasks):
e_ul = etree.SubElement(e_parent, 'ul')
e_ul.attrib['class'] = 'inline-task-list'
            for subtask in task.tasks:
                self._export_task(e_ul, subtask, top=False)
if top:
etree.SubElement(e_parent, 'br')
# Schedule
def export_schedule(self, out_file=None):
e_html = etree.Element('html')
e_head = etree.SubElement(e_html, 'head')
etree.SubElement(e_head, 'meta', charset="utf-8")
if self.options.get('html_title', False):
title = self.options['html_title']
else:
title = self.schedule.name
title_date_fmt = '%b %-d'
start_date = self.schedule.dStart.strftime(title_date_fmt)
finish_date = self.schedule.dFinish.strftime(title_date_fmt)
title_text = f'{title} ({start_date} - {finish_date})'
e_title = etree.SubElement(e_head, 'title')
e_title.text = title_text
e_body = etree.SubElement(e_html, 'body')
e_h = etree.SubElement(e_body, 'h1')
e_h.text = title_text
etree.SubElement(e_body, 'br')
for task in self.schedule.tasks:
self._export_task(e_body, task)
etree_return = etree.ElementTree(e_html)
if out_file:
etree_return.write(out_file, pretty_print=True, encoding="utf-8",
xml_declaration=False)
        # str() on an ElementTree yields its repr, not markup - serialize instead
        return etree.tostring(etree_return, pretty_print=True, encoding='unicode')
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/schedule_handlers/confluencehtml.py
|
confluencehtml.py
|
import logging
from lxml import etree
from schedules_tools.schedule_handlers import ScheduleHandlerBase
log = logging.getLogger(__name__)
class ScheduleHandler_confluencehtml(ScheduleHandlerBase):
provide_export = True
handle_deps_satisfied = True
default_export_ext = 'html'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.options.get('date_format', False):
self.options['date_format'] = '%a %Y-%m-%d'
@classmethod
def is_valid_source(cls, handle=None):
return False
def _export_task(self, e_parent, task, top=True):
if top: # top level - make p
e_p = etree.SubElement(e_parent, 'p')
e_strong = etree.SubElement(e_p, 'strong')
e_strong.text = task.name
else:
e_li = etree.SubElement(e_parent, 'li')
e_li.text = f'{task.name}'
e_li.attrib['role'] = 'checkbox'
if hasattr(task, 'user'):
e_user = etree.SubElement(e_li, 'em')
e_user.text = f' [{task.user}]'
if len(task.tasks):
e_ul = etree.SubElement(e_parent, 'ul')
e_ul.attrib['class'] = 'inline-task-list'
            for subtask in task.tasks:
                self._export_task(e_ul, subtask, top=False)
if top:
etree.SubElement(e_parent, 'br')
# Schedule
def export_schedule(self, out_file=None):
e_html = etree.Element('html')
e_head = etree.SubElement(e_html, 'head')
etree.SubElement(e_head, 'meta', charset="utf-8")
if self.options.get('html_title', False):
title = self.options['html_title']
else:
title = self.schedule.name
title_date_fmt = '%b %-d'
start_date = self.schedule.dStart.strftime(title_date_fmt)
finish_date = self.schedule.dFinish.strftime(title_date_fmt)
title_text = f'{title} ({start_date} - {finish_date})'
e_title = etree.SubElement(e_head, 'title')
e_title.text = title_text
e_body = etree.SubElement(e_html, 'body')
e_h = etree.SubElement(e_body, 'h1')
e_h.text = title_text
etree.SubElement(e_body, 'br')
for task in self.schedule.tasks:
self._export_task(e_body, task)
etree_return = etree.ElementTree(e_html)
if out_file:
etree_return.write(out_file, pretty_print=True, encoding="utf-8",
xml_declaration=False)
        # str() on an ElementTree yields its repr, not markup - serialize instead
        return etree.tostring(etree_return, pretty_print=True, encoding='unicode')
| 0.48438 | 0.102394 |
from datetime import datetime
import logging
import pytz
log = logging.getLogger(__name__)
# Handler implementations must be named in the format ScheduleHandler_format,
# where 'format' is used as a unique label for the format and
# 'ScheduleHandler' can be any prefix.
class ScheduleHandlerBase(object):
handle = None
schedule = None
    # This flag indicates the ability to export the internal intermediate
    # structure (Schedule) into the handler's format. It's read by
    # ScheduleConverter during autodiscovery and used to provide an accurate
    # help message in the CLI.
# TODO: add provide_import to be complete?
provide_export = False
provide_changelog = False
provide_mtime = False
options = {}
default_export_ext = None
    # Handlers can depend on additional python modules. We don't require
    # users to have all of them installed if the handlers aren't used.
    # This flag indicates that the handler can be fully utilized and that
    # no dependent packages are missing.
handle_deps_satisfied = False
    def __init__(self, handle=None, schedule=None, options=None):
        self.schedule = schedule
        self.options = options if options is not None else {}  # avoid a shared mutable default
# set handle last - there might be custom processing that requires options to already be set
self.handle = handle # 'handle' is source/target of schedule in general
def _write_to_file(self, content, filename):
with open(filename, 'wb') as fp:
fp.write(content.strip().encode('UTF-8'))
def get_handle_mtime(self):
""" Implement only if schedule handler is able to get mtime directly
without storage """
raise NotImplementedError
def handle_modified_since(self, mtime):
""" Return boolean to be able to bypass processing """
# Return False only when able to tell otherwise return True
modified = True
if isinstance(mtime, datetime):
try:
handle_mtime = self.get_handle_mtime()
            except NotImplementedError:
                handle_mtime = None  # keep the check below from raising NameError
# we're working with TZ naive dates (but in UTC)
if handle_mtime:
if handle_mtime.tzinfo is not None:
handle_mtime = handle_mtime.astimezone(pytz.utc).replace(tzinfo=None)
if handle_mtime <= mtime:
modified = False
return modified
def get_handle_changelog(self):
raise NotImplementedError
# handle - file/link/smartsheet id
def import_schedule(self):
raise NotImplementedError
def export_schedule(self):
raise NotImplementedError
def build_schedule(self):
raise NotImplementedError
@classmethod
def is_valid_source(cls, handle=None):
"""Method returns True, if the specific handler is able to work with
given handle"""
return False
def extract_backup(self, handle=None):
"""Prepare files which need a backup in case of external source"""
return []
|
schedules-tools
|
/schedules_tools-8.16.1-py3-none-any.whl/schedules_tools/schedule_handlers/__init__.py
|
__init__.py
|
from datetime import datetime
import logging
import pytz
log = logging.getLogger(__name__)
# Handler implementations must be named in the format ScheduleHandler_format,
# where 'format' is used as a unique label for the format and
# 'ScheduleHandler' can be any prefix.
class ScheduleHandlerBase(object):
handle = None
schedule = None
    # This flag indicates the ability to export the internal intermediate
    # structure (Schedule) into the handler's format. It's read by
    # ScheduleConverter during autodiscovery and used to provide an accurate
    # help message in the CLI.
# TODO: add provide_import to be complete?
provide_export = False
provide_changelog = False
provide_mtime = False
options = {}
default_export_ext = None
    # Handlers can depend on additional python modules. We don't require
    # users to have all of them installed if the handlers aren't used.
    # This flag indicates that the handler can be fully utilized and that
    # no dependent packages are missing.
handle_deps_satisfied = False
    def __init__(self, handle=None, schedule=None, options=None):
        self.schedule = schedule
        self.options = options if options is not None else {}  # avoid a shared mutable default
# set handle last - there might be custom processing that requires options to already be set
self.handle = handle # 'handle' is source/target of schedule in general
def _write_to_file(self, content, filename):
with open(filename, 'wb') as fp:
fp.write(content.strip().encode('UTF-8'))
def get_handle_mtime(self):
""" Implement only if schedule handler is able to get mtime directly
without storage """
raise NotImplementedError
def handle_modified_since(self, mtime):
""" Return boolean to be able to bypass processing """
# Return False only when able to tell otherwise return True
modified = True
if isinstance(mtime, datetime):
try:
handle_mtime = self.get_handle_mtime()
            except NotImplementedError:
                handle_mtime = None  # keep the check below from raising NameError
# we're working with TZ naive dates (but in UTC)
if handle_mtime:
if handle_mtime.tzinfo is not None:
handle_mtime = handle_mtime.astimezone(pytz.utc).replace(tzinfo=None)
if handle_mtime <= mtime:
modified = False
return modified
def get_handle_changelog(self):
raise NotImplementedError
# handle - file/link/smartsheet id
def import_schedule(self):
raise NotImplementedError
def export_schedule(self):
raise NotImplementedError
def build_schedule(self):
raise NotImplementedError
@classmethod
def is_valid_source(cls, handle=None):
"""Method returns True, if the specific handler is able to work with
given handle"""
return False
def extract_backup(self, handle=None):
"""Prepare files which need a backup in case of external source"""
return []
| 0.405096 | 0.164516 |
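A hypothetical minimal exporter built on the base class above, following the ScheduleHandler_format naming rule it documents; the class and its output format are illustrative.

```python
# Hypothetical plain-text exporter; only the hooks it needs are overridden.
class ScheduleHandler_txt(ScheduleHandlerBase):
    provide_export = True
    handle_deps_satisfied = True   # stdlib only, nothing extra to install
    default_export_ext = 'txt'

    def _flatten(self, task, lines):
        lines.append('  ' * (task.level - 1) + task.name)
        for subtask in task.tasks:
            self._flatten(subtask, lines)

    def export_schedule(self, out_file):
        lines = [self.schedule.name]
        for task in self.schedule.tasks:
            self._flatten(task, lines)
        self._write_to_file('\n'.join(lines), out_file)
```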
# Schedules - Create non-blocking scheduled tasks.
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install schedules.
```bash
pip install schedules
```
## Usage
### Timer
Here is a simple scheduled timer task example.
After 3 days, 1 hour, 30 minutes and 10 seconds, `task` will execute.
```python
import schedules
# example task
def task(argument):
print(argument)
# initialize a timer
timer = schedules.timer()
# Start the timer.
# "repeat=True" means the task not execute only once,
# it will execute every 3 days, 1 hour, 30 minutes and 10 seconds infinite times.
timer.day(3).hour(1).minute(30).second(10).start(target=task, args=("Example",), repeat=True)
```
If the task is asynchronous, you can do this:
```python
import schedules
# example task
async def task(argument):
print(argument)
# initialize a timer
timer = schedules.timer()
# Start the timer.
timer.day(3).hour(1).minute(30).second(10).start(target=task, args=("Example",), asynchronous=True)
```
### Every
If you want the task to execute at a specific time rather than after a delay, you can use `every`.
Every time the minutes are 0 (every new hour), `task` will execute.
```python
import schedules
# example task
def task(argument):
print(argument)
# initialize "every"
every = schedules.every()
# Start the timer.
# "repeat=True" means the task not execute only once,
# it will execute every time the minutes are 0 (every new hour) infinite times.
every.minute(0).start(target=task, args=("Example",), repeat=True)
# This code will execute the task every day at 2:30:10 PM:
every.hour(14).minute(30).second(10).start(target=task, args=("Example",), repeat=True)
```
If the task is asynchronous, you can do this:
```python
import schedules
# example task
async def task(argument):
print(argument)
# initialize "every"
every = schedules.every()
# Start the timer.
every.hour(14).minute(30).second(10).start(target=task, args=("Example",), asynchronous=True)
```
## Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
Please make sure to update tests as appropriate.
## License
[MIT](https://choosealicense.com/licenses/mit/)
|
schedules
|
/schedules-1.3.tar.gz/schedules-1.3/README.md
|
README.md
|
pip install schedules
import schedules
# example task
def task(argument):
print(argument)
# initialize a timer
timer = schedules.timer()
# Start the timer.
# "repeat=True" means the task not execute only once,
# it will execute every 3 days, 1 hour, 30 minutes and 10 seconds infinite times.
timer.day(3).hour(1).minute(30).second(10).start(target=task, args=("Example",), repeat=True)
import schedules
# example task
async def task(argument):
print(argument)
# initialize a timer
timer = schedules.timer()
# Start the timer.
timer.day(3).hour(1).minute(30).second(10).start(target=task, args=("Example",), asynchronous=True)
import schedules
# example task
def task(argument):
print(argument)
# initialize "every"
every = schedules.every()
# Start the timer.
# "repeat=True" means the task not execute only once,
# it will execute every time the minutes are 0 (every new hour) infinite times.
every.minute(0).start(target=task, args=("Example",), repeat=True)
# This code will execute the task every day at 2:30:10 PM:
every.hour(14).minute(30).second(10).start(target=task, args=("Example",), repeat=True)
import schedules
# example task
async def task(argument):
print(argument)
# initialize "every"
every = schedules.every()
# Start the timer.
every.hour(14).minute(30).second(10).start(target=task, args=("Example",), asynchronous=True)
| 0.402979 | 0.839076 |
import json
import random
import uuid
import xml.etree.ElementTree as ET
from functools import lru_cache
from io import BytesIO
import schedulesy_qrcode.generate
from schedulesy_qrcode.generate import multi
from schedulesy_qrcode.render import render
class ADE_Parser:
FLAT = "flat.json"
tree = {}
def __init__(self, client):
self.s3_client = client
self.flat = (
{"rooms": {}, "paths": {}}
if not self.s3_client.exists(ADE_Parser.FLAT)
else json.loads(self.s3_client.get(ADE_Parser.FLAT))
)
def hash_room(self, room):
content = room["path"].copy()
content.append(room["name"])
content.append(",".join(list(map(str, room["color"]))))
return self.hash_path(content)
def hash_path(self, path):
return self._hash("%%".join(path))
@lru_cache(maxsize=1000)
def _hash(self, path):
return str(uuid.uuid3(uuid.NAMESPACE_DNS, path))
def random_color(self):
color = [random.randint(0, 255) for _ in range(3)]
color[random.randint(0, 2)] = 0
return color
def dig(self, path, element, center_color, branch):
rooms = []
def compare(room_list, building_color):
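            # rooms are flushed four at a time: each full group becomes one
            # printed sheet ("planche") of QR codes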
final_color = building_color
if len(room_list) == 4:
changed = False
branch[f"Planche {multi(rooms)}"] = {}
for room in room_list:
if self.hash_path(room["path"]) in self.flat["paths"]:
final_color = self.flat["paths"][self.hash_path(room["path"])]
room["color"] = final_color
if room["id"] in self.flat["rooms"]:
                        changed |= self.flat["rooms"][room["id"]] != self.hash_room(room)
else:
changed = True
branch[f"Planche {multi(rooms)}"][room['name']] = room
if changed:
for room in room_list:
self.flat["rooms"][room["id"]] = self.hash_room(room)
self.flat["paths"][self.hash_path(room["path"])] = final_color
schedulesy_qrcode.generate.generate(
room_list, final_color, self.s3_client
)
self.save_progress()
room_list = []
return room_list
for child in element:
if child.tag == "leaf":
rooms.append(
{
"id": child.attrib["id"],
"path": path,
"name": child.attrib["name"],
"color": center_color,
}
)
rooms = compare(rooms, center_color)
if child.tag == "branch":
branch[child.attrib["name"]] = {}
sub_d = branch[child.attrib["name"]]
new_path = path.copy()
new_path.append(child.attrib["name"])
color = self.random_color()
self.dig(new_path, child, color, sub_d)
if len(rooms) > 0:
# print("Duplicating rooms")
while len(rooms) < 4:
rooms.append(rooms[0])
rooms = compare(rooms, center_color)
def save_progress(self):
self.s3_client.upload(
BytesIO(json.dumps(self.tree).encode("UTF-8")),
"tree.json",
"application/json",
)
self.s3_client.upload(
BytesIO(json.dumps(self.flat).encode("UTF-8")),
ADE_Parser.FLAT,
"application/json",
)
# open("out.json", "w").write(json.dumps(self.tree))
# open("flat.json", "w").write(json.dumps(self.flat))
def parse(self, content):
classrooms = ET.fromstring(content)[0]
self.dig([], classrooms, self.random_color(), self.tree)
self.save_progress()
ordered_tree = json.loads(json.dumps(self.tree, sort_keys=True))
# open("ordered_tree.json", "w").write(json.dumps(ordered_tree))
        html = render(ordered_tree)  # render once, reuse for the upload and the local copy
        self.s3_client.upload(
            BytesIO(html.encode("UTF-8")), "index.html", "text/html"
        )
        with open("index.html", "w") as fd:
            fd.write(html)
|
schedulesy-qrcode
|
/schedulesy_qrcode-1.0.3-py3-none-any.whl/schedulesy_qrcode/parse.py
|
parse.py
|
import json
import random
import uuid
import xml.etree.ElementTree as ET
from functools import lru_cache
from io import BytesIO
import schedulesy_qrcode.generate
from schedulesy_qrcode.generate import multi
from schedulesy_qrcode.render import render
class ADE_Parser:
FLAT = "flat.json"
tree = {}
def __init__(self, client):
self.s3_client = client
self.flat = (
{"rooms": {}, "paths": {}}
if not self.s3_client.exists(ADE_Parser.FLAT)
else json.loads(self.s3_client.get(ADE_Parser.FLAT))
)
def hash_room(self, room):
content = room["path"].copy()
content.append(room["name"])
content.append(",".join(list(map(str, room["color"]))))
return self.hash_path(content)
def hash_path(self, path):
return self._hash("%%".join(path))
@lru_cache(maxsize=1000)
def _hash(self, path):
return str(uuid.uuid3(uuid.NAMESPACE_DNS, path))
def random_color(self):
color = [random.randint(0, 255) for _ in range(3)]
color[random.randint(0, 2)] = 0
return color
def dig(self, path, element, center_color, branch):
rooms = []
def compare(room_list, building_color):
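            # rooms are flushed four at a time: each full group becomes one
            # printed sheet ("planche") of QR codes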
final_color = building_color
if len(room_list) == 4:
changed = False
branch[f"Planche {multi(rooms)}"] = {}
for room in room_list:
if self.hash_path(room["path"]) in self.flat["paths"]:
final_color = self.flat["paths"][self.hash_path(room["path"])]
room["color"] = final_color
if room["id"] in self.flat["rooms"]:
                        changed |= self.flat["rooms"][room["id"]] != self.hash_room(room)
else:
changed = True
branch[f"Planche {multi(rooms)}"][room['name']] = room
if changed:
for room in room_list:
self.flat["rooms"][room["id"]] = self.hash_room(room)
self.flat["paths"][self.hash_path(room["path"])] = final_color
schedulesy_qrcode.generate.generate(
room_list, final_color, self.s3_client
)
self.save_progress()
room_list = []
return room_list
for child in element:
if child.tag == "leaf":
rooms.append(
{
"id": child.attrib["id"],
"path": path,
"name": child.attrib["name"],
"color": center_color,
}
)
rooms = compare(rooms, center_color)
if child.tag == "branch":
branch[child.attrib["name"]] = {}
sub_d = branch[child.attrib["name"]]
new_path = path.copy()
new_path.append(child.attrib["name"])
color = self.random_color()
self.dig(new_path, child, color, sub_d)
if len(rooms) > 0:
# print("Duplicating rooms")
while len(rooms) < 4:
rooms.append(rooms[0])
rooms = compare(rooms, center_color)
def save_progress(self):
self.s3_client.upload(
BytesIO(json.dumps(self.tree).encode("UTF-8")),
"tree.json",
"application/json",
)
self.s3_client.upload(
BytesIO(json.dumps(self.flat).encode("UTF-8")),
ADE_Parser.FLAT,
"application/json",
)
# open("out.json", "w").write(json.dumps(self.tree))
# open("flat.json", "w").write(json.dumps(self.flat))
def parse(self, content):
classrooms = ET.fromstring(content)[0]
self.dig([], classrooms, self.random_color(), self.tree)
self.save_progress()
ordered_tree = json.loads(json.dumps(self.tree, sort_keys=True))
# open("ordered_tree.json", "w").write(json.dumps(ordered_tree))
self.s3_client.upload(
BytesIO(render(ordered_tree).encode("UTF-8")), "index.html", "text/html"
)
open("index.html", "w").write(render(ordered_tree))
| 0.259638 | 0.141045 |
import io
import boto3
from schedulesy_qrcode.config import S3_CONF
class S3_client:
def __init__(self):
self.client = boto3.client(
"s3",
aws_access_key_id=S3_CONF['access_key'],
aws_secret_access_key=S3_CONF['secret_key'],
endpoint_url=S3_CONF['endpoint'],
)
self.bucket = S3_CONF['bucket']
response = self.client.list_buckets()
if self.bucket in [b["Name"] for b in response["Buckets"]]:
print(f"🪣 Bucket {self.bucket} already exists")
else:
print(f"🪣 Creating bucket {self.bucket}")
self.client.create_bucket(Bucket=self.bucket)
self.client.put_bucket_cors(
Bucket=self.bucket,
CORSConfiguration={
"CORSRules": [
{
"AllowedMethods": ["GET", "HEAD"],
"AllowedOrigins": [
"*",
],
"ExposeHeaders": ["*"],
"AllowedHeaders": ["Content-Type", "Authorization"],
}
]
},
)
def upload(self, content, filename, mime_type):
print(f"⬆️ Uploading file {filename}")
self.client.upload_fileobj(
content,
self.bucket,
filename,
ExtraArgs={
"ContentType": mime_type,
"ACL": "public-read",
},
)
def get(self, filename):
print(f"⬇️ Downloading {filename}")
output = io.BytesIO()
self.client.download_fileobj(self.bucket, filename, output)
return output.getvalue()
    def exists(self, filename):
        # a single prefixed listing is enough; "Contents" is absent when no key matches
        response = self.client.list_objects_v2(Bucket=self.bucket, Prefix=filename)
        return "Contents" in response and filename in [
            item["Key"] for item in response["Contents"]
        ]
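# Usage sketch (credentials and bucket come from S3_CONF; the content below is illustrative):
#
#   client = S3_client()
#   client.upload(io.BytesIO(b"hello"), "hello.txt", "text/plain")
#   client.get("hello.txt")     # -> b"hello"
#   client.exists("hello.txt")  # -> True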
|
schedulesy-qrcode
|
/schedulesy_qrcode-1.0.3-py3-none-any.whl/schedulesy_qrcode/ceph.py
|
ceph.py
|
| 0.345989 | 0.108142 |
import io
import qrcode
from PIL import Image, ImageDraw, ImageFont, ImageOps
from qrcode.image.styledpil import StyledPilImage
from qrcode.image.styles.colormasks import RadialGradiantColorMask
from qrcode.image.styles.moduledrawers import RoundedModuleDrawer
from schedulesy_qrcode.config import FONT_CONF, QR_CONF
def generate(rooms, color, client):
def save_image(image, filename):
output = io.BytesIO()
image.save(output, "png")
output.seek(0)
client.upload(output, filename, "image/png")
output.close()
def single_room(room):
print(f'🎨 Generating {".".join(room["path"])}.{room["name"]} ({room["id"]})')
qr = qrcode.QRCode(error_correction=qrcode.constants.ERROR_CORRECT_H)
qr.add_data(f'{QR_CONF["url"]}/public/{room["id"]}')
image = qr.make_image(
image_factory=StyledPilImage,
module_drawer=RoundedModuleDrawer(),
color_mask=RadialGradiantColorMask(
back_color=(255, 255, 255), center_color=color, edge_color=(0, 0, 0)
),
embeded_image_path=QR_CONF['logo'],
)
header = QR_CONF['header']
def split(a, n):
k, m = divmod(len(a), n)
return (
a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)
)
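        # worked example: split(["A", "B", "C", "D", "E"], 3)
        #   -> ["A", "B"], ["C", "D"], ["E"] (three nearly equal chunks)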
footer = "\n".join(
[" - ".join(x) for x in list(split(room["path"] + [room["name"]], 3))]
)
expanded_image = ImageOps.expand(image, border=20, fill="white")
        # Define fonts for the header and footer text in the border
big_font = ImageFont.truetype(FONT_CONF['path'], int(FONT_CONF['header']))
small_font = ImageFont.truetype(FONT_CONF['path'], int(FONT_CONF['footer']))
# Instantiate draw object & add desired text
draw_object = ImageDraw.Draw(expanded_image)
draw_object.text(xy=(60, 10), text=header, fill=(0, 0, 0), font=big_font)
draw_object.text(
xy=(60, expanded_image.height - 55),
text=footer,
fill=(0, 0, 0),
font=small_font,
)
bordered = ImageOps.expand(expanded_image, border=10, fill=tuple(color))
# Preview the image
# bordered.show()
# Save the image
# bordered.save(f'out/{room["id"]}.png')
save_image(bordered, f'{room["id"]}.png')
return bordered
images = list(map(single_room, rooms))
w, h = images[0].size
separation = 2
# create big empty image with place for images
new_image = Image.new(
"RGB", (w * 2 + separation, h * 2 + separation), color="white"
)
# put images on new_image
new_image.paste(images[0], (0, 0))
new_image.paste(images[1], (w + separation, 0))
new_image.paste(images[2], (0, h + separation))
new_image.paste(images[3], (w + separation, h + separation))
s_ids = multi(rooms)
# save it
print(f"🎨 Generating {s_ids}")
# new_image.save(f'out/{"-".join(s_ids)}.png')
save_image(new_image, f"{s_ids}.png")
def multi(rooms):
ids = [int(room["id"]) for room in rooms]
ids.sort()
return "-".join(list(map(str, ids)))
|
schedulesy-qrcode
|
/schedulesy_qrcode-1.0.3-py3-none-any.whl/schedulesy_qrcode/generate.py
|
generate.py
|
| 0.565539 | 0.263608 |
# scheduletask
Script to quickly create Google Calendar events from the command line. I created this script as a means to quickly jot down ideas for later. Pulling up GNOME Calendar and using my mouse is *of course* too much work.
I'm working on a PyPI release soon; I didn't realize I couldn't just *upload* it and have it work. I thought computers were magic, damnit!
# Requirements
## Google Cloud Project
Follow the Prerequisites in this link: https://developers.google.com/calendar/api/quickstart/python. The downloaded credentials should be renamed to credentials.json.
On Windows, the file should be placed at: `C:\Users\$USER\.credentials\credentials.json`
On Linux, it should be placed at:
`/home/$USER/.credentials/credentials.json`
## Google Calendar Simple API
https://github.com/kuzmoyev/google-calendar-simple-api
`pip install gcsa`
## Beautiful Date
https://github.com/kuzmoyev/beautiful-date
`pip install beautiful-date`
You also need an environment variable called "SCHEDULE_EMAIL" containing your email address. The following instructions create it temporarily, for testing purposes.
How to create one on Linux:
`export SCHEDULE_EMAIL="[email protected]"`
How to create one on Windows (Powershell):
`$env:SCHEDULE_EMAIL = "[email protected]"`
# Roadmap:
Create some GitHub Actions workflows to automate package publishing alongside testing (for Windows, Linux, and Mac (soon)).
|
scheduletask
|
/scheduletask-2.0.1.tar.gz/scheduletask-2.0.1/README.md
|
README.md
|
| 0.477554 | 0.205197 |
# Scheduler Utils
Implements some scheduling functions to decay or ramp values across timestamps.
Install with: `pip install scheduling_utils`
To initialize and use for example a cosine scheduler, do the following:
```python
from scheduling_utils.schedulers import CosineScheduler
start_step = 0
stop_step = 10
start_value = 5
stop_value = 20
scheduler = CosineScheduler(start_step, stop_step, start_value, stop_value)
# get values corresponding to step
for step in range(10):
value = scheduler.step(step)
```
Schedulers available in the current version:
- Linear


- Cosine


- LinearCosine

|
scheduling-utils
|
/scheduling_utils-0.1.2.tar.gz/scheduling_utils-0.1.2/README.md
|
README.md
|
| 0.467332 | 0.852383 |
from abc import ABC, abstractmethod
import math
class Scheduler(ABC):
"""
Base abstract class for all schedulers
"""
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float):
        if start_step >= stop_step:
            raise AttributeError('In the scheduler, start step must be less than stop step!')
if start_value < stop_value:
print('Initializing Scheduler to Ramp Value')
elif start_value > stop_value:
print('Initializing Scheduler to Decay Value')
else:
print('Initializing Scheduler with no effect!')
self._start_step = start_step
self._stop_step = stop_step
self._start_value = start_value
self._stop_value = stop_value
@abstractmethod
def warp_func(self, perc_step: float):
pass
def _get_perc_step(self, step: int):
# get step normalized in 0_1 range
return max(0, min(1, (step - self._start_step) / (self._stop_step - self._start_step)))
def _get_value(self, perc_step: float):
# get value at perc_step
return self._start_value + (self._stop_value - self._start_value) * perc_step
def step(self, step: int):
# step normalized in 0_1 range
perc_step = self._get_perc_step(step)
# warp perc according to scheduler type
perc_step = self.warp_func(perc_step)
return self._get_value(perc_step)
class CosineScheduler(Scheduler):
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float):
super().__init__(start_step, stop_step, start_value, stop_value)
    def warp_func(self, perc_step: float):
        # math.cos(math.pi * perc_step) goes from 1 to -1;
        # add 1 and multiply by 0.5 to normalize it to the 1..0 range,
        # then subtract from 1 since the output should still be a 0..1 progress value
        return 1 - (0.5 * (1. + math.cos(math.pi * perc_step)))
class LinearScheduler(Scheduler):
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float):
super().__init__(start_step, stop_step, start_value, stop_value)
def warp_func(self, perc_step: float):
# Identity warp
return perc_step
class LinearCosineScheduler:
def __init__(self, start_step: int, stop_step: int, start_value: float, stop_value: float, th_step: int):
"""
Linear Warmup Followed by Cosine Decay.
Learning rate increases from start_step tp th_step (0.0 to start_value) and then decays to stop_value
"""
if start_value <= stop_value:
raise AttributeError('the LinearCosine Scheduler must decay.')
if start_step >= stop_step:
raise AttributeError('In the scheduler, start step must be minor that stop step!')
if not start_step < th_step and th_step < stop_step:
raise AttributeError('In the scheduler, threshold step must lay between start and stop steps!')
super().__init__()
self.th_step = th_step
self.linear_wu = LinearScheduler(start_step, th_step, 0, start_value)
self.cosine_decay = CosineScheduler(th_step, stop_step, start_value, stop_value)
def step(self, step: int):
if step < self.th_step:
return self.linear_wu.step(step)
else:
return self.cosine_decay.step(step)
|
scheduling-utils
|
/scheduling_utils-0.1.2.tar.gz/scheduling_utils-0.1.2/scheduling_utils/schedulers.py
|
schedulers.py
|
| 0.8758 | 0.304617 |
=========
Changelog
=========
0.1.6 (2021-08-09)
==================
* Improve performance by building pycalphad phase records in simulations (:issue:`21`)
0.1.5 (2021-06-12)
==================
* Fix a floating point bug where equilibrium solidification could get stuck in an infinite loop after a binary search (:issue:`15`)
* Fix a bug where the disordered part of partitioned phases would not be counted as solidified solid if it became stable (:issue:`16`)
0.1.4 (2020-11-14)
==================
* Fix to PyPI distribution
0.1.3 (2020-11-14)
==================
This is a minor release containing some maintenance changes and bug fixes
* Don't automatically remove the "GAS" phase from the set of solid phases (:issue:`12`)
* Call filter_phases to remove phases that cannot exist (:issue:`11`)
* Developers: switch to GitHub Actions instead of Travis-CI
0.1.2 (2020-01-29)
==================
* Equilibrium solidification improvements
* Make points updating adaptive
* Convergence checking
* Enable order-disorder deconvolution
0.1.1 (2020-01-23)
==================
* Packaging fixes
* Updated LICENSE
0.1 (2020-01-23)
==================
Initial release
* Perform Scheil-Gulliver and equilibrium solidification simulations
|
scheil
|
/scheil-0.1.6.tar.gz/scheil-0.1.6/CHANGES.rst
|
CHANGES.rst
|
| 0.87938 | 0.218419 |
Releasing scheil
================
Create a release of scheil
--------------------------
To release a new version of scheil:
These steps assume that ``0.1`` is the most recently tagged version number and ``0.2`` is the next version number to be released.
Replace their values with the last public release's version number and the new version number as appropriate.
#. Determine what the next version number should be using `semantic versioning <https://semver.org/>`_.
#. Resolve or defer all pull requests and issues tagged with the upcoming version milestone.
#. ``git stash`` to save any uncommitted work.
#. ``git checkout master``
#. ``git pull`` to make sure you haven't missed any last-minute commits. **After this point, nothing else is making it into this version.**
#. ``pytest`` to ensure that all tests pass locally.
#. ``sphinx-apidoc -f -H 'API Documentation' -o docs/api/ scheil`` to regenerate the API documentation.
#. Update ``CHANGES.rst`` with a human-readable list of changes since the last commit.
``git log --oneline --no-decorate --color 0.1^..master`` can be used to list the changes since the last version.
#. ``git add docs/api CHANGES.rst`` to stage the updated documentation.
#. ``git commit -m "REL: 0.2"`` to commit the changes.
#. ``git push origin master``
#. **Verify that all continuous integration test and build workflows pass.**
#. Create a release on GitHub
#. Go to https://github.com/pycalphad/scheil/releases/new
#. Set the "Tag version" field to ``0.2``.
#. Set the branch target to ``master``.
#. Set the "Release title" to ``scheil 0.2``.
#. Leave the description box blank.
#. If this version is a pre-release, check the "This is a pre-release" box.
#. Click "Publish release".
#. The new version will be available on PyPI when the ``Build and deploy to PyPI`` workflow on GitHub Actions finishes successfully.
Deploy to PyPI (manually)
-------------------------
.. warning::
DO NOT FOLLOW THESE STEPS unless the GitHub Actions deployment workflow is broken.
Creating a GitHub release should trigger the ``Build and deploy to PyPI`` workflow on GitHub Actions that will upload source and platform-dependent wheel distributions automatically.
To release a source distribution to PyPI:
#. If deploying for the first time: ``pip install twine build``
#. ``rm -R dist/*`` on Linux/OSX or ``del dist/*`` on Windows
#. ``git checkout master`` to checkout the latest version
#. ``git pull``
#. ``git log`` to verify the repository state matches the newly created tag
#. ``python -m build --sdist``
#. **Make sure that the script correctly detected the new version exactly and not a dirty / revised state of the repo.**
#. ``twine upload dist/*`` to upload (assumes a `correctly configured <https://packaging.python.org/specifications/pypirc/>`_ ``~/.pypirc`` file)
Deploy to conda-forge (manually)
--------------------------------
The `conda-forge autotick bot`_ will automatically open a pull request in the
`conda-forge/scheil-feedstock`_ repository after the package has been uploaded
to PyPI. This usually happens within an hour of the PyPI release. If the
build succeeds, the PR will be merged automatically and scheil will usually be
available in an hour or two.
.. warning::
DO NOT FOLLOW THESE STEPS unless the pull request opened by the conda-forge
autotick bot on the `conda-forge/scheil-feedstock`_ was not merged
automatically and a new PR needs to be built manually.
Start with the commit checked out which was tagged with the new version.
1. Generate the SHA256 hash of the build artifact (tarball) submitted to PyPI.
Alternatively, the hashes can be found by clicking the "View" button for the
source distribution in the `PyPI download files table <https://pypi.org/project/scheil/#files>`_.
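   For example, a minimal Python sketch (the tarball name is illustrative):

   .. code-block:: python

      import hashlib

      with open("dist/scheil-0.2.tar.gz", "rb") as f:
          print(hashlib.sha256(f.read()).hexdigest())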
2. Fork the `conda-forge/scheil-feedstock`_ repo.
3. Update scheil version and sha256 strings in the ``recipe/meta.yaml`` file.
4. If any of the dependencies changed since the last release, make sure to update the ``recipe/meta.yaml`` file.
5. Submit a pull request to the main scheil feedstock repo.
6. Once the build completes successfully, merge the pull request.
.. _conda-forge autotick bot: https://github.com/regro-cf-autotick-bot
.. _conda-forge/scheil-feedstock: https://github.com/conda-forge/scheil-feedstock
|
scheil
|
/scheil-0.1.6.tar.gz/scheil-0.1.6/RELEASING.rst
|
RELEASING.rst
|
| 0.839603 | 0.616936 |
======
scheil
======
A Scheil-Gulliver simulation tool using `pycalphad`_.
.. image:: https://zenodo.org/badge/150358281.svg
:target: https://zenodo.org/badge/latestdoi/150358281
.. _pycalphad: http://pycalphad.org
.. code-block:: python
import matplotlib.pyplot as plt
from pycalphad import Database, variables as v
from scheil import simulate_scheil_solidification
# setup the simulation parameters
dbf = Database('alzn_mey.tdb')
comps = ['AL', 'ZN', 'VA']
phases = sorted(dbf.phases.keys())
liquid_phase_name = 'LIQUID'
initial_composition = {v.X('ZN'): 0.3}
start_temperature = 850
# perform the simulation
sol_res = simulate_scheil_solidification(dbf, comps, phases, initial_composition, start_temperature, step_temperature=1.0)
# plot the result
for phase_name, amounts in sol_res.cum_phase_amounts.items():
plt.plot(sol_res.temperatures, amounts, label=phase_name)
plt.plot(sol_res.temperatures, sol_res.fraction_liquid, label='LIQUID')
plt.ylabel('Phase Fraction')
plt.xlabel('Temperature (K)')
plt.title('Al-30Zn Scheil simulation, phase fractions')
plt.legend(loc='best')
plt.show()
.. image:: https://raw.githubusercontent.com/pycalphad/scheil/master/docs/_static/Al-30Zn_Scheil_simulation.png
:align: center
:alt: Phase fraction evolution during a Scheil simulation of Al-30Zn
Installation
============
pip (recommended)
-----------------
Installing scheil from PyPI is recommended.
.. code-block:: bash
pip install scheil
Anaconda
--------
.. code-block:: bash
conda install -c conda-forge scheil
Development versions
--------------------
To install an editable development version with pip:
.. code-block:: bash
git clone https://github.com/pycalphad/scheil.git
cd scheil
pip install --editable .[dev]
Upgrading scheil later requires you to run ``git pull`` in this directory.
Run the automated tests using
.. code-block:: bash
pytest
Theory
======
Uses classic Scheil-Gulliver theory (see G.H. Gulliver, *J. Inst. Met.* 9 (1913) 120–157 and Scheil, *Zeitschrift Für Met.* 34 (1942) 70–72.) with the following assumptions:
1. Perfect mixing in the liquid
2. Local equilibrium between solid and liquid
3. No diffusion in the solid
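Under these assumptions, the classic Scheil equation for a binary alloy with a constant partition coefficient :math:`k` relates the solid composition to the fraction solid :math:`f_s`:

.. math::

   C_s = k C_0 \left(1 - f_s\right)^{k - 1}

where :math:`C_0` is the overall alloy composition.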
Getting Help
============
For help on installing and using scheil, please join the `pycalphad/pycalphad Gitter room <https://gitter.im/pycalphad/pycalphad>`_.
Bugs and software issues should be reported on `GitHub <https://github.com/pycalphad/scheil/issues>`_.
License
=======
scheil is MIT licensed. See LICENSE.
Citing
======
.. image:: https://zenodo.org/badge/150358281.svg
:target: https://zenodo.org/badge/latestdoi/150358281
If you use the ``scheil`` package in your work, please cite the relevant version.
The following DOI, `doi:10.5281/zenodo.3630656 <https://doi.org/10.5281/zenodo.3630656>`_, will link to the latest released version of the code on Zenodo, where you can cite the specific version that you have used. For example, version 0.1.2 can be cited as:
::
Bocklund, Brandon, Bobbio, Lourdes D., Otis, Richard A., Beese, Allison M., & Liu, Zi-Kui. (2020, January 29). pycalphad-scheil: 0.1.2 (Version 0.1.2). Zenodo. http://doi.org/10.5281/zenodo.3630657
::
@software{bocklund_brandon_2020_3630657,
author = {Bocklund, Brandon and
Bobbio, Lourdes D. and
Otis, Richard A. and
Beese, Allison M. and
Liu, Zi-Kui},
title = {pycalphad-scheil: 0.1.2},
month = jan,
year = 2020,
publisher = {Zenodo},
version = {0.1.2},
doi = {10.5281/zenodo.3630657},
url = {https://doi.org/10.5281/zenodo.3630657}
}
|
scheil
|
/scheil-0.1.6.tar.gz/scheil-0.1.6/README.rst
|
README.rst
|
| 0.922005 | 0.736732 |
scheil package
==============
Submodules
----------
scheil.simulate module
----------------------
.. automodule:: scheil.simulate
:members:
:undoc-members:
:show-inheritance:
scheil.solidification\_result module
------------------------------------
.. automodule:: scheil.solidification_result
:members:
:undoc-members:
:show-inheritance:
scheil.utils module
-------------------
.. automodule:: scheil.utils
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: scheil
:members:
:undoc-members:
:show-inheritance:
|
scheil
|
/scheil-0.1.6.tar.gz/scheil-0.1.6/docs/api/scheil.rst
|
scheil.rst
|
| 0.814901 | 0.231419 |
<!--- This is a markdown file. Comments look like this --->
* [Overview](#overview)
* [Installation](#installation)
* [Usage](#usage)
---------------------------------------------------------------------------------
## Overview
---------------------------------------------------------------------------------
## Installation
---------------------------------------------------------------------------------
## Usage
---------------------------------------------------------------------------------
|
schelp
|
/schelp-2023.7.13.2.39.tar.gz/schelp-2023.7.13.2.39/README.md
|
README.md
|
| 0.666171 | 0.230302 |
# SChem
https://pypi.org/project/schem/. Install with `pip install schem`
Clean Room implementation of the backend of SpaceChem (https://www.zachtronics.com/spacechem).
## Usage (CLI)
```
python -m schem [-h] [--version] [-l LEVEL_FILE] [--max-cycles MAX_CYCLES]
[--check-precog] [--max-precog-check-cycles MAX_PRECOG_CHECK_CYCLES]
[--seed SEED] [--hash-states HASH_STATES]
[--export] [--no-run] [--strict]
[--json | --verbose] [--debug [DEBUG]]
[solution_files ...]
```
E.g. `python -m schem` will validate the cycles-reactors-symbols score of any solution export(s) in the user's clipboard. See `python -m schem --help` for details.
## Usage (python)
Supposing `level_export`, `solution_export` are strings as exported by SpaceChem CE:
```python
from schem import Level, Solution
# Load a solution
solution = Solution(solution_export) # Auto-use appropriate official level
solution = Solution(solution_export, level=level_export) # Custom level
solution = Solution(solution_export, level=Level(level_export)) # Alternative
# Run a solution
solution.run()
# => Score(cycles=45, reactors=1, symbols=14)
# Check the expected score that was in the export's metadata
solution.expected_score
# => Score(cycles=44, reactors=1, symbols=14)
# Reset the run state of a solution
solution.reset()
# Validate that a solution matched its expected score
solution.validate()
# =/> ScoreError("[Of Pancakes and Spaceships] 44-1-14 \"Cycles\" by Zig: Expected 44 cycles but got 45.")
# Check if a solution uses precognition
solution.is_precognitive() # slow
# => False
# Bundle method for calling validate() if expected score is present, else run(), optionally checking precog,
# and returning a dict of all this info and any error
solution.evaluate()
# => {"level_name": "Tunnels III",
# "resnet_id": (1, 1, 3), # Volume, Issue, Puzzle
# "author": "Zig",
# "cycles": 244,
# "reactors": 1,
# "symbols": 14,
# "solution_name": "symbols",
# "error": ScoreError("[Tunnels III] 243-1-14 \"symbols\" by Zig: Expected 243 cycles but got 244.")
#}
solution.evaluate(check_precog=True)
# => {"level_name": "Challenge: Going Green",
# "author": "Zig",
# "cycles: 3578,
# "reactors": 1,
# "symbols": 103,
# "solution_name": "assumes 2nd input",
# "precog": true,
# "precog_explanation": "Solution is precognitive; failed whenever molecule 2 was Hydrogen Sulfide, for 9 such
# appearances (whereas solution success rate was otherwise 100%)."
#}
# Re-export the solution. Sorts export lines to ensure uniqueness
solution.export_str()
# => "SOLUTION:..."
```
|
schem
|
/schem-0.32.3.tar.gz/schem-0.32.3/README.md
|
README.md
|
| 0.678753 | 0.7797 |
# LinkML Schema Automator
[](https://zenodo.org/badge/latestdoi/13996/linkml/schema_automator)
This is a toolkit that assists with:
1. Bootstrapping LinkML models from instance data
- TSVs and spreadsheets
- SQLite databases
- RDF instance graphs
2. Bootstrapping a LinkML model from a different schema representation (i.e. opposite of a linkml.generator)
- OWL (RDFS-like subset)
- TODO: JSON-Schema, XSD, ShEx, SHACL, SQL DDL, FHIR, Python dataclasses/pydantic, etc
3. Using automated methods to enhance a model
- Using text mining and concept annotator APIs to enrich semantic enums
- TODO: querying sparql endpoints to retrieve additional metadata
These can be composed together. For example, run `tsvs2linkml` followed by `annotate-enums`, as sketched below.
The toolkit is still experimental. It is intended as an aid to schema creation rather than as a formal conversion tool.
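A minimal sketch of that composition, driven from Python (the file names are illustrative, and the `annotate-enums` invocation is hypothetical — check `annotate-enums --help` for its actual flags):

```python
import subprocess

# infer a draft multi-class schema from TSVs, then annotate its enums
subprocess.run(["tsvs2linkml", "-o", "draft.yaml", "specimens.tsv", "sites.tsv"], check=True)
subprocess.run(["annotate-enums", "draft.yaml"], check=True)  # hypothetical invocation
```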
## Installation
`schema-automator` and its components require Python 3.9 or greater.
```bash
chmod 755 environment.sh
. environment.sh
pip install -r requirements.txt
pip install -e .
```
## Command Line Usage
### Annotating Enums
This toolkit allows automated annotation of LinkML enums, mapping text strings to ontology terms.
The command line tool `annotate-enums` takes a LinkML schema, with enums and fills in the `meaning` slots.
See the [annotators](schema_automator/annotators/) folder for docs
### Converting TSVs
The `tsv2linkml` command infers a single-class schema from a TSV datafile
```bash
$ tsv2linkml --help
Usage: tsv2linkml [OPTIONS] TSVFILE
Infer a model from a TSV
Options:
-o, --output TEXT Output file
-c, --class_name TEXT Core class name in schema
-n, --schema_name TEXT Schema name
-s, --sep TEXT separator
-E, --enum-columns TEXT column that is forced to be an enum
--robot / --no-robot set if the TSV is a ROBOT template
--help Show this message and exit.
```
Example:
```bash
tsv2linkml tests/resources/biobank-specimens.tsv
```
The `tsvs2linkml` command infers a multi-class schema from multiple TSV datafiles
```
$ tsvs2linkml --help
Usage: tsvs2linkml [OPTIONS] [TSVFILES]...
Infer a model from multiple TSVs
Options:
-o, --output TEXT Output file
-n, --schema_name TEXT Schema name
-s, --sep TEXT separator
-E, --enum-columns TEXT column(s) that is forced to be an enum
--enum-mask-columns TEXT column(s) that are excluded from being enums
--max-enum-size INTEGER do not create an enum if more than max distinct
members
--enum-threshold FLOAT if the number of distinct values / rows is less
than this, do not make an enum
--robot / --no-robot set if the TSV is a ROBOT template
--help Show this message and exit.
```
### Converting OWL
```bash
$ owl2linkml --help
Usage: owl2linkml [OPTIONS] OWLFILE
Infer a model from OWL Ontology
Note: input must be in functional syntax
Options:
-n, --name TEXT Schema name
--help Show this message and exit.
```
Example:
```bash
owl2linkml -n prov tests/resources/prov.ofn > prov.yaml
```
Note this works best on schema-style ontologies such as Prov
**NOT** recommended for terminological-style ontologies such as OBO
### Converting RDF instance graphs
```bash
$ rdf2linkml --help
Usage: rdf2linkml [OPTIONS] RDFFILE
Infer a model from RDF instance data
Options:
-d, --dir TEXT [required]
--help Show this message and exit.
```
### Converting JSON Instance Data
```bash
$ jsondata2linkml --help
Usage: jsondata2linkml [OPTIONS] INPUT
Infer a model from JSON instance data
Options:
--container-class-name TEXT name of root class
-f, --format TEXT json or yaml (or json.gz or yaml.gz)
--omit-null / --no-omit-null if true, ignore null values
--help Show this message and exit.
```
### Converting JSON-Schema
```
$ jsonschema2linkml --help
Usage: jsonschema2linkml [OPTIONS] INPUT
Infer a model from JSON Schema
Options:
-n, --name TEXT ID of schema [required]
-f, --format TEXT JSON Schema format - yaml or json
-o, --output TEXT output path
--help Show this message and exit.
```
|
schema-automator
|
/schema_automator-1.0.0b0.tar.gz/schema_automator-1.0.0b0/README.md
|
README.md
|
| 0.499512 | 0.946646 |
import json
import os
import logging
from schema_builder.builder_ddl import open_ddl_file, clean_data
from schema_builder.builder_table_list import parse_formatted_table
def build_json_schema(source_type, file=None, data=None, table_name=None):
if source_type == 'ddl':
return schema_from_ddl(file)
elif source_type == 'table':
return schema_from_table(data, table_name)
else:
return "Please enter a valid source type [ddl, table]."
def schema_from_ddl(file):
    if file is None:
        return "Please enter a valid file path."
raw_table_data = open_ddl_file(file)
clean_table_data = clean_data(raw_table_data)
table_name = clean_table_data[0]
table_dict = create_table_dict(clean_table_data)
json_schema_dict = create_json_schema_dict(table_dict)
return create_json_schema_file(json_schema_dict, table_name)
def schema_from_table(data, table_name):
    if data is None:
        return "Please provide data from a SQL DESCRIBE FORMATTED query."
    if table_name is None:
        return "Please provide a table name."
clean_table_data = parse_formatted_table(data, table_name)
table_dict = create_table_dict(clean_table_data)
json_schema_dict = create_json_schema_dict(table_dict)
return create_json_schema_file(json_schema_dict, table_name)
def create_table_dict(data):
table_dict = {}
table_columns = data[1]
for row in table_columns:
data_type = find_data_type(row[1])
table_dict[row[0]] = data_type
return table_dict
def find_data_type(data):
    lowercase_data = data.lower()
    # 'bigint' is checked before 'int' so the broader substring doesn't shadow it
    if 'bigint' in lowercase_data:
        return {"type": ["integer", "null"]}
    elif 'int' in lowercase_data:
        return {"type": ["integer", "null"]}
    elif 'decimal' in lowercase_data:
        return {"type": ["number", "null"]}
    elif 'varchar' in lowercase_data:
        return {"type": ["string", "null"]}
    elif 'char' in lowercase_data:
        return {"type": ["string", "null"]}
    elif 'string' in lowercase_data:
        return {"type": ["string", "null"]}
    elif 'timestamp' in lowercase_data:
        return {"type": ["string", "null"]}
    elif 'offset_date_time' in lowercase_data:
        return {"type": ["string", "null"]}
    elif 'date' in lowercase_data:
        return {"type": ["string", "null"]}
    # fall back to a permissive string type for unrecognized column types
    return {"type": ["string", "null"]}
def create_json_schema_dict(data):
json_schema = {
"type": ["object", "null"],
"properties": data
}
return json_schema
def create_json_schema_file(data, table_name):
json_schema = json.dumps(data, indent=4)
path = os.getcwd()
try:
os.mkdir(f'{path}/json_schemas')
except FileExistsError:
logging.info('/json_schemas directory already exists.')
with open(f"{path}/json_schemas/{table_name}_schema.json", "w") as schema:
schema.write(json_schema)
return f"{table_name}_schema.json created successfully."
|
schema-builder
|
/schema-builder-0.1.3.tar.gz/schema-builder-0.1.3/schema_builder/__init__.py
|
__init__.py
|
| 0.243642 | 0.229195 |
# SchemaChangeRiskEngine (SCRE):
A tool for assessing the risk of schema changes in a MySQL database when using tools like gh-ost or flywheel.
## The problem
Based on [Impact analysis of database schema changes](https://www.researchgate.net/publication/221555365_Impact_analysis_of_database_schema_changes)
and real world learning. It was found we should restrict riskier changes and patterns.
Such patterns include:
* BLOB & TEXT column overuse and storage/memory waste
* ENUM column casting issues during value parsing of a change
* SET column casting issues during value parsing of a change
* Foreign key and trigger usage preventing non-blocking and non-atomic changes
* Missing primary keys causing slow migrations or table-level locking versus row-level locking
* Renaming of columns and tables leading to application, data warehouse, and data lake sync issues
## The solution
This tool addresses this by letting you pass any CREATE or ALTER statement; it returns whether the change is safe, along with a reason when it is not.
### Example
```python
from schema_change_risk_engine import SchemaChangeRiskEngine as SCRE
engine = SCRE()
changeStatements = [
"""
CREATE TABLE `test` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(255) NOT NULL,
PRIMARY KEY (`id`)
)
ENGINE=InnoDB
DEFAULT CHARSET=utf8mb4
COLLATE=utf8mb4_0900_ai_ci
""",
"ALTER TABLE `test` ADD COLUMN `age` int(11) NOT NULL DEFAULT 0",
"ALTER TABLE `test` RENAME COLUMN `age` to `years_old`",
"ALTER TABLE `test` ADD COLUMN `gener` ENUM('M', 'F','T','NC') NOT NULL DEFAULT 'NC'",
"ALTER TABLE `test` ADD COLUMN `hobbies` SET('S', 'R','T','NC') NOT NULL DEFAULT 'NC'",
"ALTER TABLE `test` ADD COLUMN `bio` TEXT NOT NULL",
"ALTER TABLE `test` ADD COLUMN `photo` BLOB NOT NULL",
"ALTER TABLE `test` ADD COLUMN `order_date` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP",
"ALTER TABLE `test` ADD TRIGGER `test_trigger` AFTER INSERT ON `test` FOR EACH ROW BEGIN INSERT INTO `test` (`name`) VALUES ('test'); END",
"ALTER TABLE `test` ADD FOREIGN KEY (`id2`) REFERENCES `test` (`id`)",
"ALTER TABLE `test` RENAME TO `test2`",
"ALTER TABLE `test` RENAME TABLE `test2` TO `test`"
]
> for idx, change in enumerate(changeStatements):
      print("Statement %s: %s" % (idx + 1, engine.validate(change)))
Statement 1: (True, None)
Statement 2: (True, None)
Statement 3: (False, 'Renaming columns is not allowed')
Statement 4: (False, 'ENUM data type is not allowed')
Statement 5: (False, 'SET is not allowed')
Statement 6: (False, 'TEXT columns are not allowed')
Statement 7: (False, 'BLOB columns are not allowed')
Statement 8: (False, 'DATETIME data type is not allowed')
Statement 9: (False, 'Triggers are not allowed')
Statement 10: (False, 'Foreign keys are not allowed')
Statement 11: (False, 'Renaming tables is not allowed')
Statement 12: (False, 'Renaming tables is not allowed')
```
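Since `validate` returns a `(safe, reason)` tuple, it can gate a migration pipeline. A minimal sketch (`apply_migration` is a hypothetical stand-in for your own deploy step):

```python
safe, reason = engine.validate(
    "ALTER TABLE `test` ADD COLUMN `bio` TEXT NOT NULL")
if not safe:
    raise RuntimeError("Schema change rejected: %s" % reason)
apply_migration()  # hypothetical deploy step
```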
|
schema-change-risk-engine
|
/schema-change-risk-engine-0.0.9.tar.gz/schema-change-risk-engine-0.0.9/README.md
|
README.md
| 0.443359 | 0.742538 |
from typing import Any, Dict, NoReturn, TypeVar, Union, Type, Tuple, Callable, Iterable
ObjType = TypeVar('ObjType')
SchemaType = Union[str, Type, Tuple[Type], Dict[Union[str, Type], Any]]
def _get_type(sch: Dict[Union[str, Type], Any]) -> Any:
return sch[type if type in sch else 'type']
def _default(value: Any) -> Any:
return value() if callable(value) else value
def _on_error(schema: SchemaType, msg: Union[str, Exception]) -> NoReturn:
if isinstance(schema, dict):
msg = schema.get('errmsg', msg)
raise ValueError(msg)
def _validate_const_enum(obj: ObjType, schema: Dict[str, Any], schema_type: str, key: str) -> ObjType:
    if 'value' not in schema:
        _on_error(schema, 'schema for "const"/"enum" must contain "value"')
    if schema_type == 'enum':
        if obj not in schema['value']:
            _on_error(schema, '"{}" is not in enum "{}"'.format(obj, key))
    elif obj != schema['value']:
        _on_error(schema, '"{}" is not allowed as "{}"'.format(obj, key))
    return obj
def _check_dict_key(obj: ObjType, schema: Dict[str, Any], extra: str) -> ObjType:
unex = {i for i in obj if i not in schema['value']}
if unex and not schema.get('unexpected', False):
_on_error(schema, 'Got unexpected keys: "{}" {};'.format('", "'.join([str(i) for i in unex]), extra))
missed = {
i
for i in schema['value']
if i not in obj and (not isinstance(schema['value'][i], dict) or 'default' not in schema['value'][i])
}
if missed:
_on_error(schema, 'expected keys "{}" {}'.format('", "'.join([str(i) for i in missed]), extra))
return {
i: obj[i]
for i in unex
}
def _validate_dicts_value(obj: ObjType, schema: Dict[str, Any], extra: str) -> ObjType:
new_obj = _check_dict_key(obj=obj, schema=schema, extra=extra)
try:
new_obj.update(
{
i: (
_default(schema['value'][i]['default'])
if i not in obj else
_apply(obj=obj[i], schema=schema['value'][i], key=i)
)
for i in schema['value']
}
)
except ValueError as ex:
_on_error(schema, ex)
return new_obj
def _validate_dict(obj: ObjType, schema: Dict[str, Any], extra: str) -> ObjType:
if 'value' in schema:
obj = _validate_dicts_value(obj=obj, schema=schema, extra=extra)
elif 'any_key' in schema:
try:
obj = {i: _apply(obj[i], schema['any_key'], i) for i in obj}
except ValueError as ex:
_on_error(schema, ex)
return obj
def _check_filter(obj: ObjType, func: Union[Callable, Iterable[Callable]]) -> bool:
return all(func(obj) for func in ([func] if callable(func) else func))
def _generic_checks(obj: ObjType, schema: SchemaType, schema_type: Type, extra: str, key: str) -> ObjType:
if not isinstance(obj, schema_type):
_on_error(schema, 'expected type "{}" {} ; got {}'.format(schema_type, extra, type(obj)))
if 'filter' in schema and not _check_filter(obj, schema['filter']):
_on_error(schema, '"{}" not passed filter'.format(key))
if schema.get('blank') is False and not obj:
_on_error(schema, '"{}" is blank'.format(key))
if 'max_length' in schema and len(obj) > schema['max_length']:
_on_error(schema, '"{}" > max_length'.format(key))
if 'min_length' in schema and len(obj) < schema['min_length']:
_on_error(schema, '"{}" < min_length'.format(key))
return obj
def _validate_generic(obj: ObjType, schema: SchemaType, schema_type: Type, key: str, extra: str) -> ObjType:
obj = _generic_checks(obj=obj, schema=schema, schema_type=schema_type, key=key, extra=extra)
if isinstance(schema_type, type) and issubclass(schema_type, (list, tuple)) and 'value' in schema:
try:
obj = schema_type(_apply(i, schema['value'], key=key) for i in obj)
except ValueError as ex:
_on_error(schema, ex)
elif isinstance(schema_type, type) and issubclass(schema_type, dict):
obj = _validate_dict(obj=obj, schema=schema, extra=extra)
return obj
def _validate(obj: ObjType, schema: SchemaType, key: str, extra: str) -> ObjType:
schema_type = _get_type(schema)
if schema_type in {'const', 'enum'}:
return _validate_const_enum(obj=obj, schema=schema, schema_type=schema_type, key=key)
return _validate_generic(obj=obj, schema=schema, schema_type=schema_type, extra=extra, key=key)
def _apply_callable(obj: ObjType, func: Union[Callable, Iterable[Callable]]) -> ObjType:
for func in ([func] if callable(func) else func):
obj = func(obj)
return obj
def _apply(obj: ObjType, schema: SchemaType, key: str) -> ObjType:
extra = ''.join(['for ', key]) if key else ''
if not isinstance(schema, (dict, type, tuple)) and schema not in {'const', 'enum'}:
raise ValueError('schema must be type, dict, tuple or "const"/"enum" {}'.format(extra))
if schema == 'const':
return obj
if isinstance(schema, (type, tuple)):
if isinstance(obj, schema):
return obj
raise ValueError('"{}" is not type of "{}" {}'.format(obj, schema, extra))
if 'pre_call' in schema:
obj = _apply_callable(obj, schema['pre_call'])
obj = _validate(obj=obj, schema=schema, key=key, extra=extra)
if 'post_call' in schema:
obj = _apply_callable(obj, schema['post_call'])
return obj
def validate(obj: ObjType, schema: SchemaType) -> ObjType:
"""
obj - some object
schema - schema_checker
schema ::= type of this object : list/dict/str/int/float (can be tuple of types) or "const"/"enum"
OR
schema ::= dict - {
    type : type of this object : list/tuple/dict/str/int/float (can be a tuple of types) or "const"/"enum"
"value" : need for obj type of
- list/tuple - is schema for all elements in list
- dict - dict[key -> schema]
        - const - some value the object must equal (compared using ==)
- enum - list/set/dict/tuple to check if obj __contains__ in "value"
"any_key" : need for obj type of dict - schema for all keys (ignores if value is set)
"default" : default value if this object does not exists (if callable will be called)
"filter" : any of
- Callable[value -> bool] - if false then raise error
- Iterable[Callable[value -> bool]] - if any of them return false then raise error
"pre_call" : any of
- Callable[value -> value] - will be called before checking type and call filter's functions
- Iterable[Callable[value -> value]] - will call all of them
"post_call" : any of
- Callable[value -> value] - will be called after checking type and call filter's functions
- Iterable[Callable[value -> value]] - will call all of them
"blank" : raise error if value is blank
"max_length" : extra check of length (len)
"min_length" : extra check of length (len)
"unexpected" : allow unexpected keys (for dict)
"errmsg" : will be in ValueError in case of error on this level
}
"""
return _apply(obj, schema, 'Top-level')
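# A minimal usage sketch of validate(); the schema below uses only keys
# documented in the docstring above, and the field names are hypothetical.
# Note the builtin `type` is used as the schema key, per _get_type above.
user_schema = {
    type: dict,
    'value': {
        'name': {type: str, 'blank': False},
        'age': {type: int, 'default': 0},
        'role': {type: 'enum', 'value': {'admin', 'user'}},
    },
}
print(validate({'name': 'alice', 'role': 'user'}, user_schema))
# -> {'name': 'alice', 'age': 0, 'role': 'user'}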
|
schema-checker
|
/schema_checker-1.1.1.tar.gz/schema_checker-1.1.1/schema_checker/jschema.py
|
jschema.py
| 0.629661 | 0.227491 |
from typing import List
from pprint import pformat
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.dto import NormalizedSchema
from schema_classification.dto import MappingResultDict
from schema_classification.dmo import FilterIncludeAllOf
from schema_classification.dmo import FilterExcludeOneOf
from schema_classification.dmo import FilterExcludeAllOf
from schema_classification.dmo import FilterIncludeOneOf
from schema_classification.dmo import FilterStartsWith
class FilterMapping(BaseObject):
""" Filter all Invalid Mapping """
def __init__(self,
d_index: NormalizedSchema):
""" Initialize Service
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* use list-of-str for input tokens rather than mapped dict
https://github.com/craigtrim/schema-classification/issues/3
* rename from 'predict-mapping'
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the indexed schema
"""
BaseObject.__init__(self, __name__)
self._include_one_of = FilterIncludeOneOf(d_index).process
self._include_all_of = FilterIncludeAllOf(d_index).process
self._exclude_one_of = FilterExcludeOneOf(d_index).process
self._exclude_all_of = FilterExcludeAllOf(d_index).process
self._startswith = FilterStartsWith(d_index).process
def _process(self,
input_tokens: List[str]) -> MappingResultDict:
m_include_oneof = self._include_one_of(input_tokens)
m_include_allof = self._include_all_of(input_tokens)
m_exclude_oneof = self._exclude_one_of(input_tokens)
m_exclude_allof = self._exclude_all_of(input_tokens)
m_startswith = self._startswith(input_tokens)
return {
'include_one_of': m_include_oneof,
'include_all_of': m_include_allof,
'exclude_one_of': m_exclude_oneof,
'exclude_all_of': m_exclude_allof,
'startswith': m_startswith,
}
def process(self,
input_tokens: List[str]) -> MappingResultDict:
sw = Stopwatch()
results = self._process(input_tokens)
if self.isEnabledForInfo:
self.logger.info('\n'.join([
'Mapping Prediction Completed',
f'\tTotal Time: {str(sw)}',
f'\tTotal Results: {len(results)}']))
if self.isEnabledForDebug and len(results):
self.logger.debug('\n'.join([
'Mapping Prediction Results',
f'{pformat(results)}']))
return results
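# Usage sketch: build the index with ReadMapping, then filter a token
# list (the schema and tokens below are hypothetical examples):
from schema_classification.svc import ReadMapping

d_index = ReadMapping({
    'ASSIGN_PEER_REVIEW_DISCUSSION#1': [
        {'include_all_of': ['discussion', 'assign']}]}).index()
d_filter = FilterMapping(d_index).process(['discussion', 'assign'])
# d_filter maps each ruleset type ('include_one_of', 'include_all_of',
# 'exclude_one_of', 'exclude_all_of', 'startswith') to the
# classifications that ruleset filtered out.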
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/svc/filter_mapping.py
|
filter_mapping.py
| 0.813313 | 0.242329 |
from typing import List
from typing import Dict
from typing import Any
from pprint import pprint
from collections import defaultdict
from baseblock import BaseObject
from schema_classification.dto import ListOfDicts
from schema_classification.dto import NormalizedSchema
from schema_classification.dto import MappingResultDict
class SelectMapping(BaseObject):
""" Choose the Best Mapping """
def __init__(self,
d_filter: dict,
d_index: dict):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* eliminate callback and pass d-index in pursuit of
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* pass d-filter instead of 'mapping'
https://github.com/grafflr/deepnlu/issues/45
        :param d_filter:
            the FilterMapping output (classifications to filter out)
        :param d_index:
            the in-memory schema index
"""
BaseObject.__init__(self, __name__)
self._d_filter = d_filter
self._d_index = d_index
    def _invalid_names(self) -> set:
        """ Join Include and Exclude results to find Candidate Mappings """
        invalid_names = set()
        for include_key in ['include_one_of', 'include_all_of', 'startswith']:
            invalid_names.update(self._d_filter[include_key])
        for exclude_key in ['exclude_one_of', 'exclude_all_of']:
            invalid_names.update(self._d_filter[exclude_key])
        return invalid_names
def process(self) -> Dict:
invalid_names = self._invalid_names()
d_mapping = self._d_index['mapping']
d_mapping = {
k: d_mapping[k]
for k in d_mapping if k not in invalid_names
}
d_by_score = defaultdict(list)
for classification in d_mapping:
def get_score() -> float:
if 'score' not in d_mapping[classification]:
return 100.0
return 100 + d_mapping[classification]['score']
d_by_score[get_score()].append(classification)
if not len(d_by_score):
return {
'classification': None,
'score': None,
}
max_score = max(d_by_score)
def cleanse() -> str:
max_classification = sorted(d_by_score[max_score])[0]
if '#' in max_classification:
max_classification = max_classification.split('#')[0].strip()
return max_classification
def bounded_score() -> float:
if max_score > 100.0:
return 100.0
if max_score < 0.0:
return 0.0
return max_score
return {
'classification': cleanse(),
'score': bounded_score()
}
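# Usage sketch (hypothetical filter/index values; in practice d_filter
# comes from FilterMapping and d_index from ReadMapping):
d_index = {'mapping': {'GREETING#1': [{'include_all_of': ['hello']}],
                       'FAREWELL#1': [{'include_all_of': ['bye']}]}}
d_filter = {'include_one_of': [], 'include_all_of': ['FAREWELL#1'],
            'exclude_one_of': [], 'exclude_all_of': [], 'startswith': []}
print(SelectMapping(d_filter=d_filter, d_index=d_index).process())
# -> {'classification': 'GREETING', 'score': 100.0}
# (the '#1' suffix is stripped by cleanse(), and the score is bounded
# to the 0..100 range by bounded_score())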
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/svc/select_mapping.py
|
select_mapping.py
| 0.858896 | 0.316818 |
from typing import Dict
from baseblock import FileIO
from baseblock import BaseObject
from schema_classification.dmo import IndexScoring
from schema_classification.dmo import IndexExcludeAllOf
from schema_classification.dmo import IndexExcludeOneOf
from schema_classification.dmo import IndexIncludeAllOf
from schema_classification.dmo import IndexIncludeOneOf
from schema_classification.dmo import IndexStartsWith
from schema_classification.dto import RawSchema
from schema_classification.dto import NormalizedSchema
class ReadMapping(BaseObject):
""" Build an in-memory Index over a Dictionary of Classifications """
def __init__(self,
d_schema: Dict):
""" Initialize Manifest Indicer
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/167
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
26-Jul-2022
[email protected]
* remove 'schema-name' and 'absolute-path' as parameters, and instead
pass the full absolute path of a schema file, in pursuit of
https://bast-ai.atlassian.net/browse/COR-12
Updated:
26-Sept-2022
[email protected]
* pass in d-schema as dict rather than a filepath
https://github.com/craigtrim/schema-classification/issues/1
Args:
d_schema (Dict): the schema JSON
"""
BaseObject.__init__(self, __name__)
self._d_index = self._create_index(d_schema)
def _create_index(self,
d_schema: RawSchema) -> NormalizedSchema:
""" Create Index
Args:
d_schema (dict): _description_
Sample Input:
{
'Favorite_Animal_Response#1': [
{
'include_all_of': ['favorite', 'animal']
}
]
}
Raises:
ValueError: _description_
Returns:
InMemoryIndex: _description_
"""
return {
'scoring': IndexScoring(d_schema).process(),
'include_one_of': IndexIncludeOneOf(d_schema).process(),
'include_all_of': IndexIncludeAllOf(d_schema).process(),
'exclude_one_of': IndexExcludeOneOf(d_schema).process(),
'exclude_all_of': IndexExcludeAllOf(d_schema).process(),
'startswith': IndexStartsWith(d_schema).process(),
'mapping': d_schema,
}
def index(self) -> NormalizedSchema:
return self._d_index
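# Usage sketch (schema taken from the Sample Input above):
d_index = ReadMapping({
    'Favorite_Animal_Response#1': [
        {'include_all_of': ['favorite', 'animal']}
    ]}).index()
# the index carries one lookup per ruleset type plus the raw mapping:
print(sorted(d_index.keys()))
# -> ['exclude_all_of', 'exclude_one_of', 'include_all_of',
#     'include_one_of', 'mapping', 'scoring', 'startswith']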
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/svc/read_mapping.py
|
read_mapping.py
| 0.85814 | 0.214301 |
from pprint import pprint
from pprint import pformat
from typing import List
from typing import Dict
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.svc import ReadMapping
from schema_classification.svc import FilterMapping
from schema_classification.svc import SelectMapping
from schema_classification.dto import ServiceEvent
class SchemaOrchestrator(BaseObject):
""" Portendo performs Predictive Classification of deepNLU parsed ASTs
This Orchestration sequence requires a pre-written schema for classification
Pre-written schemas are more complex and are capable of nuanced classification
"""
def __init__(self,
d_schema: Dict):
"""Initialize Portendo API
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* make absolute_path a required parameter in pursuit of
https://github.com/grafflr/deepnlu/issues/44
* read classifications from memory (not python files)
https://github.com/grafflr/deepnlu/issues/45
Updated:
13-Jul-2022
[email protected]
* renamed from 'portendo' in pursuit of
https://github.com/grafflr/deepnlu/issues/48
Updated:
26-Jul-2022
[email protected]
* remove 'schema-name' and 'absolute-path' as parameters, and instead
pass the full absolute path of a schema file, in pursuit of
https://bast-ai.atlassian.net/browse/COR-12
* document the schema-file to schema-name mapping convention
https://bast-ai.atlassian.net/browse/COR-13
Updated:
26-Sept-2022
[email protected]
* pass in d-schema as dict rather than a filepath
https://github.com/craigtrim/schema-classification/issues/1
Updated:
30-Nov-2022
[email protected]
* use list-of-str for input tokens rather than mapped dict
https://github.com/craigtrim/schema-classification/issues/3
Args:
d_schema (Dict): the schema JSON
"""
BaseObject.__init__(self, __name__)
self._d_index = ReadMapping(d_schema).index()
def _run(self,
input_tokens: List[str]) -> ServiceEvent:
        # the filter output maps each ruleset type to the classifications
        # it rules out; SelectMapping discards those and scores the rest
        d_filter = FilterMapping(
            self._d_index).process(input_tokens)
mapping = SelectMapping(
d_filter=d_filter,
d_index=self._d_index).process()
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Mapping Completed',
f'\tInput:\n{pformat(input_tokens)}',
f'\tOutput:\n{pformat(mapping)}']))
if not len(mapping):
return {
'result': None,
'tokens': input_tokens
}
return {
'result': mapping,
'tokens': input_tokens
}
def run(self,
input_tokens: List[str]) -> ServiceEvent:
""" Run the Schema Orchestrator on Input Tokens
Args:
input_tokens (list): a flat list of tokens extracted from text
Sample Input:
['network_topology', 'user', 'customer']
Returns:
tuple: the service result
"""
sw = Stopwatch()
svcresult = self._run(input_tokens)
self.logger.info('\n'.join([
'Portendo Schema Orchestrator Completed',
f'\tTotal Time: {str(sw)}',
f'\tResult:\n{pformat(svcresult)}']))
return svcresult
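# End-to-end usage sketch (hypothetical schema; the tokens mirror the
# docstring example above):
d_schema = {'NETWORK_TOPOLOGY#1': [
    {'include_all_of': ['network_topology', 'user']}]}
orchestrator = SchemaOrchestrator(d_schema)
print(orchestrator.run(['network_topology', 'user', 'customer']))
# expected shape: {'result': {'classification': 'NETWORK_TOPOLOGY', ...},
#                  'tokens': ['network_topology', 'user', 'customer']}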
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/bp/schema_orchestrator.py
|
schema_orchestrator.py
| 0.841468 | 0.354685 |
from typing import Dict
from baseblock import BaseObject
from schema_classification.dto import Markers
from schema_classification.dto import MappingResult
class ConfidenceExcludeAllOf(BaseObject):
""" Determine Confidence Level for Selected Mapping """
def __init__(self,
mapping: Dict,
markers: Markers,
result: MappingResult):
"""
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
        :param mapping:
            relevant section of mapping ruleset
        :param markers:
            the input markers
        :param result:
            the mapping result whose confidence is being adjusted
"""
BaseObject.__init__(self, __name__)
self._result = result
self._mapping = mapping
self._markers = markers
def _excludeall(self,
confidence: float,
mapping: dict) -> float:
# but do any of these exclusions exist in the mapping?
exclusions = set(mapping['exclude_all_of'])
markers = set(self._markers.keys())
        # deduct from the confidence based on how much of the marker set
        # the exclusions cover
total_matches = len(exclusions.intersection(markers))
ratio = round((total_matches / len(markers)) * 100, 0)
if ratio > 80:
confidence -= 8
elif ratio > 60:
confidence -= 16
elif ratio > 40:
confidence -= 32
elif ratio > 20:
confidence -= 64
elif ratio > 0:
confidence -= 90
else:
confidence -= 99
self.logger.debug('\n'.join([
'Exclude All Of Confidence',
f'\tExclusions ({len(exclusions)}): {exclusions}',
f'\tMarkers ({len(markers)}): {markers}',
f'\tMatches: {total_matches}',
f'\tRatio: {ratio}']))
return confidence
def process(self) -> float:
confidence = self._result['confidence']
mappings = self._mapping[self._result['classification']]
# at this point, we know the exclusions rule did not apply
for mapping in mappings:
if 'exclude_all_of' in mapping:
confidence = self._excludeall(mapping=mapping,
confidence=confidence)
return confidence
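# Worked example (hypothetical values): with markers {'a', 'b', 'c',
# 'd', 'e'} and exclude_all_of ['a', 'b'], total_matches is 2, so
# ratio = round((2 / 5) * 100, 0) = 40.0, which falls into the
# `ratio > 20` tier above and deducts 64 from the confidence.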
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/confidence_exclude_allof.py
|
confidence_exclude_allof.py
| 0.902588 | 0.282468 |
from typing import List
from typing import Dict
from pprint import pformat
from baseblock import Stopwatch
from baseblock import BaseObject
class FilterStartsWith(BaseObject):
""" Check if Input Text Starts with Value
Reference:
https://github.com/grafflr/graffl-core/issues/264#issuecomment-1089413865
"""
def __init__(self,
d_index: Dict):
""" Change Log
Created:
5-Apr-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/264
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* renamed from 'computer-startswith' and basically rewrite from scratch
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the in-memory schema
"""
BaseObject.__init__(self, __name__)
self._mapping = d_index['mapping']
self._d_startswith = d_index['startswith']
def _coverage(self,
weight: int,
mapping_name: str) -> float:
""" Determine the Coverage """
d_mapping = self._mapping[mapping_name][0]['include_one_of']
total_markers = len(d_mapping)
return round(weight / total_markers, 2)
    def process(self,
                input_tokens: List) -> Dict:
sw = Stopwatch()
d_results = {}
input_text = ' '.join(input_tokens).lower().strip()
for phrase in self._d_startswith:
if input_text.startswith(phrase.lower()):
for mapping in self._d_startswith[phrase]:
d_results[mapping] = {'weight': 100.0, 'coverage': 100.0}
if self.isEnabledForDebug and len(d_results):
self.logger.debug('\n'.join([
'StartsWith Results:',
f'\tTotal Time: {str(sw)}',
f'\t{pformat(d_results)}']))
return d_results
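# Usage sketch (hypothetical index; 'startswith' maps a phrase to the
# mappings it triggers):
d_index = {'mapping': {}, 'startswith': {'how do i': ['HOWTO#1']}}
print(FilterStartsWith(d_index).process(['How', 'do', 'I', 'reset']))
# -> {'HOWTO#1': {'weight': 100.0, 'coverage': 100.0}}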
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/filter_startswith.py
|
filter_startswith.py
| 0.849222 | 0.297866 |
from typing import List
from typing import Dict
from pprint import pprint
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.dto import NormalizedSchema
class FilterExcludeAllOf(BaseObject):
""" Filter Classifications using EXCLUDE_ALL_OF Rulesets
- Remove invalid classifications using the 'exclude-all-of' criteria.
- This component returns valid candidates only.
Implementation:
EXCLUDE_ALL_OF has some important nuances that differentiate it from EXCLUDE_ONE_OF
Reference: https://github.com/craigtrim/schema-classification/issues/5
"""
def __init__(self,
d_index: NormalizedSchema):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* use list-of-str for input tokens rather than mapped dict
https://github.com/craigtrim/schema-classification/issues/3
* renamed from 'computer-exclude-one-of' and basically rewrite from scratch
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the in-memory schema
Sample Input (mapping):
{
"ASSIGN_PEER_REVIEW_DISCUSSION#1":[
{
"include_all_of":[
"discussion",
"assign"
],
"include_one_of":[
"review",
"peer_review"
],
"exclude_all_of":[
"create"
]
}
]
}
Sample Input (exclude_all_of):
{
"create": "ASSIGN_PEER_REVIEW_DISCUSSION#1"
}
"""
BaseObject.__init__(self, __name__)
self._mapping = d_index['mapping']
    def process(self,
                input_tokens: List[str]) -> List[str]:
sw = Stopwatch()
invalid_names = []
s_input_tokens = set(input_tokens)
for classification in self._mapping:
for ruleset in self._mapping[classification]:
if 'exclude_all_of' not in ruleset:
continue
exclude_all_of = set(ruleset['exclude_all_of'])
if not len(exclude_all_of):
continue
result = exclude_all_of.intersection(s_input_tokens)
# all the exclusion tokens must be found
if result != exclude_all_of:
continue
invalid_names.append(classification)
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Invalid Classification Found',
f'\tName: {classification}',
f'\tRule Tokens: {exclude_all_of}',
f'\tInput Tokens: {input_tokens}']))
if self.isEnabledForInfo:
self.logger.info('\n'.join([
'Filtering Complete',
f'\tRemoved Classifications: {len(invalid_names)}',
f'\tTotal Time: {str(sw)}']))
return invalid_names
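# Behavior sketch illustrating the EXCLUDE_ALL_OF nuance: every token in
# the ruleset must be present for the classification to be removed
# (hypothetical schema):
d_index = {'mapping': {'ASSIGN#1': [{'exclude_all_of': ['create', 'draft']}]}}
f = FilterExcludeAllOf(d_index)
print(f.process(['create']))           # -> [] (only a partial match)
print(f.process(['create', 'draft']))  # -> ['ASSIGN#1'] (all tokens found)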
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/filter_exclude_allof.py
|
filter_exclude_allof.py
| 0.843122 | 0.283763 |
from typing import List
from typing import Dict
from pprint import pformat
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.dto import NormalizedSchema
class FilterExcludeOneOf(BaseObject):
""" Filter Classifications using EXCLUDE_ONE_OF Rulesets
Remove invalid classifications using the 'exclude-one-of' criteria.
This component returns valid candidates only.
"""
def __init__(self,
d_index: NormalizedSchema):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
* renamed from 'computer-exclude-one-of' and basically rewrite from scratch
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the in-memory schema
"""
BaseObject.__init__(self, __name__)
self._mapping = d_index['mapping']
self._d_exclude_oneof = d_index['exclude_one_of']
    def process(self,
                input_tokens: List[str]) -> List[str]:
sw = Stopwatch()
invalid_names = []
s_input_tokens = set(input_tokens)
for classification in self._mapping:
for ruleset in self._mapping[classification]:
if 'exclude_one_of' not in ruleset:
continue
exclude_one_of = set(ruleset['exclude_one_of'])
if not len(exclude_one_of):
continue
common = exclude_one_of.intersection(s_input_tokens)
# at least one exclusion token must be found
if not len(common):
continue
invalid_names.append(classification)
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Invalid Classification Found',
f'\tName: {classification}',
f'\tRule Tokens: {exclude_one_of}',
f'\tMatched Rule Tokens: {common}',
f'\tInput Tokens: {input_tokens}']))
if self.isEnabledForInfo:
self.logger.info('\n'.join([
'Filtering Complete',
f'\tRemoved Classifications: {len(invalid_names)}',
f'\tTotal Time: {str(sw)}']))
return invalid_names
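# Behavior sketch: unlike EXCLUDE_ALL_OF, a single matching token is
# enough to remove the classification (hypothetical schema):
d_index = {'mapping': {'ASSIGN#1': [{'exclude_one_of': ['create', 'draft']}]},
           'exclude_one_of': {}}
print(FilterExcludeOneOf(d_index).process(['create']))  # -> ['ASSIGN#1']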
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/filter_exclude_oneof.py
|
filter_exclude_oneof.py
| 0.876066 | 0.315565 |
from typing import Dict
from typing import Optional
from baseblock import BaseObject
from schema_classification.dto import Markers
from schema_classification.dto import ExplainResult
from schema_classification.dto import MappingResult
class ConfidenceIncludeAllOf(BaseObject):
""" Determine Confidence Level for Selected Mapping """
def __init__(self,
mapping: Dict,
markers: Markers):
"""
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
        :param mapping:
            relevant section of mapping ruleset
        :param markers:
            the input markers
"""
BaseObject.__init__(self, __name__)
self._mapping = mapping
self._markers = markers
@staticmethod
def _max_confidence(d: Dict) -> Dict:
"""
Sample Input
{ 23.0: { 'classification': 'GICS_CODE_50102010_2',
'confidence': 23.0,
'explain': <ExplainResult.INCLUDE_NEAR_MATCH_1: 30>},
35.0: { 'classification': 'GICS_CODE_25302020_2',
'confidence': 35.0,
'explain': <ExplainResult.INCLUDE_NEAR_MATCH_1: 30>},
85.0: { 'classification': 'GICS_CODE_50101020_3',
'confidence': 85.0,
'explain': <ExplainResult.INCLUDE_NEAR_MATCH_1: 30>}}
Sample Output:
{ 'classification': 'GICS_CODE_50101020_3',
'confidence': 85.0,
'explain': <ExplainResult.INCLUDE_NEAR_MATCH_1: 30>}
"""
return d[max(d)]
    def process(self) -> Optional[MappingResult]:
        d = {}
for k in self._mapping:
for mapping in self._mapping[k]:
if 'include_all_of' in mapping:
# mapping-supplied
map_tags = set(mapping['include_all_of'])
usr_tags = set(self._markers.keys()) # user supplied
matches = map_tags.intersection(usr_tags)
total_matches = len(matches)
if total_matches == 0:
continue
total_map_tags = len(map_tags)
total_usr_tags = len(usr_tags)
def compute() -> float:
base = 5.00
boost = ((base - total_map_tags) * base) / 100
confidence = (total_matches / total_map_tags) - boost
confidence = round(confidence * 100, 0)
if confidence > 100:
return 99
if confidence < 1:
return 0
return confidence
confidence = compute()
self.logger.debug('\n'.join([
'Include All Confidence Computation',
f'\tClassification: {k}',
f'\tUser Tags ({total_usr_tags}): {usr_tags}',
f'\tMapping Tags ({total_map_tags}): {map_tags}',
f'\tMatches ({total_matches}): {matches}',
f'\tConfidence: {confidence}']))
if confidence == 0:
continue
d[confidence] = MappingResult(confidence=confidence,
classification=k,
explain=ExplainResult.INCLUDE_NEAR_MATCH_1)
if not len(d):
return None
return self._max_confidence(d)
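# Worked example of compute() (hypothetical tags): with
# include_all_of = ['favorite', 'animal'] and user markers
# {'favorite', 'animal', 'dog'}, total_matches = 2 and
# total_map_tags = 2, so boost = ((5.00 - 2) * 5.00) / 100 = 0.15 and
# confidence = round(((2 / 2) - 0.15) * 100, 0) = 85.0.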
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/confidence_include_allof.py
|
confidence_include_allof.py
| 0.745676 | 0.252833 |
from typing import Dict
from collections import defaultdict
from baseblock import Stopwatch
from baseblock import BaseObject
class IndexIncludeAllOf(BaseObject):
""" Generate an Index of 'include-all-of' Mappings"""
def __init__(self,
mapping: Dict):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* https://github.com/grafflr/deepnlu/issues/45
:param mapping
"""
BaseObject.__init__(self, __name__)
self._mapping = mapping
def process(self) -> Dict:
sw = Stopwatch()
d = defaultdict(list)
for k in self._mapping:
for mapping in self._mapping[k]:
if 'include_all_of' not in mapping:
continue
if not len(mapping['include_all_of']):
continue
cluster = sorted(set(mapping['include_all_of']))
first_term = cluster[0]
terms_n = sorted(set(cluster[1:]))
if first_term not in d:
d[first_term] = []
update_existing_flag = False # NLP-889-12303; an example of this structure
for existing in d[first_term]:
if terms_n == existing['terms']:
existing['mappings'].append(k)
update_existing_flag = True
if not update_existing_flag:
d[cluster[0]].append({ # NLP-889-12304; an example of this structure
'mappings': [k],
'terms': terms_n})
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Generated Index: Include All Of',
f'\tTotal Rows: {len(d)}',
f'\tTotal Time: {str(sw)}']))
return dict(d)
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/index_include_allof.py
|
index_include_allof.py
|
from typing import Dict
from collections import defaultdict
from baseblock import Stopwatch
from baseblock import BaseObject
class IndexIncludeAllOf(BaseObject):
""" Generate an Index of 'include-all-of' Mappings"""
def __init__(self,
mapping: Dict):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* https://github.com/grafflr/deepnlu/issues/45
:param mapping
"""
BaseObject.__init__(self, __name__)
self._mapping = mapping
def process(self) -> Dict:
sw = Stopwatch()
d = defaultdict(list)
for k in self._mapping:
for mapping in self._mapping[k]:
if 'include_all_of' not in mapping:
continue
if not len(mapping['include_all_of']):
continue
cluster = sorted(set(mapping['include_all_of']))
first_term = cluster[0]
terms_n = sorted(set(cluster[1:]))
if first_term not in d:
d[first_term] = []
update_existing_flag = False # NLP-889-12303; an example of this structure
for existing in d[first_term]:
if terms_n == existing['terms']:
existing['mappings'].append(k)
update_existing_flag = True
if not update_existing_flag:
d[cluster[0]].append({ # NLP-889-12304; an example of this structure
'mappings': [k],
'terms': terms_n})
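                # Illustrative index shape (hypothetical data): two classifications
                # that both declare include_all_of == ['alpha', 'beta'] collapse to
                #   {'alpha': [{'mappings': ['GICS_A', 'GICS_B'], 'terms': ['beta']}]}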
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Generated Index: Include All Of',
f'\tTotal Rows: {len(d)}',
f'\tTotal Time: {str(sw)}']))
return dict(d)
| 0.767341 | 0.178418 |
from typing import List
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.dto import NormalizedSchema
class FilterIncludeOneOf(BaseObject):
""" Compute INCLUDE_ONE_OF Rulesets """
def __init__(self,
d_index: NormalizedSchema):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
        * renamed from 'computer-include-one-of' and basically rewritten from scratch
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the in-memory schema
"""
BaseObject.__init__(self, __name__)
self._mapping = d_index['mapping']
self._d_include_oneof = d_index['include_one_of']
    def process(self,
                input_tokens: List[str]) -> List[str]:
sw = Stopwatch()
invalid_names = []
s_input_tokens = set(input_tokens)
for classification in self._mapping:
for ruleset in self._mapping[classification]:
if 'include_one_of' not in ruleset:
continue
include_one_of = set(ruleset['include_one_of'])
if not len(include_one_of):
continue
common = include_one_of.intersection(s_input_tokens)
# this classification is invalid when no common tokens are found
if len(common):
continue
invalid_names.append(classification)
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Invalid Classification Found',
f'\tName: {classification}',
f'\tRule Tokens: {include_one_of}',
f'\tMatched Rule Tokens: {common}',
f'\tInput Tokens: {input_tokens}']))
if self.isEnabledForInfo:
self.logger.info('\n'.join([
'Filtering Complete',
f'\tRemoved Classifications: {len(invalid_names)}',
f'\tTotal Time: {str(sw)}']))
return invalid_names
|
schema-classification
|
/schema_classification-0.1.8-py3-none-any.whl/schema_classification/dmo/filter_include_oneof.py
|
filter_include_oneof.py
|
from typing import List
from baseblock import Stopwatch
from baseblock import BaseObject
from schema_classification.dto import NormalizedSchema
class FilterIncludeOneOf(BaseObject):
""" Compute INCLUDE_ONE_OF Rulesets """
def __init__(self,
d_index: NormalizedSchema):
""" Change Log
Created:
7-Feb-2022
[email protected]
* https://github.com/grafflr/graffl-core/issues/169
Updated:
8-Jun-2022
[email protected]
* read schema in-memory
https://github.com/grafflr/deepnlu/issues/45
Updated:
30-Nov-2022
[email protected]
        * renamed from 'computer-include-one-of' and basically rewritten from scratch
https://github.com/craigtrim/schema-classification/issues/4
Args:
d_index (dict): the in-memory schema
"""
BaseObject.__init__(self, __name__)
self._mapping = d_index['mapping']
self._d_include_oneof = d_index['include_one_of']
    def process(self,
                input_tokens: List[str]) -> List[str]:
sw = Stopwatch()
invalid_names = []
s_input_tokens = set(input_tokens)
for classification in self._mapping:
for ruleset in self._mapping[classification]:
if 'include_one_of' not in ruleset:
continue
include_one_of = set(ruleset['include_one_of'])
if not len(include_one_of):
continue
common = include_one_of.intersection(s_input_tokens)
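                # e.g. (hypothetical) include_one_of={'finance', 'bank'} with
                # s_input_tokens={'bank', 'loan'} gives common={'bank'}, so the
                # classification survives; an empty intersection marks it invalid.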
# this classification is invalid when no common tokens are found
if len(common):
continue
invalid_names.append(classification)
if self.isEnabledForDebug:
self.logger.debug('\n'.join([
'Invalid Classification Found',
f'\tName: {classification}',
f'\tRule Tokens: {include_one_of}',
f'\tMatched Rule Tokens: {common}',
f'\tInput Tokens: {input_tokens}']))
if self.isEnabledForInfo:
self.logger.info('\n'.join([
'Filtering Complete',
f'\tRemoved Classifications: {len(invalid_names)}',
f'\tTotal Time: {str(sw)}']))
return invalid_names
| 0.83193 | 0.24951 |
from innoldb.qldb import Document
from schema_cntl import settings
from schema_cntl.dialects.postgres import Table
from schema_cntl.util.logger import getLogger
log = getLogger('schema_cntl.schema')
def commit(schema):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, snapshot=schema)
schema_doc.save()
return schema_doc
def revision_history(id, start=0, no=1):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, id=id, stranded=True)
if start + no > len(schema_doc.strands):
raise KeyError("Too many strands specified")
return schema_doc.strands[start:start+no]
def revisions(id):
return len(Document(table=settings.TABLE, ledger=settings.LEDGER, id=id, stranded=True).strands)
def revision_schema(id, strand_no):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, id=id, stranded=True)
if strand_no > len(schema_doc.strands) - 1:
raise KeyError("Too many strands specified")
revision_doc = schema_doc.strands[strand_no]
create_tables = []
for table in revision_doc.schema.tables:
pieces = Table.create(table['name'], *table['columns'])
create_tables.append(pieces)
return create_tables
def differences(id, strand_start_index, strand_end_index):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, id=id, stranded=True)
    if strand_start_index > len(schema_doc.strands) - 1 or \
            strand_end_index > len(schema_doc.strands) - 1:
raise ValueError("Specified indices exceed number of strands")
log.debug('Computing schema differences in revision #%s relative to #%s...',
strand_end_index, strand_start_index)
    # TODO: compute difference in tables themselves, i.e. additions, subtractions
alter_tables = []
start_strand = schema_doc.strands[strand_start_index]
end_strand = schema_doc.strands[strand_end_index]
end_table_names = [tb['name'] for tb in end_strand.schema.tables]
for table in start_strand.schema.tables:
if table['name'] in end_table_names:
            end_table = next(
                tb for tb in end_strand.schema.tables if tb['name'] == table['name'])
            start_columns = table['columns']
            start_names = [col['name'] for col in start_columns]
            end_columns = end_table['columns']
            end_names = [col['name'] for col in end_columns]
# NOTE: columns in end but not in start, by strict property
# equality, i.e. a column with the same name but different
# data type will get caught in this generator expression.
diff_rel_to_start = [
col for col in end_columns if col not in start_columns]
# therefore, find columns whose names are in diff, but whose
# properties are different
altered_rel_to_start = [
col for col in diff_rel_to_start if col['name'] in start_names]
# columns whose names are in diff and not in start at all
new_rel_to_start = [
col for col in diff_rel_to_start if col not in altered_rel_to_start]
# columns not in diff, but in start
removed_rel_to_start = [
col for col in start_columns if col['name'] not in end_names]
formulae = {
'ALTERED': altered_rel_to_start,
'ADDED': new_rel_to_start,
'REMOVED': removed_rel_to_start
}
log.debug('Schema Formula: %s', formulae)
            alter_tables.append(Table.alter(table['name'], **formulae))
return alter_tables
|
schema-cntl
|
/schema_cntl-1.0.1-py3-none-any.whl/schema_cntl/schema.py
|
schema.py
|
from innoldb.qldb import Document
from schema_cntl import settings
from schema_cntl.dialects.postgres import Table
from schema_cntl.util.logger import getLogger
log = getLogger('schema_cntl.schema')
def commit(schema):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, snapshot=schema)
schema_doc.save()
return schema_doc
def revision_history(id, start=0, no=1):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, id=id, stranded=True)
if start + no > len(schema_doc.strands):
raise KeyError("Too many strands specified")
return schema_doc.strands[start:start+no]
def revisions(id):
return len(Document(table=settings.TABLE, ledger=settings.LEDGER, id=id, stranded=True).strands)
def revision_schema(id, strand_no):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, id=id, stranded=True)
if strand_no > len(schema_doc.strands) - 1:
raise KeyError("Too many strands specified")
revision_doc = schema_doc.strands[strand_no]
create_tables = []
for table in revision_doc.schema.tables:
pieces = Table.create(table['name'], *table['columns'])
create_tables.append(pieces)
return create_tables
def differences(id, strand_start_index, strand_end_index):
schema_doc = Document(table=settings.TABLE,
ledger=settings.LEDGER, id=id, stranded=True)
    if strand_start_index > len(schema_doc.strands) - 1 or \
            strand_end_index > len(schema_doc.strands) - 1:
raise ValueError("Specified indices exceed number of strands")
log.debug('Computing schema differences in revision #%s relative to #%s...',
strand_end_index, strand_start_index)
    # TODO: compute difference in tables themselves, i.e. additions, subtractions
alter_tables = []
start_strand = schema_doc.strands[strand_start_index]
end_strand = schema_doc.strands[strand_end_index]
end_table_names = [tb['name'] for tb in end_strand.schema.tables]
for table in start_strand.schema.tables:
if table['name'] in end_table_names:
            end_table = next(
                tb for tb in end_strand.schema.tables if tb['name'] == table['name'])
            start_columns = table['columns']
            start_names = [col['name'] for col in start_columns]
            end_columns = end_table['columns']
            end_names = [col['name'] for col in end_columns]
# NOTE: columns in end but not in start, by strict property
# equality, i.e. a column with the same name but different
# data type will get caught in this generator expression.
diff_rel_to_start = [
col for col in end_columns if col not in start_columns]
# therefore, find columns whose names are in diff, but whose
# properties are different
altered_rel_to_start = [
col for col in diff_rel_to_start if col['name'] in start_names]
# columns whose names are in diff and not in start at all
new_rel_to_start = [
col for col in diff_rel_to_start if col not in altered_rel_to_start]
# columns not in diff, but in start
removed_rel_to_start = [
col for col in start_columns if col['name'] not in end_names]
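            # Illustrative (hypothetical) diff: if start has {'name': 'age', 'type': 'int'}
            # and end has {'name': 'age', 'type': 'text'} plus {'name': 'email', 'type': 'text'},
            # then 'age' lands in ALTERED, 'email' in ADDED, and any start column
            # whose name is missing from end lands in REMOVED.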
formulae = {
'ALTERED': altered_rel_to_start,
'ADDED': new_rel_to_start,
'REMOVED': removed_rel_to_start
}
log.debug('Schema Formula: %s', formulae)
            alter_tables.append(Table.alter(table['name'], **formulae))
return alter_tables
| 0.294621 | 0.221983 |
from schema_cntl.util.logger import getLogger
from schema_cntl.schema import commit, revision_history, differences, revision_schema
from schema_cntl import settings
import json
import os
import sys
from argparse import ArgumentParser
from pprint import pprint
APP_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(APP_DIR)
sys.path.append(SRC_DIR)
log = getLogger('schema_cntl.main')
def parse_cli_args(args):
parser = ArgumentParser()
parser.add_argument('action', nargs='*',
help="Actions: commit, history, diff, schema")
parser.add_argument(
'-l', '--limit', help="Number of records in the revision history", default=1, type=int)
return parser.parse_args(args)
def print_title(title):
if settings.LOG_LEVEL in ['INFO', 'DEBUG']:
print('---------------------------------------------- ', title)
print('-------------------------------------------------------------')
def load_schema(file):
file_path = os.path.join(os.getcwd(), file)
if os.path.exists(file_path):
with open(file_path, 'r') as infile:
schema = json.load(infile)
return schema
log.warning("No schema found at %s", file_path)
return None
def save_schema(file, schema):
file_path = os.path.join(os.getcwd(), file)
with open(file_path, 'w') as outfile:
json.dump(schema, outfile)
def commit_schema(file):
schema = load_schema(file)
if schema is not None:
doc = commit(schema)
if schema.get('id', None) is None or schema.get('meta_id', None) is None:
schema['id'], schema['meta_id'] = doc.id, doc.meta_id
save_schema(file, schema)
log.info("Schema commited to DOCUMENT(meta_id=%s)", doc.meta_id)
def generate_history(file, no):
schema = load_schema(file)
if schema is not None:
if schema.get('id', None) is None:
log.warning(
'Schema has no id, please commit before generating history')
return
items = revision_history(id=schema['id'], start=0, no=no)
for i, item in enumerate(items):
print_title(f'Revision {i}')
pprint(item.schema.to_json())
def generate_diff(file, start, end):
schema = load_schema(file)
if schema is not None:
if schema.get('id', None) is None:
log.warning(
'Schema has no id, please commit before generating revision diff')
return
alter_tables = differences(
id=schema['id'], strand_start_index=start, strand_end_index=end)
for i, table_stmt in enumerate(alter_tables):
print_title(f'Revision Change #{i}')
print('SQL -----------------', table_stmt[0])
print('Parameter Names -----', table_stmt[1])
print('Parameter Values ----', table_stmt[2])
def generate_schema_revision(file, revision):
schema = load_schema(file)
if schema is not None:
if schema.get('id', None) is None:
log.warning(
'Schema has no id, please commit before generating schema')
return
create_tables = revision_schema(id=schema['id'], strand_no=revision)
for table_stmt in create_tables:
            print_title('Table Schema')
print('SQL -----------------', table_stmt[0])
print('Parameter Names -----', table_stmt[1])
print('Parameter Values ----', table_stmt[2])
def do_program(args):
    args = parse_cli_args(args)
    if not args.action:
        log.warning("No action specified. Actions: commit, history, diff, schema")
        return
    command_form = None
if args.action[0] == 'commit':
if len(args.action[1:]) == 1:
commit_schema(args.action[1])
return
command_form = "`commit <path-to-schema>`"
if args.action[0] == 'history':
if len(args.action[1:]) == 1:
generate_history(args.action[1], args.limit)
return
command_form = "`history <path-to-schema> --limit <limit>`"
if args.action[0] == 'diff':
if len(args.action[1:]) == 3:
generate_diff(file=args.action[1], start=int(
args.action[2]), end=int(args.action[3]))
return
command_form = "`diff <path-to-schema> <revision 1> <revision 2>`"
if args.action[0] == 'schema':
if len(args.action[1:]) == 2:
generate_schema_revision(
file=args.action[1], revision=int(args.action[2]))
return
command_form = "`schema <path-to-schema> <revision>`"
if command_form is not None:
log.warning("Command is of the form : %s", command_form)
return
log.warning("Input not understood.")
return
def entrypoint():
"""Entrypoint for build package
"""
do_program(sys.argv[1:])
if __name__ == "__main__":
do_program(sys.argv[1:])
|
schema-cntl
|
/schema_cntl-1.0.1-py3-none-any.whl/schema_cntl/main.py
|
main.py
|
from schema_cntl.util.logger import getLogger
from schema_cntl.schema import commit, revision_history, differences, revision_schema
from schema_cntl import settings
import json
import os
import sys
from argparse import ArgumentParser
from pprint import pprint
APP_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(APP_DIR)
sys.path.append(SRC_DIR)
log = getLogger('schema_cntl.main')
def parse_cli_args(args):
parser = ArgumentParser()
parser.add_argument('action', nargs='*',
help="Actions: commit, history, diff, schema")
parser.add_argument(
'-l', '--limit', help="Number of records in the revision history", default=1, type=int)
return parser.parse_args(args)
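# Illustrative invocations, assuming the console script is installed as
# `schema-cntl` (file paths hypothetical):
#   schema-cntl commit schema.json
#   schema-cntl history schema.json --limit 5
#   schema-cntl diff schema.json 0 1
#   schema-cntl schema schema.json 2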
def print_title(title):
if settings.LOG_LEVEL in ['INFO', 'DEBUG']:
print('---------------------------------------------- ', title)
print('-------------------------------------------------------------')
def load_schema(file):
file_path = os.path.join(os.getcwd(), file)
if os.path.exists(file_path):
with open(file_path, 'r') as infile:
schema = json.load(infile)
return schema
log.warning("No schema found at %s", file_path)
return None
def save_schema(file, schema):
file_path = os.path.join(os.getcwd(), file)
with open(file_path, 'w') as outfile:
json.dump(schema, outfile)
def commit_schema(file):
schema = load_schema(file)
if schema is not None:
doc = commit(schema)
if schema.get('id', None) is None or schema.get('meta_id', None) is None:
schema['id'], schema['meta_id'] = doc.id, doc.meta_id
save_schema(file, schema)
log.info("Schema commited to DOCUMENT(meta_id=%s)", doc.meta_id)
def generate_history(file, no):
schema = load_schema(file)
if schema is not None:
if schema.get('id', None) is None:
log.warning(
'Schema has no id, please commit before generating history')
return
items = revision_history(id=schema['id'], start=0, no=no)
for i, item in enumerate(items):
print_title(f'Revision {i}')
pprint(item.schema.to_json())
def generate_diff(file, start, end):
schema = load_schema(file)
if schema is not None:
if schema.get('id', None) is None:
log.warning(
'Schema has no id, please commit before generating revision diff')
return
alter_tables = differences(
id=schema['id'], strand_start_index=start, strand_end_index=end)
for i, table_stmt in enumerate(alter_tables):
print_title(f'Revision Change #{i}')
print('SQL -----------------', table_stmt[0])
print('Parameter Names -----', table_stmt[1])
print('Parameter Values ----', table_stmt[2])
def generate_schema_revision(file, revision):
schema = load_schema(file)
if schema is not None:
if schema.get('id', None) is None:
log.warning(
'Schema has no id, please commit before generating schema')
return
create_tables = revision_schema(id=schema['id'], strand_no=revision)
for table_stmt in create_tables:
            print_title('Table Schema')
print('SQL -----------------', table_stmt[0])
print('Parameter Names -----', table_stmt[1])
print('Parameter Values ----', table_stmt[2])
def do_program(args):
    args = parse_cli_args(args)
    if not args.action:
        log.warning("No action specified. Actions: commit, history, diff, schema")
        return
    command_form = None
if args.action[0] == 'commit':
if len(args.action[1:]) == 1:
commit_schema(args.action[1])
return
command_form = "`commit <path-to-schema>`"
if args.action[0] == 'history':
if len(args.action[1:]) == 1:
generate_history(args.action[1], args.limit)
return
command_form = "`history <path-to-schema> --limit <limit>`"
if args.action[0] == 'diff':
if len(args.action[1:]) == 3:
generate_diff(file=args.action[1], start=int(
args.action[2]), end=int(args.action[3]))
return
command_form = "`diff <path-to-schema> <revision 1> <revision 2>`"
if args.action[0] == 'schema':
if len(args.action[1:]) == 2:
generate_schema_revision(
file=args.action[1], revision=int(args.action[2]))
return
command_form = "`schema <path-to-schema> <revision>`"
if command_form is not None:
log.warning("Command is of the form : %s", command_form)
return
log.warning("Input not understood.")
return
def entrypoint():
"""Entrypoint for build package
"""
do_program(sys.argv[1:])
if __name__ == "__main__":
do_program(sys.argv[1:])
| 0.285073 | 0.14069 |
from enum import Enum
from psycopg2 import sql
class DataTypes(Enum):
BOOL = "bool"
TEXT = "text"
CHAR = "char"
VARCHAR = "varchar"
DOUBLE = "float8"
INTEGER = "int"
DECIMAL = "decimal"
DATE = "date"
@staticmethod
def limited():
return [DataTypes.CHAR, DataTypes.VARCHAR]
@staticmethod
def convert(string=None):
if string is None:
return None, None
limit = None
if any(datatype.value in string for datatype in DataTypes.limited()) \
and '(' in string:
split = string.replace(')', '').split('(')
string, limit = split[0], int(split[-1])
for member in DataTypes.__members__.values():
if member.value == string:
return member, limit
return None, None
@staticmethod
def belongs_to_types(string, *enums):
types = [member.value for member in DataTypes.__members__.values()
if member in enums]
for data_type in types:
if string == data_type:
return True
return False
class Column:
RENAME = "RENAME COLUMN {old_name} TO {new_name}"
ADD_CONST = "ADD CONSTRAINT {constraint_name} FOREIGN KEY {fk_name} REFERENCES {primary_table} ({fk_name})"
@staticmethod
def define(**col_def):
"""Create a column definition clause in **PostgreSQL**. Column names will be parameterized to prevent injection, i.e. the `name` passed into this method is not the name of the column, but the name of the column parameter in the clause that is passed into the query cursor.
        :param kwargs: Keyword arguments. The input format should match the `schema.json` input format.
:return: Column definition clause for `CREATE TABLE` **PostgreSQL** statement.
:rtype: str
"""
if col_def.get('primary_key', None) is not None:
return "{%s} SERIAL PRIMARY KEY" % (col_def['name'])
if col_def.get('foreign_key_references', None) is not None:
return "{%s} integer REFERENCES {fkr}" % (col_def['name'])
        # data type is not parameterized in the string; it is converted into an enum
        # before formatting so user input never directly touches the query string
enumerated_type, limit = DataTypes.convert(col_def['type'])
if enumerated_type is None:
raise ValueError(
"No data type specified for column %s" % (col_def['name']))
definition = "{%s} %s" % (col_def['name'], enumerated_type.value)
if limit is not None:
definition += "(%s)" % (limit)
        if col_def.get('not_null', None):
            definition += " NOT NULL"
return definition
@staticmethod
def add(**col_def):
"""`ADD COLUMN` subclause in **PostgreSQL**, for use in `ALTER TABLE` clauses. Column names will be parameterized to prevent injection, i.e. the `name` passed into this method is not the name of the column, but the name of the column parameter in the clause that is passed into the query cursor.
        :param kwargs: Keyword arguments. The input format should match the `schema.json` input format.
:return: Column definition clause for `ADD COLUMN` **PostgreSQL** statement.
:rtype: str
"""
        # data type is not parameterized in the string; it is converted into an enum
        # before formatting so user input never directly touches the query string
enumerated_type, limit = DataTypes.convert(col_def['type'])
if enumerated_type is None:
raise ValueError(
"No data type specified for column %s" % (col_def['name']))
add_column = "ADD COLUMN {%s} %s" % (
col_def['name'], enumerated_type.value)
        if limit is not None:
            add_column += "(%s)" % (limit)
        if col_def.get('not_null', None):
            add_column += " NOT NULL"
        if col_def.get('foreign_key_references', None) is not None:
            add_column += " CONSTRAINT fk REFERENCES {fkr}"
return add_column
@staticmethod
def add_constraint(**col_def):
pass
@staticmethod
def drop_constraint(**col_def):
pass
@staticmethod
def drop(**col_def):
return "DROP COLUMN {%s}" % col_def['name']
class Table:
@staticmethod
def create(table_name, *col_defs):
"""Generate a `CREATE TABLE` PostgreSQL statement. Table and column names will be parameterized in query to prevent injection, i.e. `table_name` and `col_def[]['name']` are not the names of the table and columns, but the names of the column and column parameters in the statement.
:param table_name: name of table
:type table_name: string
:param `*args`: column objects from `schema.tables[]` structure, with `name` replaced by a parameterized key.
:return: `CREATE TABLE` statement, parameter names, parameter values
:rtype: tuple
"""
create_table = "CREATE TABLE {table_name} ("
parameters = [table_name]
parameter_names = ['table_name']
for i, col_def in enumerate(col_defs):
# parameterize column_name
param_name = 'col_' + str(i)
parameters.append(col_def['name'])
parameter_names.append(param_name)
col_def['name'] = param_name
col_statement = Column.define(**col_def)
create_table += col_statement
if col_defs.index(col_def) != len(col_defs) - 1:
create_table += ", "
create_table += ");"
return create_table, parameter_names, parameters
@staticmethod
def alter(table_name, **col_formulae):
statement = ""
parameters = [table_name]
parameter_names = ['table_name']
accumulated = 0
for verb, cols in col_formulae.items():
alter_table = None
for i, formula in enumerate(cols):
param_name = 'col_' + str(i + accumulated)
parameters.append(formula['name'])
parameter_names.append(param_name)
formula['name'] = param_name
if verb == 'ALTERED':
pass
elif verb == 'ADDED':
if alter_table is None:
alter_table = "ALTER TABLE {table_name} "
else:
alter_table += ", "
alter_table += Column.add(**formula)
                elif verb == 'REMOVED':
                    if alter_table is None:
                        alter_table = "ALTER TABLE {table_name} "
                    else:
                        alter_table += ", "
                    alter_table += Column.drop(**formula)
            accumulated += len(cols)
if alter_table is not None:
statement += alter_table + "; "
return statement, parameter_names, parameters
|
schema-cntl
|
/schema_cntl-1.0.1-py3-none-any.whl/schema_cntl/dialects/postgres.py
|
postgres.py
|
from enum import Enum
from psycopg2 import sql
class DataTypes(Enum):
BOOL = "bool"
TEXT = "text"
CHAR = "char"
VARCHAR = "varchar"
DOUBLE = "float8"
INTEGER = "int"
DECIMAL = "decimal"
DATE = "date"
@staticmethod
def limited():
return [DataTypes.CHAR, DataTypes.VARCHAR]
@staticmethod
def convert(string=None):
if string is None:
return None, None
limit = None
if any(datatype.value in string for datatype in DataTypes.limited()) \
and '(' in string:
split = string.replace(')', '').split('(')
string, limit = split[0], int(split[-1])
for member in DataTypes.__members__.values():
if member.value == string:
return member, limit
return None, None
@staticmethod
def belongs_to_types(string, *enums):
types = [member.value for member in DataTypes.__members__.values()
if member in enums]
for data_type in types:
if string == data_type:
return True
return False
class Column:
RENAME = "RENAME COLUMN {old_name} TO {new_name}"
ADD_CONST = "ADD CONSTRAINT {constraint_name} FOREIGN KEY {fk_name} REFERENCES {primary_table} ({fk_name})"
@staticmethod
def define(**col_def):
"""Create a column definition clause in **PostgreSQL**. Column names will be parameterized to prevent injection, i.e. the `name` passed into this method is not the name of the column, but the name of the column parameter in the clause that is passed into the query cursor.
        :param kwargs: Keyword arguments. The input format should match the `schema.json` input format.
:return: Column definition clause for `CREATE TABLE` **PostgreSQL** statement.
:rtype: str
"""
if col_def.get('primary_key', None) is not None:
return "{%s} SERIAL PRIMARY KEY" % (col_def['name'])
if col_def.get('foreign_key_references', None) is not None:
return "{%s} integer REFERENCES {fkr}" % (col_def['name'])
        # data type is not parameterized in the string; it is converted into an enum
        # before formatting so user input never directly touches the query string
enumerated_type, limit = DataTypes.convert(col_def['type'])
if enumerated_type is None:
raise ValueError(
"No data type specified for column %s" % (col_def['name']))
definition = "{%s} %s" % (col_def['name'], enumerated_type.value)
if limit is not None:
definition += "(%s)" % (limit)
        if col_def.get('not_null', None):
            definition += " NOT NULL"
return definition
@staticmethod
def add(**col_def):
"""`ADD COLUMN` subclause in **PostgreSQL**, for use in `ALTER TABLE` clauses. Column names will be parameterized to prevent injection, i.e. the `name` passed into this method is not the name of the column, but the name of the column parameter in the clause that is passed into the query cursor.
        :param kwargs: Keyword arguments. The input format should match the `schema.json` input format.
:return: Column definition clause for `ADD COLUMN` **PostgreSQL** statement.
:rtype: str
"""
        # data type is not parameterized in the string; it is converted into an enum
        # before formatting so user input never directly touches the query string
enumerated_type, limit = DataTypes.convert(col_def['type'])
if enumerated_type is None:
raise ValueError(
"No data type specified for column %s" % (col_def['name']))
add_column = "ADD COLUMN {%s} %s" % (
col_def['name'], enumerated_type.value)
        if limit is not None:
            add_column += "(%s)" % (limit)
        if col_def.get('not_null', None):
            add_column += " NOT NULL"
        if col_def.get('foreign_key_references', None) is not None:
            add_column += " CONSTRAINT fk REFERENCES {fkr}"
return add_column
@staticmethod
def add_constraint(**col_def):
pass
@staticmethod
def drop_constraint(**col_def):
pass
@staticmethod
def drop(**col_def):
return "DROP COLUMN {%s}" % col_def['name']
class Table:
@staticmethod
def create(table_name, *col_defs):
"""Generate a `CREATE TABLE` PostgreSQL statement. Table and column names will be parameterized in query to prevent injection, i.e. `table_name` and `col_def[]['name']` are not the names of the table and columns, but the names of the column and column parameters in the statement.
:param table_name: name of table
:type table_name: string
:param `*args`: column objects from `schema.tables[]` structure, with `name` replaced by a parameterized key.
:return: `CREATE TABLE` statement, parameter names, parameter values
:rtype: tuple
"""
create_table = "CREATE TABLE {table_name} ("
parameters = [table_name]
parameter_names = ['table_name']
for i, col_def in enumerate(col_defs):
# parameterize column_name
param_name = 'col_' + str(i)
parameters.append(col_def['name'])
parameter_names.append(param_name)
col_def['name'] = param_name
col_statement = Column.define(**col_def)
create_table += col_statement
if col_defs.index(col_def) != len(col_defs) - 1:
create_table += ", "
create_table += ");"
return create_table, parameter_names, parameters
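    # Illustrative (hypothetical) result of
    # Table.create('users', {'name': 'id', 'primary_key': True},
    #              {'name': 'email', 'type': 'varchar(64)'}):
    #   ("CREATE TABLE {table_name} ({col_0} SERIAL PRIMARY KEY, {col_1} varchar(64));",
    #    ['table_name', 'col_0', 'col_1'],
    #    ['users', 'id', 'email'])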
@staticmethod
def alter(table_name, **col_formulae):
statement = ""
parameters = [table_name]
parameter_names = ['table_name']
accumulated = 0
for verb, cols in col_formulae.items():
alter_table = None
for i, formula in enumerate(cols):
param_name = 'col_' + str(i + accumulated)
parameters.append(formula['name'])
parameter_names.append(param_name)
formula['name'] = param_name
if verb == 'ALTERED':
pass
elif verb == 'ADDED':
if alter_table is None:
alter_table = "ALTER TABLE {table_name} "
else:
alter_table += ", "
alter_table += Column.add(**formula)
                elif verb == 'REMOVED':
                    if alter_table is None:
                        alter_table = "ALTER TABLE {table_name} "
                    else:
                        alter_table += ", "
                    alter_table += Column.drop(**formula)
            accumulated += len(cols)
if alter_table is not None:
statement += alter_table + "; "
return statement, parameter_names, parameters
| 0.689933 | 0.272709 |
# Schema Compare
Schema compare is a simple utility tool with the sole goal of comparing schemas between two databases. Currently it only supports PostgreSQL.
### Installation
You can install Schema Compare from PyPI:
pip install schema-compare
### How to use
1. Define a config.toml file
2. python schema-compare {{config.toml path}}
### Configuration file
[source]
username = ....
password = ....
hostname = ....
database = ....
[target]
username = ....
password = ....
hostname = ....
database = ....
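An illustrative, filled-in configuration (all values hypothetical):

    [source]
    username = "alice"
    password = "s3cret"
    hostname = "db-source.example.com"
    database = "appdb"
    [target]
    username = "alice"
    password = "s3cret"
    hostname = "db-target.example.com"
    database = "appdb"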
|
schema-compare
|
/schema-compare-1.0.9.tar.gz/schema-compare-1.0.9/README.md
|
README.md
|
# Schema Compare
Schema compare is a simple utility tool with the sole goal of comparing schemas between two databases. Currently it only supports PostgreSQL.
### Installation
You can install Schema Compare from PyPI:
pip install schema-compare
### How to use
1. Define a config.toml file
2. python schema-compare {{config.toml path}}
### Configuration file
[source]
username = ....
password = ....
hostname = ....
database = ....
[target]
username = ....
password = ....
hostname = ....
database = ....
| 0.574275 | 0.719297 |
# Schema Enforcer
Schema Enforcer provides a framework for testing structured data against schema definitions using [JSONSchema](https://json-schema.org/understanding-json-schema/index.html).
## Getting Started
### Install
Schema Enforcer is a python library which is available on PyPI. It requires a python version of 3.8 or greater. Once a supported version of python is installed on your machine, pip can be used to install the tool by using the command `python -m pip install schema-enforcer`.
```cli
python -m pip install schema-enforcer
```
### Overview
Schema Enforcer requires that two different elements be defined by the user:
- Schema Definition Files: These are files which define the schema to which a given set of data should adhere.
- Structured Data Files: These are files which contain data that should adhere to the schema defined in one (or multiple) of the schema definition files.
> Note: Data which needs to be validated against a schema definition can come in the form of Structured Data Files or Ansible host vars. Ansible is not installed by default when schema-enforcer is installed. In order to use Ansible features, ansible must already be available or must be declared as an optional dependency when installing schema-enforcer. In the interest of brevity and simplicity, this README.md contains discussion only of Structured Data Files -- for more information on how to use `schema-enforcer` with ansible host vars, see [the ansible_command README](docs/ansible_command.md)
When `schema-enforcer` runs, it assumes a directory hierarchy relative to the folder in which the tool is run.
- `schema-enforcer` will search for **schema definition files** nested inside of `./schema/schemas/` which end in `.yml`, `.yaml`, or `.json`.
- `schema-enforcer` will do a recursive search for **structured data files** starting in the current working directory (`./`). It does this by searching all directories (including the current working directory) for files ending in `.yml`, `.yaml`, or `.json`. The `schema` folder and its subdirectories are excluded from this search by default.
```cli
bash$ cd examples/example1
bash$ tree
.
├── chi-beijing-rt1
│ ├── dns.yml
│ └── syslog.yml
├── eng-london-rt1
│ ├── dns.yml
│ └── ntp.yml
└── schema
└── schemas
├── dns.yml
├── ntp.yml
└── syslog.yml
4 directories, 7 files
```
In the above example, `chi-beijing-rt1` is a directory with structured data files containing some configuration for a router named `chi-beijing-rt1`. There are two structured data files inside of this folder, `dns.yml` and `syslog.yml`. Similarly, the `eng-london-rt1` directory contains definition files for a router named `eng-london-rt1` -- `dns.yml` and `ntp.yml`.
The file `chi-beijing-rt1/dns.yml` defines the DNS servers `chi-beijing.rt1` should use. The data in this file includes a simple hash-type data structure with a key of `dns_servers` and a value of an array. Each element in this array is a hash-type object with a key of `address` and a value which is the string of an IP address.
```yaml
bash$ cat chi-beijing-rt1/dns.yml
# jsonschema: schemas/dns_servers
---
dns_servers:
- address: "10.1.1.1"
- address: "10.2.2.2"
```
> Note: The line `# jsonschema: schemas/dns_servers` tells `schema-enforcer` the ID of the schema which the structured data defined in the file should be validated against. The schema ID is defined by the `$id` top level key in a schema definition. More information on how the structured data is mapped to a schema ID to which it should adhere can be found in the [mapping_schemas README](./docs/mapping_schemas.md)
The file `schema/schemas/dns.yml` is a schema definition file. It contains a schema definition for DNS servers written in JSONSchema. The data in `chi-beijing-rt1/dns.yml` and `eng-london-rt1/dns.yml` should adhere to the schema defined in this schema definition file.
```yaml
bash$ cat schema/schemas/dns.yml
---
$schema: "http://json-schema.org/draft-07/schema#"
$id: "schemas/dns_servers"
description: "DNS Server Configuration schema."
type: "object"
properties:
dns_servers:
type: "array"
items:
type: "object"
properties:
name:
type: "string"
address:
type: "string"
format: "ipv4"
vrf:
type: "string"
required:
- "address"
uniqueItems: true
required:
- "dns_servers"
```
> Note: The cat of the schema definition file may be a little scary if you haven't seen JSONSchema before. Don't worry too much if it is difficult to parse right now. The important thing to note is that this file contains a schema definition to which the structured data in the files `chi-beijing-rt1/dns.yml` and `eng-london-rt1/dns.yml` should adhere.
### Basic usage
Once schema-enforcer has been installed, the `schema-enforcer validate` command can be used to run schema validations of YAML/JSON instance files against the defined schema.
```shell
bash$ schema-enforcer --help
Usage: schema-enforcer [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
Commands:
ansible Validate the hostvar for all hosts within an Ansible...
schema Manage your schemas
validate Validates instance files against defined schema
```
To run the schema validations, use the `schema-enforcer validate` command.
```shell
bash$ schema-enforcer validate
ALL SCHEMA VALIDATION CHECKS PASSED
```
To acquire more context regarding what files specifically passed schema validation, the `--show-pass` flag can be passed in.
```shell
bash$ schema-enforcer validate --show-pass
PASS [FILE] ./eng-london-rt1/ntp.yml
PASS [FILE] ./eng-london-rt1/dns.yml
PASS [FILE] ./chi-beijing-rt1/syslog.yml
PASS [FILE] ./chi-beijing-rt1/dns.yml
ALL SCHEMA VALIDATION CHECKS PASSED
```
If we modify one of the addresses in the `chi-beijing-rt1/dns.yml` file so that its value is the boolean `true` instead of an IP address string, then run the `schema-enforcer` tool, the validation will fail with an error message.
```yaml
bash$ cat chi-beijing-rt1/dns.yml
# jsonschema: schemas/dns_servers
---
dns_servers:
- address: true
- address: "10.2.2.2"
```
```shell
bash$ schema-enforcer validate
FAIL | [ERROR] True is not of type 'string' [FILE] ./chi-beijing-rt1/dns.yml [PROPERTY] dns_servers:0:address
bash$ echo $?
1
```
When a structured data file fails schema validation, `schema-enforcer` exits with a code of 1.
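Because of this non-zero exit code, schema validation can gate a CI pipeline directly. A minimal sketch (script contents hypothetical):

```shell
#!/usr/bin/env bash
# Run schema validation; a non-zero exit code fails the CI job.
if ! schema-enforcer validate; then
    echo "Schema validation failed" >&2
    exit 1
fi
```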
### Configuration Settings
Schema enforcer will work with default settings; however, a `pyproject.toml` file can be placed at the root of the path in which `schema-enforcer` is run in order to override default settings or declare configuration for more advanced features. Inside of this `pyproject.toml` file, `tool.schema_enforcer` sections can be used to declare settings for schema enforcer. Take for example the `pyproject.toml` file in example 2.
```shell
bash$ cd examples/example2 && tree -L 2
.
├── README.md
├── hostvars
│ ├── chi-beijing-rt1
│ ├── eng-london-rt1
│ └── ger-berlin-rt1
├── invalid
├── pyproject.toml
└── schema
├── definitions
└── schemas
8 directories, 2 files
```
In this toml file, a schema mapping is declared which tells schema enforcer which structured data files should be checked by which schema IDs.
```shell
bash$ cat pyproject.toml
[tool.schema_enforcer.schema_mapping]
# Map structured data filename to schema IDs
'dns_v1.yml' = ['schemas/dns_servers']
'dns_v2.yml' = ['schemas/dns_servers_v2']
'syslog.yml' = ['schemas/syslog_servers']
```
> More information on available configuration settings can be found in the [configuration README](docs/configuration.md)
### Supported Formats
By default, schema enforcer installs the jsonschema `format_nongpl` extra (in version <1.2.0) or `format-nongpl` (in versions >=1.2.0). This extra allows the use of formats that can be used in schema definitions (e.g. ipv4, hostname...etc). The `format_nongpl` or `format-nongpl` extra only installs transitive dependencies that are not licensed under GPL. The `iri` and `iri-reference` formats are defined by the `rfc3987` transitive dependency which is licensed under GPL. As such, `iri` and `iri-reference` formats are *not* supported by `format-nongpl`/`format_nongpl`. If you have a need to use `iri` and/or `iri-reference` formats, you can do so by running the following pip command (or its poetry equivalent):
```
pip install 'jsonschema[rfc3987]'
```
See the "Validating Formats" section in the [jsonschema documentation](https://github.com/python-jsonschema/jsonschema/blob/main/docs/validate.rst) for more information.
### Where To Go Next
Detailed documentation can be found in the README.md files inside of the `docs/` directory.
- ["Introducing Schema Enforcer" blog post](https://blog.networktocode.com/post/introducing_schema_enforcer/)
- [Using a pyproject.toml file for configuration](docs/configuration.md)
- [Mapping Structured Data Files to Schema Files](docs/mapping_data_files_to_schemas.md)
- [The `ansible` command](docs/ansible_command.md)
- [The `validate` command](docs/validate_command.md)
- [The `schema` command](docs/schema_command.md)
- [Implementing custom validators](docs/custom_validators.md)
|
schema-enforcer
|
/schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/README.md
|
README.md
|
python -m pip install schema-enforcer
bash$ cd examples/example1
bash$ tree
.
├── chi-beijing-rt1
│ ├── dns.yml
│ └── syslog.yml
├── eng-london-rt1
│ ├── dns.yml
│ └── ntp.yml
└── schema
└── schemas
├── dns.yml
├── ntp.yml
└── syslog.yml
4 directories, 7 files
bash$ cat chi-beijing-rt1/dns.yml
# jsonschema: schemas/dns_servers
---
dns_servers:
- address: "10.1.1.1"
- address: "10.2.2.2"
bash$ cat schema/schemas/dns.yml
---
$schema: "http://json-schema.org/draft-07/schema#"
$id: "schemas/dns_servers"
description: "DNS Server Configuration schema."
type: "object"
properties:
dns_servers:
type: "array"
items:
type: "object"
properties:
name:
type: "string"
address:
type: "string"
format: "ipv4"
vrf:
type: "string"
required:
- "address"
uniqueItems: true
required:
- "dns_servers"
bash$ schema-enforcer --help
Usage: schema-enforcer [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
Commands:
ansible Validate the hostvar for all hosts within an Ansible...
schema Manage your schemas
validate Validates instance files against defined schema
bash$ schema-enforcer validate
ALL SCHEMA VALIDATION CHECKS PASSED
bash$ schema-enforcer validate --show-pass
PASS [FILE] ./eng-london-rt1/ntp.yml
PASS [FILE] ./eng-london-rt1/dns.yml
PASS [FILE] ./chi-beijing-rt1/syslog.yml
PASS [FILE] ./chi-beijing-rt1/dns.yml
ALL SCHEMA VALIDATION CHECKS PASSED
bash$ cat chi-beijing-rt1/dns.yml
# jsonschema: schemas/dns_servers
---
dns_servers:
- address: true
- address: "10.2.2.2"
bash$ schema-enforcer validate
FAIL | [ERROR] True is not of type 'string' [FILE] ./chi-beijing-rt1/dns.yml [PROPERTY] dns_servers:0:address
bash$ echo $?
1
bash$ cd examples/example2 && tree -L 2
.
├── README.md
├── hostvars
│ ├── chi-beijing-rt1
│ ├── eng-london-rt1
│ └── ger-berlin-rt1
├── invalid
├── pyproject.toml
└── schema
├── definitions
└── schemas
8 directories, 2 files
bash$ cat pyproject.toml
[tool.schema_enforcer.schema_mapping]
# Map structured data filename to schema IDs
'dns_v1.yml' = ['schemas/dns_servers']
'dns_v2.yml' = ['schemas/dns_servers_v2']
'syslog.yml' = ['schemas/syslog_servers']
pip install 'jsonschema[rfc3987]'
| 0.394901 | 0.95469 |
# Changelog
## v1.2.2
- #156 Add support for jsonschema 4.18
- Remove support for python version 3.7
## v1.2.1
### Changes
- #152 Update requirement for rich to `>=9.5`
## v1.2.0 - 2023-06-05
### Adds
- Support for versions of jsonschema >= 4.6
### Removes
- Support for versions of jsonschema < 4.6. See #141 for details.
## v1.1.5 - 2022-07-27
### Changes
- Fixes #141 - Cannot install schema-enforcer in environments which require a version of jsonschema < 4.6
## v1.1.4 - 2022-07-13
### Adds
- Add format_nongpl extra to jsonschema install. This ensures draft7 format checkers validate format adherence as expected while also ensuring GPL-licensed transitive dependencies are not installed.
### Changes
- Update jsonschema version dependency so that versions in the 4.x train are supported.
### Removes
- Automatic support for `iri` and `iri-reference` format checkers. This was removed because these format checkers require the `rfc3987` library, which is licensed under GPL. If you require these checkers, you can manually install `rfc3987` or install this package as `jsonschema[rfc3987]`.
## v1.1.3 - 2022-05-31
### Changes
- jinja2 version dependency specification modified such that versions in the 3.x release are supported
## v1.1.2 - 2022-01-10
### Changes
- Update dependencies
- Switch from slim to full python docker base image
## v1.1.1 - 2021-12-23
### Changes
- Minor updates to documentation
- Update CI build environment to use github actions instead of Travis CI
- Update version of ruamel from 0.16 to 0.17
## v1.1.0 - 2021-05-25
### Adds
- [Custom Validators](docs/custom_validators.md)
- [Automatic mapping of schemas to data files](docs/mapping_data_files_to_schemas.md)
- Automatic implementation of draft7 format checker to support [IPv4 and IPv6 format declarations](https://json-schema.org/understanding-json-schema/reference/string.html#id12) in a JSON Schema definition [#94](https://github.com/networktocode/schema-enforcer/issues/94)
### Changes
- Removes Ansible as a mandatory dependency [#90](https://github.com/networktocode/schema-enforcer/issues/90)
- `docs/mapping_schemas.md` renamed to `docs/mapping_data_files_to_schemas.md`
- Simplifies the invoke tasks used for development
- Schema enforcer now exits if an invalid schema is found while loading schemas [#99](https://github.com/networktocode/schema-enforcer/issues/99)
## v1.0.0 - 2021-01-26
Schema Enforcer Initial Release
|
schema-enforcer
|
/schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/CHANGELOG.md
|
CHANGELOG.md
|
# Changelog
## v1.2.2
- #156 Add support for jsonschema 4.18
- Remove support for python version 3.7
## v1.2.1
### Changes
- #152 Update requirement for rich to `>=9.5`
## v1.2.0 - 2023-06-05
### Adds
- Support for versions of jsonschema >= 4.6
### Removes
- Support for versions of jsonschema < 4.6. See #141 for details.
## v1.1.5 - 2022-07-27
### Changes
- Fixes #141 - Cannot install schema-enforcer in environments which require a version of jsonschema < 4.6
## v1.1.4 - 2022-07-13
### Adds
- Add format_nongpl extra to jsonschema install. This ensures draft7 format checkers validate format adherence as expected while also ensuring GPL-licensed transitive dependencies are not installed.
### Changes
- Update jsonschema version dependency so that versions in the 4.x train are supported.
### Removes
- Automatic support for `iri` and `iri-reference` format checkers. This was removed because these format checkers require the `rfc3987` library, which is licensed under GPL. If you require these checkers, you can manually install `rfc3987` or install this package as `jsonschema[rfc3987]`.
## v1.1.3 - 2022-05-31
### Changes
- jinja2 version dependency specification modified such that versions in the 3.x release are supported
## v1.1.2 - 2022-01-10
### Changes
- Update dependencies
- Switch from slim to full python docker base image
## v1.1.1 - 2021-12-23
### Changes
- Minor updates to documentation
- Update CI build environment to use github actions instead of Travis CI
- Update version of ruamel from 0.16 to 0.17
## v1.1.0 - 2021-05-25
### Adds
- [Custom Validators](docs/custom_validators.md)
- [Automatic mapping of schemas to data files](docs/mapping_data_files_to_schemas.md)
- Automatic implementation of draft7 format checker to support [IPv4 and IPv6 format declarations](https://json-schema.org/understanding-json-schema/reference/string.html#id12) in a JSON Schema definition [#94](https://github.com/networktocode/schema-enforcer/issues/94)
### Changes
- Removes Ansible as a mandatory dependency [#90](https://github.com/networktocode/schema-enforcer/issues/90)
- `docs/mapping_schemas.md` renamed to `docs/mapping_data_files_to_schemas.md`
- Simplifies the invoke tasks used for development
- Schema enforcer now exits if an invalid schema is found while loading schemas [#99](https://github.com/networktocode/schema-enforcer/issues/99)
## v1.0.0 - 2021-01-26
Schema Enforcer Initial Release
| 0.703957 | 0.264269 |
from ansible.inventory.manager import InventoryManager # pylint: disable=import-error
from ansible.parsing.dataloader import DataLoader # pylint: disable=import-error
from ansible.vars.manager import VariableManager # pylint: disable=import-error
from ansible.template import Templar # pylint: disable=import-error
# Referenced https://github.com/fgiorgetti/qpid-dispatch-tests/ for the below class
class AnsibleInventory:
"""AnsibleInventory."""
def __init__(self, inventory=None, extra_vars=None):
"""Imitates Ansible Inventory Loader.
Args:
inventory (str): Path to Ansible Inventory files.
extra_vars (dict): Extra Vars passed at run time.
"""
self.inventory = inventory
self.loader = DataLoader()
self.inv_mgr = InventoryManager(loader=self.loader, sources=self.inventory)
self.var_mgr = VariableManager(loader=self.loader, inventory=self.inv_mgr)
# TODO As of Ansible==2.8.0 the extra_vars property cannot be set to VariableManager
# This needs to be investigated and fixed properly
self.extra_vars = extra_vars or {}
def get_hosts_containing(self, var=None):
"""Gets hosts that have a value for ``var``.
If ``var`` is None, then all hosts in the inventory will be returned.
Args:
var (str): The variable to use to restrict hosts.
Returns:
list: All ansible.inventory.host.Host objects that define ``var``.
"""
all_hosts = self.inv_mgr.get_hosts()
if var is None:
return all_hosts
# Only add hosts that define the variable.
hosts_with_var = [host for host in all_hosts if var in self.var_mgr.get_vars(host=host)]
return hosts_with_var
def get_host_vars(self, host):
"""Retrieves Jinja2 rendered variables for ``host``.
Args:
host (ansible.inventory.host.Host): The host to retrieve variable data from.
Returns:
dict: The variables defined by the ``host`` in Ansible Inventory.
"""
data = self.var_mgr.get_vars(host=host)
templar = Templar(variables=data, loader=self.loader)
return templar.template(data, fail_on_undefined=False)
def get_clean_host_vars(self, host):
"""Return clean hostvars for a given host, cleaned up of all keys inserted by Templar.
Args:
host (ansible.inventory.host.Host): The host to retrieve variable data from.
Raises:
TypeError: When "magic_vars_to_evaluate" is declared in an Ansible inventory file and is not of type list,
a type error is raised
Returns:
dict: clean hostvars
"""
keys_cleanup = [
"inventory_file",
"inventory_dir",
"inventory_hostname",
"inventory_hostname_short",
"group_names",
"ansible_facts",
"playbook_dir",
"ansible_playbook_python",
"groups",
"omit",
"ansible_version",
"ansible_config_file",
"schema_enforcer_schema_ids",
"schema_enforcer_strict",
"schema_enforcer_automap_default",
"magic_vars_to_evaluate",
]
hostvars = self.get_host_vars(host)
# Extract magic vars which should be evaluated
magic_vars_to_evaluate = hostvars.get("magic_vars_to_evaluate", [])
if not isinstance(magic_vars_to_evaluate, list):
raise TypeError(f"magic_vars_to_evaluate variable configured for host {host.name} must be of type list")
keys_cleanup = list(set(keys_cleanup) - set(magic_vars_to_evaluate))
for key in keys_cleanup:
if key in hostvars:
del hostvars[key]
return hostvars
@staticmethod
def get_applicable_schemas(hostvars, smgr, declared_schema_ids, automap):
"""Get applicable schemas.
Search an explicit mapping to determine the schemas which should be used to validate hostvars
for a given host.
If an explicit mapping is not defined, correlate top level keys in the structured data with top
level properties in the schema to acquire applicable schemas.
Args:
hostvars (dict): dictionary of cleaned host vars which will be evaluated against schema
smgr (schema_enforcer.schemas.manager.SchemaManager): SchemaManager object
            declared_schema_ids (list): A list of declared schema IDs inferred from the schema_enforcer_schema_ids variable
automap (bool): Whether or not to use the `automap` feature to automatically map top level hostvar keys
to top level schema definition properties if no schema ids are declared (list of schema ids is empty)
Returns:
applicable_schemas (dict): dictionary mapping schema_id to schema obj for all applicable schemas
"""
applicable_schemas = {}
for key in hostvars.keys():
# extract applicable schema ID to JsonSchema objects if schema_ids are declared
if declared_schema_ids:
for schema_id in declared_schema_ids:
applicable_schemas[schema_id] = smgr.schemas[schema_id]
# extract applicable schema ID to JsonSchema objects based on host var to top level property mapping.
elif automap:
for schema in smgr.schemas.values():
if key in schema.top_level_properties:
applicable_schemas[schema.id] = schema
continue
return applicable_schemas
def get_schema_validation_settings(self, host):
"""Parse Ansible Schema Validation Settings from a host object.
Validate settings or ensure an error is raised in the event an invalid parameter is
configured in the host file.
Args:
host (AnsibleInventory.host): Ansible Inventory Host Object
Raises:
TypeError: Raised when one of the schema configuration parameters is of the wrong type
ValueError: Raised when one of the schema configuration parameters is incorrectly configured
Returns:
(dict): Dict of validation settings with keys "declared_schema_ids", "strict", and "automap"
"""
        # Get the rendered host vars for this host (no key cleanup is needed here)
hostvars = self.get_host_vars(host)
# Extract declared_schema_ids from hostvar setting
declared_schema_ids = []
if "schema_enforcer_schema_ids" in hostvars:
if not isinstance(hostvars["schema_enforcer_schema_ids"], list):
raise TypeError(f"'schema_enforcer_schema_ids' attribute defined for {host.name} must be of type list")
declared_schema_ids = hostvars["schema_enforcer_schema_ids"]
# Extract whether to use a strict validator or a loose validator from hostvar setting
strict = False
if "schema_enforcer_strict" in hostvars:
if not isinstance(hostvars["schema_enforcer_strict"], bool):
raise TypeError(f"'schema_enforcer_strict' attribute defined for {host.name} must be of type bool")
strict = hostvars["schema_enforcer_strict"]
automap = True
if "schema_enforcer_automap_default" in hostvars:
if not isinstance(hostvars["schema_enforcer_automap_default"], bool):
raise TypeError(
f"'schema_enforcer_automap_default' attribute defined for {host.name} must be of type bool"
)
automap = hostvars["schema_enforcer_automap_default"]
# Raise error if settings are set incorrectly
if strict and not declared_schema_ids:
msg = (
f"The 'schema_enforcer_strict' parameter is set for {host.name} but the 'schema_enforcer_schema_ids' parameter does not declare a schema id. "
"The 'schema_enforcer_schema_ids' parameter MUST be defined as a list declaring only one schema ID if 'schema_enforcer_strict' is set."
)
raise ValueError(msg)
if strict and declared_schema_ids and len(declared_schema_ids) > 1:
msg = (
f"The 'schema_enforcer_strict' parameter is set for {host.name} but the 'schema_enforcer_schema_ids' parameter declares more than one schema id. "
"The 'schema_enforcer_schema_ids' parameter MUST be defined as a list declaring only one schema ID if 'schema_enforcer_strict' is set."
)
raise ValueError(msg)
return {
"declared_schema_ids": declared_schema_ids,
"strict": strict,
"automap": automap,
}
def print_schema_mapping(self, hosts, limit, smgr):
"""Print host to schema IDs mapping.
Args:
hosts (list): A list of ansible.inventory.host.Host objects for which the mapping should be printed
limit (str): The host to which to limit the search
smgr (schema_enforcer.schemas.manager.SchemaManager): Schema manager which handles schema objects
"""
print_dict = {}
for host in hosts:
if limit and host.name != limit:
continue
# Get hostvars
hostvars = self.get_clean_host_vars(host)
# Acquire validation settings for the given host
schema_validation_settings = self.get_schema_validation_settings(host)
declared_schema_ids = schema_validation_settings["declared_schema_ids"]
automap = schema_validation_settings["automap"]
# Validate declared schemas exist
smgr.validate_schemas_exist(declared_schema_ids)
# Acquire schemas applicable to the given host
applicable_schemas = self.get_applicable_schemas(hostvars, smgr, declared_schema_ids, automap)
# Add an element to the print dict for this host
print_dict[host.name] = list(applicable_schemas.keys())
if print_dict:
print("{:25} Schema ID".format("Ansible Host")) # pylint: disable=consider-using-f-string
print("-" * 80)
print_strings = []
for hostname, schema_ids in print_dict.items():
print_strings.append(f"{hostname:25} {schema_ids}")
print("\n".join(sorted(print_strings)))
|
schema-enforcer
|
/schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/schema_enforcer/ansible_inventory.py
|
ansible_inventory.py
|
| 0.777849 | 0.193795 |
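A minimal usage sketch for the AnsibleInventory helper above, assuming ansible is installed and an inventory file named inventory.ini exists in the working directory; the path and the print format are illustrative, not part of the library:

from schema_enforcer.ansible_inventory import AnsibleInventory

inv = AnsibleInventory(inventory="inventory.ini")  # hypothetical inventory path
for host in inv.get_hosts_containing():
    hostvars = inv.get_clean_host_vars(host)
    settings = inv.get_schema_validation_settings(host)
    # declared_schema_ids drives the explicit mapping; automap falls back to
    # matching top level hostvar keys against schema top level properties.
    print(host.name, sorted(hostvars), settings["declared_schema_ids"], settings["automap"])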
import os
import os.path
import sys
from pathlib import Path
from typing import Dict, List, Optional
import toml
from pydantic import BaseSettings, ValidationError
SETTINGS = None
class Settings(BaseSettings): # pylint: disable=too-few-public-methods
"""Main Settings Class for the project.
The type of each setting is defined using Python annotations
and is validated when a config file is loaded with Pydantic.
Most input files specific to this project are expected to be located in the same directory. e.g.
schema/
- definitions
- schemas
"""
# Main directory names
main_directory: str = "schema"
definition_directory: str = "definitions"
schema_directory: str = "schemas"
validator_directory: str = "validators"
test_directory: str = "tests"
# Settings specific to the schema files
schema_file_extensions: List[str] = [".json", ".yaml", ".yml"]  # Do we still need this?
schema_file_exclude_filenames: List[str] = []
# Settings specific to searching for and identifying all instance files to validate
data_file_search_directories: List[str] = ["./"]
data_file_extensions: List[str] = [".json", ".yaml", ".yml"]
data_file_exclude_filenames: List[str] = [".yamllint.yml", ".travis.yml"]
data_file_automap: bool = True
ansible_inventory: Optional[str]
schema_mapping: Dict = {}
class Config: # pylint: disable=too-few-public-methods
"""Additional parameters to automatically map environment variable to some settings."""
fields = {
"main_directory": {"env": "jsonschema_directory"},
"definition_directory": {"env": "jsonschema_definition_directory"},
}
def load(config_file_name="pyproject.toml", config_data=None):
"""Load configuration.
Configuration is loaded from a file in pyproject.toml format that contains the settings,
or from a dictionary of those settings passed in as "config_data"
The settings for this app are expected to be in [tool.schema_enforcer] in TOML.
If nothing is found in the config file or if the config file does not exist, the default values will be used.
config_data can be passed in to override the config_file_name. If this is done, a combination of the data
specified and the defaults for parameters not specified will be used, and settings in the config file will
be ignored.
Args:
config_file_name (str, optional): Name of the configuration file to load. Defaults to "pyproject.toml".
config_data (dict, optional): dict to load as the config file instead of reading the file. Defaults to None.
"""
global SETTINGS # pylint: disable=global-statement
if config_data:
SETTINGS = Settings(**config_data)
return
if os.path.exists(config_file_name):
config_string = Path(config_file_name).read_text(encoding="utf-8")
config_tmp = toml.loads(config_string)
if "tool" in config_tmp and "schema_enforcer" in config_tmp.get("tool", {}):
SETTINGS = Settings(**config_tmp["tool"]["schema_enforcer"])
return
SETTINGS = Settings()
def load_and_exit(config_file_name="pyproject.toml", config_data=None):
"""Calls load, but wraps it in a try except block.
This is done to handle a ValidationError which is raised when settings are specified but invalid.
In such cases, a message is printed to the screen indicating the settings which don't pass validation.
Args:
config_file_name (str, optional): Name of the configuration file to load. Defaults to "pyproject.toml".
config_data (dict, optional): dict to load as the config file instead of reading the file. Defaults to None.
"""
try:
load(config_file_name=config_file_name, config_data=config_data)
except ValidationError as err:
print(f"Configuration not valid, found {len(err.errors())} error(s)")
for error in err.errors():
print(f" {'/'.join(error['loc'])} | {error['msg']} ({error['type']})")
sys.exit(1)
|
schema-enforcer
|
/schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/schema_enforcer/config.py
|
config.py
|
| 0.712432 | 0.334168 |
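A short sketch of driving the loader above programmatically; the override values are illustrative only, and every omitted key keeps its Pydantic default:

from schema_enforcer import config

config.load(config_data={
    "main_directory": "schema",
    "data_file_search_directories": ["hostvars/"],
})
print(config.SETTINGS.main_directory)        # "schema"
print(config.SETTINGS.data_file_extensions)  # defaults remain: ['.json', '.yaml', '.yml']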
import sys
import click
from termcolor import colored
from schema_enforcer.utils import MutuallyExclusiveOption
from schema_enforcer import config
from schema_enforcer.schemas.manager import SchemaManager
from schema_enforcer.instances.file import InstanceFileManager
from schema_enforcer.utils import error
from schema_enforcer.exceptions import InvalidJSONSchema
@click.group()
def main():
"""SCHEMA ENFORCER.
This tool is used to ensure data adheres to a schema definition. The data can come
from YAML files, JSON files, or an Ansible inventory. The schema to which the data
should adhere can currently be defined using the JSONSchema language in YAML or JSON
format.
"""
@click.option("--show-pass", default=False, help="Shows validation checks that passed", is_flag=True, show_default=True)
@click.option(
"--strict",
default=False,
help="Forces a stricter schema check that warns about unexpected additional properties",
is_flag=True,
show_default=True,
)
@click.option(
"--show-checks",
default=False,
help="Shows the schemas to be checked for each structured data file",
is_flag=True,
show_default=True,
)
@main.command()
def validate(show_pass, show_checks, strict):  # noqa: D205
"""Validates instance files against defined schema.
\f
Args:
show_pass (bool): show successful schema validations
show_checks (bool): show schemas which will be validated against each instance file
strict (bool): Forces a stricter schema check that warns about unexpected additional properties
"""
config.load()
# ---------------------------------------------------------------------
# Load Schema(s) from disk
# ---------------------------------------------------------------------
try:
smgr = SchemaManager(config=config.SETTINGS)
except InvalidJSONSchema as exc:
error(str(exc))
sys.exit(1)
if not smgr.schemas:
error("No schemas were loaded")
sys.exit(1)
# ---------------------------------------------------------------------
# Load Instances
# ---------------------------------------------------------------------
ifm = InstanceFileManager(config=config.SETTINGS)
if not ifm.instances:
error("No instance files were found to validate")
sys.exit(1)
if config.SETTINGS.data_file_automap:
ifm.add_matches_by_property_automap(smgr)
if show_checks:
ifm.print_schema_mapping()
sys.exit(0)
error_exists = False
for instance in ifm.instances:
for result in instance.validate(smgr, strict):
result.instance_type = "FILE"
result.instance_name = instance.filename
result.instance_location = instance.path
if not result.passed():
error_exists = True
result.print()
elif result.passed() and show_pass:
result.print()
if not error_exists:
print(colored("ALL SCHEMA VALIDATION CHECKS PASSED", "green"))
else:
sys.exit(1)
@click.option(
"--list",
"list_schemas",
default=False,
cls=MutuallyExclusiveOption,
mutually_exclusive=["generate_invalid", "check", "schema-id", "dump"],
help="List all available schemas",
is_flag=True,
)
@click.option(
"--dump",
"dump_schemas",
default=False,
cls=MutuallyExclusiveOption,
mutually_exclusive=["generate_invalid", "check", "list"],
help="Dump full schema for all schemas or schema-id",
is_flag=True,
)
@click.option(
"--check",
default=False,
cls=MutuallyExclusiveOption,
mutually_exclusive=["generate_invalid", "list", "dump"],
help="Validates that all schemas are valid (spec and unit tests)",
is_flag=True,
)
@click.option(
"--generate-invalid",
default=False,
cls=MutuallyExclusiveOption,
mutually_exclusive=["check", "list", "dump"],
help="Generates expected invalid result from a given schema [--schema-id] and data defined in a data file",
is_flag=True,
)
@click.option(
"--schema-id", default=None, cls=MutuallyExclusiveOption, mutually_exclusive=["list"], help="The name of a schema."
)
@main.command()
def schema(check, generate_invalid, list_schemas, schema_id, dump_schemas): # noqa: D417,D301,D205
"""Manage your schemas.
\f
Args:
check (bool): Validates that all schemas are valid (spec and unit tests)
generate_invalid (bool): Generates expected invalid data from a given schema
list_schemas (bool): List all available schemas
schema_id (str): Name of schema to evaluate
dump_schemas (bool): Dump all schema data or a single schema if schema_id is provided
"""
if not check and not generate_invalid and not list_schemas and not schema_id and not dump_schemas:
error(
"The 'schema' command requires one or more arguments. You can run the command 'schema-enforcer schema --help' to see the arguments available."
)
sys.exit(1)
config.load()
# ---------------------------------------------------------------------
# Load Schema(s) from disk
# ---------------------------------------------------------------------
try:
smgr = SchemaManager(config=config.SETTINGS)
except InvalidJSONSchema as exc:
error(str(exc))
sys.exit(1)
if not smgr.schemas:
error("No schemas were loaded")
sys.exit(1)
if list_schemas:
smgr.print_schemas_list()
sys.exit(0)
if dump_schemas:
smgr.dump_schema(schema_id)
sys.exit(0)
if generate_invalid:
if not schema_id:
sys.exit("Please indicate the schema you'd like to generate invalid data for using the --schema-id flag")
smgr.generate_invalid_tests_expected(schema_id=schema_id)
sys.exit(0)
if check:
smgr.test_schemas()
sys.exit(0)
@main.command()
@click.option("--inventory", "-i", help="Ansible inventory file.", required=False)
@click.option("--host", "-h", "limit", help="Limit the execution to a single host.", required=False)
@click.option("--show-pass", default=False, help="Shows validation checks that passed", is_flag=True, show_default=True)
@click.option(
"--show-checks",
default=False,
help="Shows the schemas to be checked for each ansible host",
is_flag=True,
show_default=True,
)
def ansible(
inventory, limit, show_pass, show_checks
):  # pylint: disable=too-many-branches,too-many-locals,too-many-statements  # noqa: D417,D301
"""Validate the hostvars for all hosts within an Ansible inventory.
The hostvars are dynamically rendered based on groups to which each host belongs.
For each host, if a variable `schema_enforcer_schema_ids` is defined, it will be used
to determine which schemas should be used to validate each key. If this variable is
not defined, the hostvars top level keys will be automatically mapped to a schema
definition's top level properties to automatically infer which schema should be used
to validate which hostvar.
\f
Args:
inventory (string): The name of the file used to construct an ansible inventory.
limit (string, None): Name of a host to limit the execution to.
show_pass (bool): Shows validation checks that pass. Defaults to False.
show_checks (bool): Shows the schema ids each host will be evaluated against.
Example:
$ cd examples/ansible
$ ls -ls
total 8
drwxr-xr-x 5 damien staff 160B Jul 25 16:37 group_vars
drwxr-xr-x 4 damien staff 128B Jul 25 16:37 host_vars
-rw-r--r-- 1 damien staff 69B Jul 25 16:37 inventory.ini
drwxr-xr-x 4 damien staff 128B Jul 25 16:37 schema
$ schema-enforcer ansible -i inventory.ini
Found 4 hosts in the inventory
FAIL | [ERROR] False is not of type 'string' [HOST] spine1 [PROPERTY] dns_servers:0:address
FAIL | [ERROR] False is not of type 'string' [HOST] spine2 [PROPERTY] dns_servers:0:address
$ schema-enforcer ansible -i inventory.ini -h leaf1
Found 4 hosts in the inventory
ALL SCHEMA VALIDATION CHECKS PASSED
$ schema-enforcer ansible -i inventory.ini -h spine1 --show-pass
Found 4 hosts in the inventory
FAIL | [ERROR] False is not of type 'string' [HOST] spine1 [PROPERTY] dns_servers:0:address
PASS | [HOST] spine1 [SCHEMA ID] schemas/interfaces
"""
# Ansible is currently always installed by schema-enforcer. This try/except was added in the interest of making
# ansible an optional dependency. We decided to make two separate packages installable via PyPI, one with ansible,
# one without. This has been left in the code until such a time as we implement the change to two packages, so the
# code will not need to be re-written.
try:
from schema_enforcer.ansible_inventory import AnsibleInventory # pylint: disable=import-outside-toplevel
except ModuleNotFoundError:
error(
"ansible package not found, you can run the command 'pip install schema-enforcer[ansible]' to install the latest schema-enforcer sanctioned version."
)
sys.exit(1)
if inventory:
config.load(config_data={"ansible_inventory": inventory})
else:
config.load()
# ---------------------------------------------------------------------
# Load Schema(s) from disk
# ---------------------------------------------------------------------
try:
smgr = SchemaManager(config=config.SETTINGS)
except InvalidJSONSchema as exc:
error(str(exc))
sys.exit(1)
if not smgr.schemas:
error("No schemas were loaded")
sys.exit(1)
# ---------------------------------------------------------------------
# Load Ansible Inventory file
# - generate hostvar for all devices in the inventory
# - Validate Each key in the hostvar individually against the schemas defined in the var jsonschema_mapping
# ---------------------------------------------------------------------
inv = AnsibleInventory(inventory=config.SETTINGS.ansible_inventory)
hosts = inv.get_hosts_containing()
print(f"Found {len(hosts)} hosts in the inventory")
if show_checks:
inv.print_schema_mapping(hosts, limit, smgr)
sys.exit(0)
error_exists = False
for host in hosts:
if limit and host.name != limit:
continue
# Acquire Host Variables
hostvars = inv.get_clean_host_vars(host)
# Acquire validation settings for the given host
schema_validation_settings = inv.get_schema_validation_settings(host)
declared_schema_ids = schema_validation_settings["declared_schema_ids"]
strict = schema_validation_settings["strict"]
automap = schema_validation_settings["automap"]
# Validate declared schemas exist
smgr.validate_schemas_exist(declared_schema_ids)
# Acquire schemas applicable to the given host
applicable_schemas = inv.get_applicable_schemas(hostvars, smgr, declared_schema_ids, automap)
for schema_obj in applicable_schemas.values():
# Combine host attributes into a single data structure matching the properties defined at the top level of the schema definition
if not strict:
data = {}
for var in schema_obj.top_level_properties:
data.update({var: hostvars.get(var)})
# If the schema_enforcer_strict bool is set, hostvars should match a single schema exactly.
# Thus, we want to pass the entirety of the cleaned host vars into the validate method rather
# than creating a data structure with only the top level vars defined by the schema.
else:
data = hostvars
# Validate host vars against schema
schema_obj.validate(data=data, strict=strict)
for result in schema_obj.get_results():
result.instance_type = "HOST"
result.instance_hostname = host.name
if not result.passed():
error_exists = True
result.print()
elif result.passed() and show_pass:
result.print()
schema_obj.clear_results()
if not error_exists:
print(colored("ALL SCHEMA VALIDATION CHECKS PASSED", "green"))
else:
sys.exit(1)
|
schema-enforcer
|
/schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/schema_enforcer/cli.py
|
cli.py
|
| 0.483892 | 0.30641 |
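One way to exercise the click group above without a shell is click's built-in test runner; a sketch, assuming a schema directory and data files exist under the current working directory:

from click.testing import CliRunner
from schema_enforcer.cli import main

runner = CliRunner()
# Equivalent to running `schema-enforcer validate --show-checks` from a shell.
result = runner.invoke(main, ["validate", "--show-checks"])
print(result.exit_code)
print(result.output)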
import copy
import json
import os
from functools import cached_property
from jsonschema import Draft7Validator # pylint: disable=import-self
from schema_enforcer.schemas.validator import BaseValidation
from schema_enforcer.validation import ValidationResult, RESULT_FAIL, RESULT_PASS
class JsonSchema(BaseValidation): # pylint: disable=too-many-instance-attributes
"""class to manage jsonschema type schemas."""
schematype = "jsonschema"
def __init__(self, schema, filename, root):
"""Initilize a new JsonSchema object from a dict.
Args:
schema (dict): Data representing the schema. Must be jsonschema valid.
filename (string): Name of the schema file on the filesystem.
root (string): Absolute path to the directory where the schema file is located.
"""
super().__init__()
self.filename = filename
self.root = root
self.data = schema
self.id = self.data.get("$id") # pylint: disable=invalid-name
self.top_level_properties = set(self.data.get("properties"))
self.validator = None
self.strict_validator = None
self.format_checker = Draft7Validator.FORMAT_CHECKER
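# FORMAT_CHECKER is the draft-specific format checker attribute that recent
# jsonschema releases expose on validator classes such as Draft7Validator.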
@cached_property
def v7_schema(self):
"""Draft7 Schema."""
local_dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(local_dirname, "draft7_schema.json"), encoding="utf-8") as fhd:
v7_schema = json.loads(fhd.read())
return v7_schema
def get_id(self):
"""Return the unique ID of the schema."""
return self.id
def validate(self, data, strict=False):
"""Validate a given data with this schema.
Args:
data (dict, list): Data to validate against the schema.
strict (bool, optional): if True the validation will automatically flag additional properties. Defaults to False.
Returns:
Iterator: Iterator of ValidationResult
"""
if strict:
validator = self.__get_strict_validator()
else:
validator = self.__get_validator()
has_error = False
for err in validator.iter_errors(data):
has_error = True
self.add_validation_error(err.message, absolute_path=list(err.absolute_path))
if not has_error:
self.add_validation_pass()
return self.get_results()
def validate_to_dict(self, data, strict=False):
"""Return a list of ValidationResult objects.
These are generated with the validate() function in dict() format instead of as a Python Object.
Args:
data (dict, list): Data to validate against the schema.
strict (bool, optional): if True the validation will automatically flag additional properties. Defaults to False.
Returns:
list of dictionaries containing the results.
"""
return [
result.dict(exclude_unset=True, exclude_none=True) for result in self.validate(data=data, strict=strict)
]
def __get_validator(self):
"""Return the validator for this schema, create if it doesn't exist already.
Returns:
Draft7Validator: The validator for this schema.
"""
if self.validator:
return self.validator
self.validator = Draft7Validator(self.data, format_checker=self.format_checker)
return self.validator
def __get_strict_validator(self):
"""Return a strict version of the Validator, create it if it doesn't exist already.
To create a strict version of the schema, this function adds `additionalProperties` to all objects in the schema.
Returns:
Draft7Validator: Validator for this schema in strict mode.
"""
# TODO Currently the function is only modifying the top level object, need to add that to all objects recursively
if self.strict_validator:
return self.strict_validator
# Create a copy of the schema first and modify it to insert `additionalProperties`
schema = copy.deepcopy(self.data)
if schema.get("additionalProperties", False) is not False:
print(f"{schema['$id']}: Overriding existing additionalProperties: {schema['additionalProperties']}")
schema["additionalProperties"] = False
# TODO This should be recursive, e.g. all sub-objects; currently it only goes one level deep, look in jsonschema for utilities
for prop_name, prop in schema.get("properties", {}).items():
items = prop.get("items", {})
if items.get("type") == "object":
if items.get("additionalProperties", False) is not False:
print(
f"{schema['$id']}: Overriding item {prop_name}.additionalProperties: {items['additionalProperties']}"
)
items["additionalProperties"] = False
self.strict_validator = Draft7Validator(schema, format_checker=self.format_checker)
return self.strict_validator
def check_if_valid(self):
"""Check if the schema definition is valid against JsonSchema draft7.
Returns:
List[ValidationResult]: A list of validation result objects.
"""
validator = Draft7Validator(self.v7_schema, format_checker=self.format_checker)
results = []
has_error = False
for err in validator.iter_errors(self.data):
has_error = True
results.append(
ValidationResult(
schema_id=self.id,
result=RESULT_FAIL,
message=err.message,
absolute_path=list(err.absolute_path),
instance_type="SCHEMA",
instance_name=self.id,
instance_location="",
)
)
if not has_error:
results.append(
ValidationResult(
schema_id=self.id,
result=RESULT_PASS,
instance_type="SCHEMA",
instance_name=self.id,
instance_location="",
)
)
return results
|
schema-enforcer
|
/schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/schema_enforcer/schemas/jsonschema.py
|
jsonschema.py
|
| 0.685634 | 0.183667 |
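A minimal sketch of the JsonSchema class above used with an in-memory schema; the $id, filename, and root values are illustrative:

from schema_enforcer.schemas.jsonschema import JsonSchema

schema_dict = {
    "$id": "schemas/dns_servers",  # illustrative id
    "type": "object",
    "properties": {
        "dns_servers": {
            "type": "array",
            "items": {"type": "object", "properties": {"address": {"type": "string"}}},
        }
    },
}
schema = JsonSchema(schema=schema_dict, filename="dns.yml", root="/tmp")
# validate_to_dict() wraps validate() and serializes each ValidationResult.
for result in schema.validate_to_dict({"dns_servers": [{"address": False}]}, strict=False):
    print(result)  # reports: False is not of type 'string'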
# pylint: disable=no-member, too-few-public-methods
# See PEP585 (https://www.python.org/dev/peps/pep-0585/)
from __future__ import annotations
import pkgutil
import inspect
import jmespath
from schema_enforcer.validation import ValidationResult
class BaseValidation:
"""Base class for Validation classes."""
def __init__(self):
"""Base init for all validation classes."""
self._results: list[ValidationResult] = []
def add_validation_error(self, message: str, **kwargs):
"""Add validator error to results.
Args:
message (str): error message
kwargs (optional): additional arguments to add to ValidationResult when required
"""
self._results.append(ValidationResult(result="FAIL", schema_id=self.id, message=message, **kwargs))
def add_validation_pass(self, **kwargs):
"""Add validator pass to results.
Args:
kwargs (optional): additional arguments to add to ValidationResult when required
"""
self._results.append(ValidationResult(result="PASS", schema_id=self.id, **kwargs))
def get_results(self) -> list[ValidationResult]:
"""Return all validation results for this validator."""
if not self._results:
self._results.append(ValidationResult(result="PASS", schema_id=self.id))
return self._results
def clear_results(self):
"""Reset results for validator instance."""
self._results = []
def validate(self, data: dict, strict: bool):
"""Required function for custom validator.
Args:
data (dict): variables to be validated by validator
strict (bool): true when --strict cli option is used to request strict validation (if provided)
Returns:
None
Use add_validation_error and add_validation_pass to report results.
"""
raise NotImplementedError
class JmesPathModelValidation(BaseValidation):
"""Base class for JmesPathModelValidation classes."""
def validate(self, data: dict, strict: bool): # pylint: disable=W0613
"""Validate data using custom jmespath validator plugin."""
operators = {
"gt": lambda r, v: int(r) > int(v),
"gte": lambda r, v: int(r) >= int(v),
"eq": lambda r, v: r == v,
"lt": lambda r, v: int(r) < int(v),
"lte": lambda r, v: int(r) <= int(v),
"contains": lambda r, v: v in r,
}
lhs = jmespath.search(self.left, data)
valid = True
if lhs:
# Check rhs for compiled jmespath expression
if isinstance(self.right, jmespath.parser.ParsedResult):
rhs = self.right.search(data)
else:
rhs = self.right
valid = operators[self.operator](lhs, rhs)
if not valid:
self.add_validation_error(self.error)
def is_validator(obj) -> bool:
"""Returns True if the object is a BaseValidation or JmesPathModelValidation subclass."""
try:
return issubclass(obj, BaseValidation) and obj not in (JmesPathModelValidation, BaseValidation)
except TypeError:
return False
def load_validators(validator_path: str) -> dict[str, BaseValidation]:
"""Load all validator plugins from validator_path."""
validators = {}
for importer, module_name, _ in pkgutil.iter_modules([validator_path]):
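        # NOTE: find_module()/load_module() are deprecated in newer Python releases;
        # importlib provides the modern replacement.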
module = importer.find_module(module_name).load_module(module_name)
for name, cls in inspect.getmembers(module, is_validator):
# Default to class name if id doesn't exist
if not hasattr(cls, "id"):
cls.id = name
if cls.id in validators:
print(
f"Unable to load the validator {cls.id}, there is already a validator with the same name ({name})."
)
else:
validators[cls.id] = cls()
return validators
|
schema-enforcer
|
/schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/schema_enforcer/schemas/validator.py
|
validator.py
|
| 0.865835 | 0.21819 |
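A hedged example of a custom validator plugin that load_validators() above would discover; the MTU rule and all names are invented for illustration:

from schema_enforcer.schemas.validator import JmesPathModelValidation

class CheckInterfaceMtu(JmesPathModelValidation):  # pylint: disable=too-few-public-methods
    """Fail when the smallest interface MTU in the host data is below 1500."""

    id = "CheckInterfaceMtu"  # illustrative; load_validators defaults to the class name anyway
    left = "min(interfaces.*.mtu)"
    right = 1500
    operator = "gte"
    error = "Interface MTU must be at least 1500"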
import os
import sys
import json
import jsonref
from termcolor import colored
from rich.console import Console
from rich.table import Table
from schema_enforcer.utils import load_file, find_file, find_files, dump_data_to_yaml
from schema_enforcer.validation import ValidationResult, RESULT_PASS, RESULT_FAIL
from schema_enforcer.exceptions import SchemaNotDefined, InvalidJSONSchema
from schema_enforcer.utils import error, warn
from schema_enforcer.schemas.jsonschema import JsonSchema
from schema_enforcer.schemas.validator import load_validators
class SchemaManager:
"""The SchemaManager class is designed to load and organaized all the schemas."""
def __init__(self, config):
"""Initialize the SchemaManager and search for all schema files in the schema_directories.
Args:
config (Config): Instance of Config object returned by schema_enforcer.config.load() method.
"""
self.schemas = {}
self.config = config
full_schema_dir = f"{config.main_directory}/{config.schema_directory}/"
files = find_files(
file_extensions=[".yaml", ".yml", ".json"],
search_directories=[full_schema_dir],
excluded_filenames=config.schema_file_exclude_filenames,
return_dir=True,
)
# For each schema file, determine the absolute path to the directory
# Create and save a JsonSchema object for each file
for root, filename in files:
root = os.path.realpath(root)
schema = self.create_schema_from_file(root, filename)
self.schemas[schema.get_id()] = schema
# Load validators
validators = load_validators(config.validator_directory)
self.schemas.update(validators)
def create_schema_from_file(self, root, filename):
"""Create a new JsonSchema object for a given file.
Load the content from disk and resolve all JSONRef within the schema file.
Args:
root (string): Absolute location of the file in the filesystem.
filename (string): Name of the file.
Returns:
JsonSchema: JsonSchema object newly created.
"""
file_data = load_file(os.path.join(root, filename))
# TODO Find the type of Schema based on the Type, currently only jsonschema is supported
# schema_type = "jsonschema"
base_uri = f"file:{root}/"
schema_full = jsonref.JsonRef.replace_refs(file_data, base_uri=base_uri, jsonschema=True, loader=load_file)
schema = JsonSchema(schema=schema_full, filename=filename, root=root)
# Only add valid jsonschema files and raise an exception if an invalid file is found
valid = all((result.passed() for result in schema.check_if_valid()))
if not valid:
raise InvalidJSONSchema(schema)
return schema
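    # Illustrative sketch of what replace_refs above does (example schema, not
    # from the source): a node such as {"$ref": "definitions.yml#/hostname"}
    # inside the loaded file is replaced in place by the referenced definition,
    # resolved relative to base_uri, so downstream code sees one fully
    # expanded schema document.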
def iter_schemas(self):
"""Return an iterator of all schemas in the SchemaManager.
Returns:
        Iterator: Iterator over all schemas as (key, value) pairs.
"""
return self.schemas.items()
def print_schemas_list(self):
"""Print the list of all schemas to the cli.
    To avoid very long location strings, dynamically replace the current directory with a dot.
"""
console = Console()
table = Table(show_header=True, header_style="bold cyan")
current_dir = os.getcwd()
table.add_column("Schema ID", style="bright_green")
table.add_column("Type")
table.add_column("Location")
table.add_column("Filename")
for schema_id, schema in self.iter_schemas():
table.add_row(schema_id, schema.schematype, schema.root.replace(current_dir, "."), schema.filename)
console.print(table)
def dump_schema(self, schema_id=None):
"""Dump schema with references resolved.
Dumps all schemas or a single schema represented by schema_id.
Args:
schema_id (str): The unique identifier of a schema.
Returns: None
"""
if schema_id:
schema = self.schemas.get(schema_id, None)
if schema is None:
raise ValueError(f"Could not find schema ID {schema_id}")
print(json.dumps(schema.data, indent=2))
else:
for _, schema in self.iter_schemas():
print(json.dumps(schema.data, indent=2))
def test_schemas(self):
"""Validate all schemas pass the tests defined for them.
        For each schema, three sets of tests are potentially executed.
- schema must be Draft7 valid.
- Valid tests must pass.
- Invalid tests must pass.
"""
error_exists = False
for schema_id, schema in self.iter_schemas():
schema_valid = schema.check_if_valid()
valid_results = self.test_schema_valid(schema_id)
invalid_results = self.test_schema_invalid(schema_id)
for result in schema_valid + valid_results + invalid_results:
if not result.passed():
error_exists = True
result.print()
if not error_exists:
print(colored("ALL SCHEMAS ARE VALID", "green"))
def test_schema_valid(self, schema_id, strict=False):
"""Execute all valid tests for a given schema.
Args:
            schema_id (str): The unique identifier of a schema.
            strict (bool): Whether to request strict validation. Defaults to False.
Returns:
list of ValidationResult.
"""
schema = self.schemas[schema_id]
valid_test_dir = self._get_test_dir_absolute(test_type="valid", schema_id=schema_id)
valid_files = find_files(
file_extensions=[".yaml", ".yml", ".json"],
search_directories=[valid_test_dir],
excluded_filenames=[],
return_dir=True,
)
results = []
for root, filename in valid_files:
test_data = load_file(os.path.join(root, filename))
for result in schema.validate(test_data, strict=strict):
result.instance_name = filename
result.instance_location = root
result.instance_type = "TEST"
results.append(result)
return results
def test_schema_invalid(self, schema_id): # pylint: disable=too-many-locals
"""Execute all invalid tests for a given schema.
        - Acquire structured data to be validated against a given schema. Do this by searching for a file named
          "data.yml", "data.yaml", or "data.json" within a directory hierarchy of
          "./<test_directory>/<schema_id>/invalid/<test_case>/"
        - Acquire the results expected after the data is validated against a given schema. Do this by searching for
          a file named "results.yml", "results.yaml", or "results.json" within the same
          "./<test_directory>/<schema_id>/invalid/<test_case>/" directory
- Validate expected results match actual results of data after it is checked for adherence to schema.
Args:
schema_id (str): The unique identifier of a schema.
Returns:
list of ValidationResult objects.
"""
schema = self.schemas.get(schema_id, None)
if schema is None:
raise ValueError(f"Could not find schema ID {schema_id}")
invalid_test_dir = self._get_test_dir_absolute(test_type="invalid", schema_id=schema_id)
test_dirs = next(os.walk(invalid_test_dir))[1]
results = []
for test_dir in test_dirs:
schema.clear_results()
data_file_path = os.path.join(invalid_test_dir, test_dir, "data")
data_file = find_file(data_file_path)
expected_results_file_path = os.path.join(invalid_test_dir, test_dir, "results")
expected_results_file = find_file(expected_results_file_path)
if not data_file:
warn(f"Could not find data file {data_file_path}. Skipping...")
continue
if not expected_results_file:
warn(f"Could not find expected_results_file {expected_results_file_path}. Skipping...")
continue
data = load_file(data_file)
expected_results = load_file(expected_results_file)
tmp_results = schema.validate_to_dict(data)
            # The expected results may load as OrderedDict rather than plain dict;
            # the easiest way to normalize them is to round-trip through JSON.
expected_results = json.loads(json.dumps(expected_results["results"]))
results_sorted = sorted(tmp_results, key=lambda i: i.get("message", ""))
expected_results_sorted = sorted(expected_results, key=lambda i: i.get("message", ""))
params = {
"schema_id": schema_id,
"instance_type": "TEST",
"instance_name": test_dir,
"instance_location": invalid_test_dir,
}
if results_sorted != expected_results_sorted:
params["result"] = RESULT_FAIL
params[
"message"
] = f"Invalid test results do not match expected test results from {expected_results_file}"
else:
params["result"] = RESULT_PASS
val = ValidationResult(**params)
results.append(val)
        return results
def generate_invalid_tests_expected(self, schema_id):
"""Generate expected invalid test results for a given schema.
Args:
schema_id (str): unique identifier of a schema
"""
# TODO: Refactor this into a method. Exists in multiple places
schema = self.schemas.get(schema_id, None)
if schema is None:
raise ValueError(f"Could not find schema ID {schema_id}")
invalid_test_dir = self._get_test_dir_absolute(test_type="invalid", schema_id=schema_id)
test_dirs = next(os.walk(invalid_test_dir))[1]
# For each test, load the data file, test the data against the schema and save the results
for test_dir in test_dirs:
schema.clear_results()
data_file_path = os.path.join(invalid_test_dir, test_dir, "data")
data_file = find_file(data_file_path)
            if not data_file:
                warn(f"Could not find data file {data_file_path}. Skipping...")
                continue
data = load_file(data_file)
results = schema.validate_to_dict(data)
self._ensure_results_invalid(results, data_file)
result_file = os.path.join(invalid_test_dir, test_dir, "results.yml")
dump_data_to_yaml({"results": results}, result_file)
print(f"Generated/Updated results file: {result_file}")
def validate_schemas_exist(self, schema_ids):
"""Validate that each schema ID in a list of schema IDs exists.
Args:
schema_ids (list): A list of schema IDs, each of which should exist as a schema object.
"""
if not isinstance(schema_ids, list):
raise TypeError("schema_ids argument passed into validate_schemas_exist must be of type list")
for schema_id in schema_ids:
if not self.schemas.get(schema_id, None):
raise SchemaNotDefined(f"Schema ID {schema_id} declared but not defined")
@property
def test_directory(self):
"""Return the path to the main schema test directory."""
return os.path.join(self.config.main_directory, self.config.test_directory)
def _get_test_dir_absolute(self, test_type, schema_id):
"""Get absolute path of directory in which schema unit tests exist.
Args:
test_type (str): Test type. One of "valid" or "invalid"
schema_id (str): Schema ID for which to get test dir absolute path
Returns:
str: Full path of test directory.
"""
if test_type not in ["valid", "invalid"]:
raise ValueError(f"Test type parameter was {test_type}. Must be one of 'valid' or 'invalid'")
if not self.schemas.get(schema_id, None):
raise ValueError(f"Could not find schema ID {schema_id}")
root = os.path.abspath(os.getcwd())
short_schema_id = schema_id.split("/")[1] if "/" in schema_id else schema_id
test_dir = os.path.join(root, self.test_directory, short_schema_id, test_type)
if not os.path.exists(test_dir):
error(f"Tried to search {test_dir} for {test_type} data, but the path does not exist.")
sys.exit(1)
return test_dir
@staticmethod
def _ensure_results_invalid(results, data_file):
"""Ensures each result is schema valid in a list of results data structures.
Args:
            results (list): List of result dicts. Each result dict must include a 'result' key of 'PASS' or 'FAIL'
data_file (str): Data file which should be schema invalid
Raises:
error: Raises an error and calls sys.exit(1) if one of the results objects is schema valid.
"""
results_pass_or_fail = [result["result"] for result in results]
if "PASS" in results_pass_or_fail:
error(f"{data_file} is schema valid, but should be schema invalid as it defines an invalid test")
sys.exit(1)
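# Illustrative end-to-end usage sketch (not part of the original source; it
# assumes schema_enforcer.config.load() returns the Config instance, as the
# __init__ docstring above describes):
# from schema_enforcer import config
# settings = config.load()
# manager = SchemaManager(config=settings)
# manager.print_schemas_list()
# manager.test_schemas()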
| schema-enforcer | /schema_enforcer-1.2.2.tar.gz/schema_enforcer-1.2.2/schema_enforcer/schemas/manager.py | manager.py | 0.567457 | 0.21428 |