id | text | dataset_id
---|---|---|
/Newgram-0.0.5.tar.gz/Newgram-0.0.5/newgram/file_id.py |
import base64
import logging
import struct
from enum import IntEnum
from io import BytesIO
from typing import List
from newgram.raw.core import Bytes, String
log = logging.getLogger(__name__)
def b64_encode(s: bytes) -> str:
"""Encode bytes into a URL-safe Base64 string without padding
Parameters:
s (``bytes``):
Bytes to encode
Returns:
``str``: The encoded bytes
"""
return base64.urlsafe_b64encode(s).decode().strip("=")
def b64_decode(s: str) -> bytes:
"""Decode a URL-safe Base64 string without padding to bytes
Parameters:
s (``str``):
String to decode
Returns:
        ``bytes``: The decoded bytes
"""
return base64.urlsafe_b64decode(s + "=" * (-len(s) % 4))
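# --- Hedged example (added for illustration, not part of the original module) ---
# Minimal sketch showing that b64_encode/b64_decode round-trip arbitrary bytes;
# the sample payload below is made up.
def _b64_roundtrip_example():
    payload = b"\x01\x02\x00\xff"
    encoded = b64_encode(payload)          # URL-safe alphabet, "=" padding stripped
    assert b64_decode(encoded) == payload  # padding is restored before decoding
    return encoded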
def rle_encode(s: bytes) -> bytes:
"""Zero-value RLE encoder
Parameters:
s (``bytes``):
Bytes to encode
Returns:
``bytes``: The encoded bytes
"""
r: List[int] = []
n: int = 0
for b in s:
if not b:
n += 1
else:
if n:
r.extend((0, n))
n = 0
r.append(b)
if n:
r.extend((0, n))
return bytes(r)
def rle_decode(s: bytes) -> bytes:
"""Zero-value RLE decoder
Parameters:
s (``bytes``):
Bytes to decode
Returns:
``bytes``: The decoded bytes
"""
r: List[int] = []
z: bool = False
for b in s:
if not b:
z = True
continue
if z:
r.extend((0,) * b)
z = False
else:
r.append(b)
return bytes(r)
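# --- Hedged example (added for illustration, not part of the original module) ---
# Sketch of the zero-value RLE: each run of zero bytes is stored as (0, run_length),
# so b"\x00\x00\x00\x07\x00\x01" packs to b"\x00\x03\x07\x00\x01\x01" and decodes back unchanged.
def _rle_roundtrip_example():
    raw = b"\x00\x00\x00\x07\x00\x01"
    packed = rle_encode(raw)
    assert packed == b"\x00\x03\x07\x00\x01\x01"
    assert rle_decode(packed) == raw
    return packed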
class FileType(IntEnum):
"""Known file types"""
THUMBNAIL = 0
CHAT_PHOTO = 1 # ProfilePhoto
PHOTO = 2
VOICE = 3 # VoiceNote
VIDEO = 4
DOCUMENT = 5
ENCRYPTED = 6
TEMP = 7
STICKER = 8
AUDIO = 9
ANIMATION = 10
ENCRYPTED_THUMBNAIL = 11
WALLPAPER = 12
VIDEO_NOTE = 13
SECURE_RAW = 14
SECURE = 15
BACKGROUND = 16
DOCUMENT_AS_FILE = 17
class ThumbnailSource(IntEnum):
"""Known thumbnail sources"""
LEGACY = 0
THUMBNAIL = 1
CHAT_PHOTO_SMALL = 2 # DialogPhotoSmall
CHAT_PHOTO_BIG = 3 # DialogPhotoBig
STICKER_SET_THUMBNAIL = 4
# Photo-like file ids are longer and contain extra info; the rest are all documents
PHOTO_TYPES = {FileType.THUMBNAIL, FileType.CHAT_PHOTO, FileType.PHOTO, FileType.WALLPAPER,
FileType.ENCRYPTED_THUMBNAIL}
DOCUMENT_TYPES = set(FileType) - PHOTO_TYPES
# Since the file type values are small enough to fit in a few bits, Telegram thought it would be a good idea to
# encode extra information about web URL and file reference existence as flags inside the 4 bytes allocated for the field
WEB_LOCATION_FLAG = 1 << 24
FILE_REFERENCE_FLAG = 1 << 25
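# --- Hedged example (added for illustration, not part of the original module) ---
# Sketch of how the two flag bits are tested and then stripped to recover the plain
# FileType value; the packed value below is made up.
def _flag_example():
    packed_type = FileType.DOCUMENT | FILE_REFERENCE_FLAG
    has_web_location = bool(packed_type & WEB_LOCATION_FLAG)      # False
    has_file_reference = bool(packed_type & FILE_REFERENCE_FLAG)  # True
    plain_type = FileType(packed_type & ~WEB_LOCATION_FLAG & ~FILE_REFERENCE_FLAG)
    return has_web_location, has_file_reference, plain_type       # (False, True, FileType.DOCUMENT)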
class FileId:
MAJOR = 4
MINOR = 30
def __init__(
self, *,
major: int = MAJOR,
minor: int = MINOR,
file_type: FileType,
dc_id: int,
file_reference: bytes = b"",
url: str = None,
media_id: int = None,
access_hash: int = None,
volume_id: int = None,
thumbnail_source: ThumbnailSource = None,
thumbnail_file_type: FileType = None,
thumbnail_size: str = "",
secret: int = None,
local_id: int = None,
chat_id: int = None,
chat_access_hash: int = None,
sticker_set_id: int = None,
sticker_set_access_hash: int = None
):
self.major = major
self.minor = minor
self.file_type = file_type
self.dc_id = dc_id
self.file_reference = file_reference
self.url = url
self.media_id = media_id
self.access_hash = access_hash
self.volume_id = volume_id
self.thumbnail_source = thumbnail_source
self.thumbnail_file_type = thumbnail_file_type
self.thumbnail_size = thumbnail_size
self.secret = secret
self.local_id = local_id
self.chat_id = chat_id
self.chat_access_hash = chat_access_hash
self.sticker_set_id = sticker_set_id
self.sticker_set_access_hash = sticker_set_access_hash
@staticmethod
def decode(file_id: str):
decoded = rle_decode(b64_decode(file_id))
# region read version
# File id versioning. Major versions lower than 4 don't have a minor version
major = decoded[-1]
if major < 4:
minor = 0
buffer = BytesIO(decoded[:-1])
else:
minor = decoded[-2]
buffer = BytesIO(decoded[:-2])
# endregion
file_type, dc_id = struct.unpack("<ii", buffer.read(8))
# region media type flags
# Check for flags existence
has_web_location = bool(file_type & WEB_LOCATION_FLAG)
has_file_reference = bool(file_type & FILE_REFERENCE_FLAG)
# Remove flags to restore the actual type id value
file_type &= ~WEB_LOCATION_FLAG
file_type &= ~FILE_REFERENCE_FLAG
# endregion
try:
file_type = FileType(file_type)
except ValueError:
raise ValueError(f"Unknown file_type {file_type} of file_id {file_id}")
if has_web_location:
url = String.read(buffer)
access_hash, = struct.unpack("<q", buffer.read(8))
return FileId(
major=major,
minor=minor,
file_type=file_type,
dc_id=dc_id,
url=url,
access_hash=access_hash
)
file_reference = Bytes.read(buffer) if has_file_reference else b""
media_id, access_hash = struct.unpack("<qq", buffer.read(16))
if file_type in PHOTO_TYPES:
volume_id, = struct.unpack("<q", buffer.read(8))
thumbnail_source, = (0,) if major < 4 else struct.unpack("<i", buffer.read(4))
try:
thumbnail_source = ThumbnailSource(thumbnail_source)
except ValueError:
raise ValueError(f"Unknown thumbnail_source {thumbnail_source} of file_id {file_id}")
if thumbnail_source == ThumbnailSource.LEGACY:
secret, local_id = struct.unpack("<qi", buffer.read(12))
return FileId(
major=major,
minor=minor,
file_type=file_type,
dc_id=dc_id,
file_reference=file_reference,
media_id=media_id,
access_hash=access_hash,
volume_id=volume_id,
thumbnail_source=thumbnail_source,
secret=secret,
local_id=local_id
)
if thumbnail_source == ThumbnailSource.THUMBNAIL:
thumbnail_file_type, thumbnail_size, local_id = struct.unpack("<iii", buffer.read(12))
thumbnail_size = chr(thumbnail_size)
return FileId(
major=major,
minor=minor,
file_type=file_type,
dc_id=dc_id,
file_reference=file_reference,
media_id=media_id,
access_hash=access_hash,
volume_id=volume_id,
thumbnail_source=thumbnail_source,
thumbnail_file_type=thumbnail_file_type,
thumbnail_size=thumbnail_size,
local_id=local_id
)
if thumbnail_source in (ThumbnailSource.CHAT_PHOTO_SMALL, ThumbnailSource.CHAT_PHOTO_BIG):
chat_id, chat_access_hash, local_id = struct.unpack("<qqi", buffer.read(20))
return FileId(
major=major,
minor=minor,
file_type=file_type,
dc_id=dc_id,
file_reference=file_reference,
media_id=media_id,
access_hash=access_hash,
volume_id=volume_id,
thumbnail_source=thumbnail_source,
chat_id=chat_id,
chat_access_hash=chat_access_hash,
local_id=local_id
)
if thumbnail_source == ThumbnailSource.STICKER_SET_THUMBNAIL:
sticker_set_id, sticker_set_access_hash, local_id = struct.unpack("<qqi", buffer.read(20))
return FileId(
major=major,
minor=minor,
file_type=file_type,
dc_id=dc_id,
file_reference=file_reference,
media_id=media_id,
access_hash=access_hash,
volume_id=volume_id,
thumbnail_source=thumbnail_source,
sticker_set_id=sticker_set_id,
sticker_set_access_hash=sticker_set_access_hash,
local_id=local_id
)
if file_type in DOCUMENT_TYPES:
return FileId(
major=major,
minor=minor,
file_type=file_type,
dc_id=dc_id,
file_reference=file_reference,
media_id=media_id,
access_hash=access_hash
)
def encode(self, *, major: int = None, minor: int = None):
major = major if major is not None else self.major
minor = minor if minor is not None else self.minor
buffer = BytesIO()
file_type = self.file_type
if self.url:
file_type |= WEB_LOCATION_FLAG
if self.file_reference:
file_type |= FILE_REFERENCE_FLAG
buffer.write(struct.pack("<ii", file_type, self.dc_id))
if self.url:
buffer.write(String(self.url))
if self.file_reference:
buffer.write(Bytes(self.file_reference))
buffer.write(struct.pack("<qq", self.media_id, self.access_hash))
if self.file_type in PHOTO_TYPES:
buffer.write(struct.pack("<q", self.volume_id))
if major >= 4:
buffer.write(struct.pack("<i", self.thumbnail_source))
if self.thumbnail_source == ThumbnailSource.LEGACY:
buffer.write(struct.pack("<qi", self.secret, self.local_id))
elif self.thumbnail_source == ThumbnailSource.THUMBNAIL:
buffer.write(struct.pack(
"<iii",
self.thumbnail_file_type,
ord(self.thumbnail_size),
self.local_id
))
elif self.thumbnail_source in (ThumbnailSource.CHAT_PHOTO_SMALL, ThumbnailSource.CHAT_PHOTO_BIG):
buffer.write(struct.pack(
"<qqi",
self.chat_id,
self.chat_access_hash,
self.local_id
))
elif self.thumbnail_source == ThumbnailSource.STICKER_SET_THUMBNAIL:
buffer.write(struct.pack(
"<qqi",
self.sticker_set_id,
self.sticker_set_access_hash,
self.local_id
))
elif file_type in DOCUMENT_TYPES:
buffer.write(struct.pack("<ii", minor, major))
buffer.write(struct.pack("<bb", minor, major))
return b64_encode(rle_encode(buffer.getvalue()))
def __str__(self):
return str({k: v for k, v in self.__dict__.items() if v is not None})
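# --- Hedged example (added for illustration; all field values are made up) ---
# Round trip of a plain document FileId. Only the flag-free document path is
# exercised here, so the raw Bytes/String codecs are not needed.
def _file_id_roundtrip_example():
    original = FileId(file_type=FileType.DOCUMENT, dc_id=2, media_id=1234, access_hash=5678)
    restored = FileId.decode(original.encode())
    assert (restored.file_type, restored.dc_id) == (FileType.DOCUMENT, 2)
    assert (restored.media_id, restored.access_hash) == (1234, 5678)
    return restored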
class FileUniqueType(IntEnum):
"""Known file unique types"""
WEB = 0
PHOTO = 1
DOCUMENT = 2
SECURE = 3
ENCRYPTED = 4
TEMP = 5
class FileUniqueId:
def __init__(
self, *,
file_unique_type: FileUniqueType,
url: str = None,
media_id: int = None,
volume_id: int = None,
local_id: int = None
):
self.file_unique_type = file_unique_type
self.url = url
self.media_id = media_id
self.volume_id = volume_id
self.local_id = local_id
@staticmethod
def decode(file_unique_id: str):
buffer = BytesIO(rle_decode(b64_decode(file_unique_id)))
file_unique_type, = struct.unpack("<i", buffer.read(4))
try:
file_unique_type = FileUniqueType(file_unique_type)
except ValueError:
raise ValueError(f"Unknown file_unique_type {file_unique_type} of file_unique_id {file_unique_id}")
if file_unique_type == FileUniqueType.WEB:
url = String.read(buffer)
return FileUniqueId(
file_unique_type=file_unique_type,
url=url
)
if file_unique_type == FileUniqueType.PHOTO:
volume_id, local_id = struct.unpack("<qi", buffer.read())
return FileUniqueId(
file_unique_type=file_unique_type,
volume_id=volume_id,
local_id=local_id
)
if file_unique_type == FileUniqueType.DOCUMENT:
media_id, = struct.unpack("<q", buffer.read())
return FileUniqueId(
file_unique_type=file_unique_type,
media_id=media_id
)
# TODO: Missing decoder for SECURE, ENCRYPTED and TEMP
raise ValueError(f"Unknown decoder for file_unique_type {file_unique_type} of file_unique_id {file_unique_id}")
def encode(self):
if self.file_unique_type == FileUniqueType.WEB:
string = struct.pack("<is", self.file_unique_type, String(self.url))
elif self.file_unique_type == FileUniqueType.PHOTO:
string = struct.pack("<iqi", self.file_unique_type, self.volume_id, self.local_id)
elif self.file_unique_type == FileUniqueType.DOCUMENT:
string = struct.pack("<iq", self.file_unique_type, self.media_id)
else:
# TODO: Missing encoder for SECURE, ENCRYPTED and TEMP
raise ValueError(f"Unknown encoder for file_unique_type {self.file_unique_type}")
return b64_encode(rle_encode(string))
def __str__(self):
return str({k: v for k, v in self.__dict__.items() if v is not None}) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/grid/enhanced/nls/zh-tw/Filter.js | define(
"dojox/grid/enhanced/nls/zh-tw/Filter", //begin v1.x content
({
"clearFilterDialogTitle": "清除過濾器",
"filterDefDialogTitle": "過濾器",
"ruleTitleTemplate": "規則 ${0}",
"conditionEqual": "等於",
"conditionNotEqual": "不等於",
"conditionLess": "是小於",
"conditionLessEqual": "小於或等於",
"conditionLarger": "是大於",
"conditionLargerEqual": "大於或等於",
"conditionContains": "內容",
"conditionIs": "是",
"conditionStartsWith": "開始於",
"conditionEndWith": "結束於",
"conditionNotContain": "不包含",
"conditionIsNot": "不是",
"conditionNotStartWith": "不開始於",
"conditionNotEndWith": "不結束於",
"conditionBefore": "之前",
"conditionAfter": "之後",
"conditionRange": "範圍",
"conditionIsEmpty": "為空",
"all": "全部",
"any": "任何",
"relationAll": "所有規則",
"waiRelAll": "符合下列所有規則:",
"relationAny": "任何規則",
"waiRelAny": "符合下列任何規則:",
"relationMsgFront": "符合",
"relationMsgTail": "",
"and": "和",
"or": "或",
"addRuleButton": "新增規則",
"waiAddRuleButton": "新增規則",
"removeRuleButton": "移除規則",
"waiRemoveRuleButtonTemplate": "移除規則 ${0}",
"cancelButton": "取消",
"waiCancelButton": "取消此對話",
"clearButton": "清除",
"waiClearButton": "清除過濾器",
"filterButton": "過濾器",
"waiFilterButton": "提交過濾器",
"columnSelectLabel": "直欄",
"waiColumnSelectTemplate": "規則 ${0} 的直欄",
"conditionSelectLabel": "條件",
"waiConditionSelectTemplate": "規則 ${0} 的條件",
"valueBoxLabel": "值",
"waiValueBoxTemplate": "輸入要針對規則 ${0} 過濾的值",
"rangeTo": "至",
"rangeTemplate": "從 ${0} 至 ${1}",
"statusTipHeaderColumn": "直欄",
"statusTipHeaderCondition": "規則",
"statusTipTitle": "過濾器列",
"statusTipMsg": "按一下這裡的過濾器列以過濾 ${0} 中的值。",
"anycolumn": "任何直欄",
"statusTipTitleNoFilter": "過濾器列",
"statusTipTitleHasFilter": "過濾器",
"statusTipRelAny": "符合任何規則。",
"statusTipRelAll": "符合所有規則。",
"defaultItemsName": "項目",
"filterBarMsgHasFilterTemplate": "顯示 ${1} ${2} 之 ${0}。",
"filterBarMsgNoFilterTemplate": "未套用過濾器",
"filterBarDefButton": "定義過濾器",
"waiFilterBarDefButton": "過濾表格",
"a11yFilterBarDefButton": "過濾器...",
"filterBarClearButton": "清除過濾器",
"waiFilterBarClearButton": "清除過濾器",
"closeFilterBarBtn": "關閉過濾器列",
"clearFilterMsg": "這將會移除過濾器並顯示所有的可用記錄。",
"anyColumnOption": "任何直欄",
"trueLabel": "True",
"falseLabel": "False"
})
//end v1.x content
); | PypiClean |
/IPS-Vagrant-0.4.1.tar.gz/IPS-Vagrant-0.4.1/ips_vagrant/installer/V_4_1_3_2.py | import click
from ips_vagrant.common.progress import ProgressBar, Echo
from ips_vagrant.installer.dev_tools.latest import DevToolsInstaller
from ips_vagrant.installer.latest import Installer as Latest
version = (4, 1, 3, 2)
class Installer(Latest):
def start(self):
"""
Start the installation wizard
"""
self.log.debug('Starting the installation process')
self.browser.open(self.url)
self.system_check()
def admin(self):
"""
Provide admin login credentials
"""
self._check_title(self.browser.title())
self.browser.select_form(nr=0)
# Get the admin credentials
prompted = []
user = self.ctx.config.get('User', 'AdminUser')
if not user:
user = click.prompt('Admin display name')
prompted.append('user')
password = self.ctx.config.get('User', 'AdminPass')
if not password:
password = click.prompt('Admin password', hide_input=True, confirmation_prompt='Confirm admin password')
prompted.append('password')
email = self.ctx.config.get('User', 'AdminEmail')
if not email:
email = click.prompt('Admin email')
prompted.append('email')
self.browser.form[self.FIELD_ADMIN_USER] = user
self.browser.form[self.FIELD_ADMIN_PASS] = password
self.browser.form[self.FIELD_ADMIN_PASS_CONFIRM] = password
self.browser.form[self.FIELD_ADMIN_EMAIL] = email
p = Echo('Submitting admin information...')
self.browser.submit()
p.done()
if len(prompted) >= 3:
save = click.confirm('Would you like to save and use these admin credentials for future installations?')
if save:
self.log.info('Saving admin login credentials')
self.ctx.config.set('User', 'AdminUser', user)
self.ctx.config.set('User', 'AdminPass', password)
self.ctx.config.set('User', 'AdminEmail', email)
with open(self.ctx.config_path, 'wb') as cf:
self.ctx.config.write(cf)
self.install()
def _start_install(self):
"""
Start the installation
"""
self._check_title(self.browser.title())
continue_link = next(self.browser.links(text_regex='Start Installation'))
self.browser.follow_link(continue_link)
# noinspection PyUnboundLocalVariable
def install(self):
"""
Run the actual installation
"""
self._start_install()
mr_link = self._get_mr_link()
# Set up the progress bar
pbar = ProgressBar(100, 'Running installation...')
pbar.start()
mr_j, mr_r = self._ajax(mr_link)
# Loop until we get a redirect json response
while True:
mr_link = self._parse_response(mr_link, mr_j)
stage = self._get_stage(mr_j)
progress = self._get_progress(mr_j)
mr_j, mr_r = self._ajax(mr_link)
pbar.update(min([progress, 100]), stage) # NOTE: Response may return progress values above 100
# If we're done, finalize the installation and break
redirect = self._check_if_complete(mr_link, mr_j)
if redirect:
pbar.finish()
break
p = Echo('Finalizing...')
mr_r = self._request(redirect, raise_request=False)
p.done()
# Install developer tools
if self.site.in_dev:
DevToolsInstaller(self.ctx, self.site).install()
# Get the link to our community homepage
self._finalize(mr_r) | PypiClean |
/EpiTator-1.3.5.tar.gz/EpiTator-1.3.5/epitator/annodoc.py | from __future__ import absolute_import
from __future__ import print_function
from . import maximum_weight_interval_set as mwis
import six
import re
from .annospan import AnnoSpan, SpanGroup
from .annotier import AnnoTier
class AnnoDoc(object):
"""
A document to be annotated.
The tiers property links to the annotations applied to it.
"""
def __init__(self, text=None, date=None):
if type(text) is six.text_type:
self.text = text
elif type(text) is str:
self.text = six.text_type(text, 'utf8')
else:
raise TypeError("text must be string or unicode")
self.tiers = {}
self.date = date
def __len__(self):
return len(self.text)
def add_tier(self, annotator, **kwargs):
return self.add_tiers(annotator, **kwargs)
def add_tiers(self, annotator, **kwargs):
result = annotator.annotate(self, **kwargs)
if isinstance(result, dict):
self.tiers.update(result)
return self
def require_tiers(self, *tier_names, **kwargs):
"""
Return the specified tiers or add them using the via annotator.
"""
assert len(set(kwargs.keys()) | set(['via'])) == 1
assert len(tier_names) > 0
via_annotator = kwargs.get('via')
tiers = [self.tiers.get(tier_name) for tier_name in tier_names]
if all(t is not None for t in tiers):
if len(tiers) == 1:
return tiers[0]
return tiers
else:
if via_annotator:
self.add_tiers(via_annotator())
return self.require_tiers(*tier_names)
else:
raise Exception("Tier could not be found. Available tiers: " + str(self.tiers.keys()))
def create_regex_tier(self, regex, label=None):
"""
Create an AnnoTier from all the spans of text that match the regex.
"""
spans = []
for match in re.finditer(regex, self.text):
spans.append(
SpanGroup([AnnoSpan(
match.start(),
match.end(),
self,
match.group(0))], label))
return AnnoTier(spans, presorted=True)
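    # --- Hedged example (added for illustration, not part of the original class) ---
    # Minimal sketch: build a tier of numeric spans from a small document.
    @staticmethod
    def _regex_tier_example():
        doc = AnnoDoc("Reported 12 cases in 3 districts")
        return doc.create_regex_tier(r"\d+", label="number")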
def to_dict(self):
"""
Convert the document into a json serializable dictionary.
This does not store all the document's data. For a complete
serialization use pickle.
>>> from .annospan import AnnoSpan
>>> from .annotier import AnnoTier
>>> import datetime
>>> doc = AnnoDoc('one two three', date=datetime.datetime(2011, 11, 11))
>>> doc.tiers = {
... 'test': AnnoTier([AnnoSpan(0, 3, doc), AnnoSpan(4, 7, doc)])}
>>> d = doc.to_dict()
>>> str(d['text'])
'one two three'
>>> str(d['date'])
'2011-11-11T00:00:00Z'
>>> sorted(d['tiers']['test'][0].items())
[('label', None), ('textOffsets', [[0, 3]])]
>>> sorted(d['tiers']['test'][1].items())
[('label', None), ('textOffsets', [[4, 7]])]
"""
json_obj = {
'text': self.text
}
if self.date:
json_obj['date'] = self.date.strftime("%Y-%m-%dT%H:%M:%S") + 'Z'
json_obj['tiers'] = {}
for name, tier in self.tiers.items():
json_obj['tiers'][name] = [
span.to_dict() for span in tier]
return json_obj
def filter_overlapping_spans(self, tiers=None, tier_names=None, score_func=None):
"""Remove the smaller of any overlapping spans."""
if not tiers:
tiers = tier_names
if not tiers:
tiers = list(self.tiers.keys())
intervals = []
for tier in tiers:
if isinstance(tier, six.string_types):
tier_name = tier
if tier_name not in self.tiers:
print("Warning! Tier does not exist:", tier_name)
continue
tier = self.tiers[tier_name]
intervals.extend([
mwis.Interval(
start=span.start,
end=span.end,
weight=score_func(span) if score_func else (
span.end - span.start),
corresponding_object=(tier, span)
)
for span in tier.spans
])
tier.spans = []
my_mwis = mwis.find_maximum_weight_interval_set(intervals)
for interval in my_mwis:
tier, span = interval.corresponding_object
tier.spans.append(span) | PypiClean |
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/lib/old_mnemonic.py |
# list of words from http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/Contemporary_poetry
words = [
"like",
"just",
"love",
"know",
"never",
"want",
"time",
"out",
"there",
"make",
"look",
"eye",
"down",
"only",
"think",
"heart",
"back",
"then",
"into",
"about",
"more",
"away",
"still",
"them",
"take",
"thing",
"even",
"through",
"long",
"always",
"world",
"too",
"friend",
"tell",
"try",
"hand",
"thought",
"over",
"here",
"other",
"need",
"smile",
"again",
"much",
"cry",
"been",
"night",
"ever",
"little",
"said",
"end",
"some",
"those",
"around",
"mind",
"people",
"girl",
"leave",
"dream",
"left",
"turn",
"myself",
"give",
"nothing",
"really",
"off",
"before",
"something",
"find",
"walk",
"wish",
"good",
"once",
"place",
"ask",
"stop",
"keep",
"watch",
"seem",
"everything",
"wait",
"got",
"yet",
"made",
"remember",
"start",
"alone",
"run",
"hope",
"maybe",
"believe",
"body",
"hate",
"after",
"close",
"talk",
"stand",
"own",
"each",
"hurt",
"help",
"home",
"god",
"soul",
"new",
"many",
"two",
"inside",
"should",
"true",
"first",
"fear",
"mean",
"better",
"play",
"another",
"gone",
"change",
"use",
"wonder",
"someone",
"hair",
"cold",
"open",
"best",
"any",
"behind",
"happen",
"water",
"dark",
"laugh",
"stay",
"forever",
"name",
"work",
"show",
"sky",
"break",
"came",
"deep",
"door",
"put",
"black",
"together",
"upon",
"happy",
"such",
"great",
"white",
"matter",
"fill",
"past",
"please",
"burn",
"cause",
"enough",
"touch",
"moment",
"soon",
"voice",
"scream",
"anything",
"stare",
"sound",
"red",
"everyone",
"hide",
"kiss",
"truth",
"death",
"beautiful",
"mine",
"blood",
"broken",
"very",
"pass",
"next",
"forget",
"tree",
"wrong",
"air",
"mother",
"understand",
"lip",
"hit",
"wall",
"memory",
"sleep",
"free",
"high",
"realize",
"school",
"might",
"skin",
"sweet",
"perfect",
"blue",
"kill",
"breath",
"dance",
"against",
"fly",
"between",
"grow",
"strong",
"under",
"listen",
"bring",
"sometimes",
"speak",
"pull",
"person",
"become",
"family",
"begin",
"ground",
"real",
"small",
"father",
"sure",
"feet",
"rest",
"young",
"finally",
"land",
"across",
"today",
"different",
"guy",
"line",
"fire",
"reason",
"reach",
"second",
"slowly",
"write",
"eat",
"smell",
"mouth",
"step",
"learn",
"three",
"floor",
"promise",
"breathe",
"darkness",
"push",
"earth",
"guess",
"save",
"song",
"above",
"along",
"both",
"color",
"house",
"almost",
"sorry",
"anymore",
"brother",
"okay",
"dear",
"game",
"fade",
"already",
"apart",
"warm",
"beauty",
"heard",
"notice",
"question",
"shine",
"began",
"piece",
"whole",
"shadow",
"secret",
"street",
"within",
"finger",
"point",
"morning",
"whisper",
"child",
"moon",
"green",
"story",
"glass",
"kid",
"silence",
"since",
"soft",
"yourself",
"empty",
"shall",
"angel",
"answer",
"baby",
"bright",
"dad",
"path",
"worry",
"hour",
"drop",
"follow",
"power",
"war",
"half",
"flow",
"heaven",
"act",
"chance",
"fact",
"least",
"tired",
"children",
"near",
"quite",
"afraid",
"rise",
"sea",
"taste",
"window",
"cover",
"nice",
"trust",
"lot",
"sad",
"cool",
"force",
"peace",
"return",
"blind",
"easy",
"ready",
"roll",
"rose",
"drive",
"held",
"music",
"beneath",
"hang",
"mom",
"paint",
"emotion",
"quiet",
"clear",
"cloud",
"few",
"pretty",
"bird",
"outside",
"paper",
"picture",
"front",
"rock",
"simple",
"anyone",
"meant",
"reality",
"road",
"sense",
"waste",
"bit",
"leaf",
"thank",
"happiness",
"meet",
"men",
"smoke",
"truly",
"decide",
"self",
"age",
"book",
"form",
"alive",
"carry",
"escape",
"damn",
"instead",
"able",
"ice",
"minute",
"throw",
"catch",
"leg",
"ring",
"course",
"goodbye",
"lead",
"poem",
"sick",
"corner",
"desire",
"known",
"problem",
"remind",
"shoulder",
"suppose",
"toward",
"wave",
"drink",
"jump",
"woman",
"pretend",
"sister",
"week",
"human",
"joy",
"crack",
"grey",
"pray",
"surprise",
"dry",
"knee",
"less",
"search",
"bleed",
"caught",
"clean",
"embrace",
"future",
"king",
"son",
"sorrow",
"chest",
"hug",
"remain",
"sat",
"worth",
"blow",
"daddy",
"final",
"parent",
"tight",
"also",
"create",
"lonely",
"safe",
"cross",
"dress",
"evil",
"silent",
"bone",
"fate",
"perhaps",
"anger",
"class",
"scar",
"snow",
"tiny",
"tonight",
"continue",
"control",
"dog",
"edge",
"mirror",
"month",
"suddenly",
"comfort",
"given",
"loud",
"quickly",
"gaze",
"plan",
"rush",
"stone",
"town",
"battle",
"ignore",
"spirit",
"stood",
"stupid",
"yours",
"brown",
"build",
"dust",
"hey",
"kept",
"pay",
"phone",
"twist",
"although",
"ball",
"beyond",
"hidden",
"nose",
"taken",
"fail",
"float",
"pure",
"somehow",
"wash",
"wrap",
"angry",
"cheek",
"creature",
"forgotten",
"heat",
"rip",
"single",
"space",
"special",
"weak",
"whatever",
"yell",
"anyway",
"blame",
"job",
"choose",
"country",
"curse",
"drift",
"echo",
"figure",
"grew",
"laughter",
"neck",
"suffer",
"worse",
"yeah",
"disappear",
"foot",
"forward",
"knife",
"mess",
"somewhere",
"stomach",
"storm",
"beg",
"idea",
"lift",
"offer",
"breeze",
"field",
"five",
"often",
"simply",
"stuck",
"win",
"allow",
"confuse",
"enjoy",
"except",
"flower",
"seek",
"strength",
"calm",
"grin",
"gun",
"heavy",
"hill",
"large",
"ocean",
"shoe",
"sigh",
"straight",
"summer",
"tongue",
"accept",
"crazy",
"everyday",
"exist",
"grass",
"mistake",
"sent",
"shut",
"surround",
"table",
"ache",
"brain",
"destroy",
"heal",
"nature",
"shout",
"sign",
"stain",
"choice",
"doubt",
"glance",
"glow",
"mountain",
"queen",
"stranger",
"throat",
"tomorrow",
"city",
"either",
"fish",
"flame",
"rather",
"shape",
"spin",
"spread",
"ash",
"distance",
"finish",
"image",
"imagine",
"important",
"nobody",
"shatter",
"warmth",
"became",
"feed",
"flesh",
"funny",
"lust",
"shirt",
"trouble",
"yellow",
"attention",
"bare",
"bite",
"money",
"protect",
"amaze",
"appear",
"born",
"choke",
"completely",
"daughter",
"fresh",
"friendship",
"gentle",
"probably",
"six",
"deserve",
"expect",
"grab",
"middle",
"nightmare",
"river",
"thousand",
"weight",
"worst",
"wound",
"barely",
"bottle",
"cream",
"regret",
"relationship",
"stick",
"test",
"crush",
"endless",
"fault",
"itself",
"rule",
"spill",
"art",
"circle",
"join",
"kick",
"mask",
"master",
"passion",
"quick",
"raise",
"smooth",
"unless",
"wander",
"actually",
"broke",
"chair",
"deal",
"favorite",
"gift",
"note",
"number",
"sweat",
"box",
"chill",
"clothes",
"lady",
"mark",
"park",
"poor",
"sadness",
"tie",
"animal",
"belong",
"brush",
"consume",
"dawn",
"forest",
"innocent",
"pen",
"pride",
"stream",
"thick",
"clay",
"complete",
"count",
"draw",
"faith",
"press",
"silver",
"struggle",
"surface",
"taught",
"teach",
"wet",
"bless",
"chase",
"climb",
"enter",
"letter",
"melt",
"metal",
"movie",
"stretch",
"swing",
"vision",
"wife",
"beside",
"crash",
"forgot",
"guide",
"haunt",
"joke",
"knock",
"plant",
"pour",
"prove",
"reveal",
"steal",
"stuff",
"trip",
"wood",
"wrist",
"bother",
"bottom",
"crawl",
"crowd",
"fix",
"forgive",
"frown",
"grace",
"loose",
"lucky",
"party",
"release",
"surely",
"survive",
"teacher",
"gently",
"grip",
"speed",
"suicide",
"travel",
"treat",
"vein",
"written",
"cage",
"chain",
"conversation",
"date",
"enemy",
"however",
"interest",
"million",
"page",
"pink",
"proud",
"sway",
"themselves",
"winter",
"church",
"cruel",
"cup",
"demon",
"experience",
"freedom",
"pair",
"pop",
"purpose",
"respect",
"shoot",
"softly",
"state",
"strange",
"bar",
"birth",
"curl",
"dirt",
"excuse",
"lord",
"lovely",
"monster",
"order",
"pack",
"pants",
"pool",
"scene",
"seven",
"shame",
"slide",
"ugly",
"among",
"blade",
"blonde",
"closet",
"creek",
"deny",
"drug",
"eternity",
"gain",
"grade",
"handle",
"key",
"linger",
"pale",
"prepare",
"swallow",
"swim",
"tremble",
"wheel",
"won",
"cast",
"cigarette",
"claim",
"college",
"direction",
"dirty",
"gather",
"ghost",
"hundred",
"loss",
"lung",
"orange",
"present",
"swear",
"swirl",
"twice",
"wild",
"bitter",
"blanket",
"doctor",
"everywhere",
"flash",
"grown",
"knowledge",
"numb",
"pressure",
"radio",
"repeat",
"ruin",
"spend",
"unknown",
"buy",
"clock",
"devil",
"early",
"false",
"fantasy",
"pound",
"precious",
"refuse",
"sheet",
"teeth",
"welcome",
"add",
"ahead",
"block",
"bury",
"caress",
"content",
"depth",
"despite",
"distant",
"marry",
"purple",
"threw",
"whenever",
"bomb",
"dull",
"easily",
"grasp",
"hospital",
"innocence",
"normal",
"receive",
"reply",
"rhyme",
"shade",
"someday",
"sword",
"toe",
"visit",
"asleep",
"bought",
"center",
"consider",
"flat",
"hero",
"history",
"ink",
"insane",
"muscle",
"mystery",
"pocket",
"reflection",
"shove",
"silently",
"smart",
"soldier",
"spot",
"stress",
"train",
"type",
"view",
"whether",
"bus",
"energy",
"explain",
"holy",
"hunger",
"inch",
"magic",
"mix",
"noise",
"nowhere",
"prayer",
"presence",
"shock",
"snap",
"spider",
"study",
"thunder",
"trail",
"admit",
"agree",
"bag",
"bang",
"bound",
"butterfly",
"cute",
"exactly",
"explode",
"familiar",
"fold",
"further",
"pierce",
"reflect",
"scent",
"selfish",
"sharp",
"sink",
"spring",
"stumble",
"universe",
"weep",
"women",
"wonderful",
"action",
"ancient",
"attempt",
"avoid",
"birthday",
"branch",
"chocolate",
"core",
"depress",
"drunk",
"especially",
"focus",
"fruit",
"honest",
"match",
"palm",
"perfectly",
"pillow",
"pity",
"poison",
"roar",
"shift",
"slightly",
"thump",
"truck",
"tune",
"twenty",
"unable",
"wipe",
"wrote",
"coat",
"constant",
"dinner",
"drove",
"egg",
"eternal",
"flight",
"flood",
"frame",
"freak",
"gasp",
"glad",
"hollow",
"motion",
"peer",
"plastic",
"root",
"screen",
"season",
"sting",
"strike",
"team",
"unlike",
"victim",
"volume",
"warn",
"weird",
"attack",
"await",
"awake",
"built",
"charm",
"crave",
"despair",
"fought",
"grant",
"grief",
"horse",
"limit",
"message",
"ripple",
"sanity",
"scatter",
"serve",
"split",
"string",
"trick",
"annoy",
"blur",
"boat",
"brave",
"clearly",
"cling",
"connect",
"fist",
"forth",
"imagination",
"iron",
"jock",
"judge",
"lesson",
"milk",
"misery",
"nail",
"naked",
"ourselves",
"poet",
"possible",
"princess",
"sail",
"size",
"snake",
"society",
"stroke",
"torture",
"toss",
"trace",
"wise",
"bloom",
"bullet",
"cell",
"check",
"cost",
"darling",
"during",
"footstep",
"fragile",
"hallway",
"hardly",
"horizon",
"invisible",
"journey",
"midnight",
"mud",
"nod",
"pause",
"relax",
"shiver",
"sudden",
"value",
"youth",
"abuse",
"admire",
"blink",
"breast",
"bruise",
"constantly",
"couple",
"creep",
"curve",
"difference",
"dumb",
"emptiness",
"gotta",
"honor",
"plain",
"planet",
"recall",
"rub",
"ship",
"slam",
"soar",
"somebody",
"tightly",
"weather",
"adore",
"approach",
"bond",
"bread",
"burst",
"candle",
"coffee",
"cousin",
"crime",
"desert",
"flutter",
"frozen",
"grand",
"heel",
"hello",
"language",
"level",
"movement",
"pleasure",
"powerful",
"random",
"rhythm",
"settle",
"silly",
"slap",
"sort",
"spoken",
"steel",
"threaten",
"tumble",
"upset",
"aside",
"awkward",
"bee",
"blank",
"board",
"button",
"card",
"carefully",
"complain",
"crap",
"deeply",
"discover",
"drag",
"dread",
"effort",
"entire",
"fairy",
"giant",
"gotten",
"greet",
"illusion",
"jeans",
"leap",
"liquid",
"march",
"mend",
"nervous",
"nine",
"replace",
"rope",
"spine",
"stole",
"terror",
"accident",
"apple",
"balance",
"boom",
"childhood",
"collect",
"demand",
"depression",
"eventually",
"faint",
"glare",
"goal",
"group",
"honey",
"kitchen",
"laid",
"limb",
"machine",
"mere",
"mold",
"murder",
"nerve",
"painful",
"poetry",
"prince",
"rabbit",
"shelter",
"shore",
"shower",
"soothe",
"stair",
"steady",
"sunlight",
"tangle",
"tease",
"treasure",
"uncle",
"begun",
"bliss",
"canvas",
"cheer",
"claw",
"clutch",
"commit",
"crimson",
"crystal",
"delight",
"doll",
"existence",
"express",
"fog",
"football",
"gay",
"goose",
"guard",
"hatred",
"illuminate",
"mass",
"math",
"mourn",
"rich",
"rough",
"skip",
"stir",
"student",
"style",
"support",
"thorn",
"tough",
"yard",
"yearn",
"yesterday",
"advice",
"appreciate",
"autumn",
"bank",
"beam",
"bowl",
"capture",
"carve",
"collapse",
"confusion",
"creation",
"dove",
"feather",
"girlfriend",
"glory",
"government",
"harsh",
"hop",
"inner",
"loser",
"moonlight",
"neighbor",
"neither",
"peach",
"pig",
"praise",
"screw",
"shield",
"shimmer",
"sneak",
"stab",
"subject",
"throughout",
"thrown",
"tower",
"twirl",
"wow",
"army",
"arrive",
"bathroom",
"bump",
"cease",
"cookie",
"couch",
"courage",
"dim",
"guilt",
"howl",
"hum",
"husband",
"insult",
"led",
"lunch",
"mock",
"mostly",
"natural",
"nearly",
"needle",
"nerd",
"peaceful",
"perfection",
"pile",
"price",
"remove",
"roam",
"sanctuary",
"serious",
"shiny",
"shook",
"sob",
"stolen",
"tap",
"vain",
"void",
"warrior",
"wrinkle",
"affection",
"apologize",
"blossom",
"bounce",
"bridge",
"cheap",
"crumble",
"decision",
"descend",
"desperately",
"dig",
"dot",
"flip",
"frighten",
"heartbeat",
"huge",
"lazy",
"lick",
"odd",
"opinion",
"process",
"puzzle",
"quietly",
"retreat",
"score",
"sentence",
"separate",
"situation",
"skill",
"soak",
"square",
"stray",
"taint",
"task",
"tide",
"underneath",
"veil",
"whistle",
"anywhere",
"bedroom",
"bid",
"bloody",
"burden",
"careful",
"compare",
"concern",
"curtain",
"decay",
"defeat",
"describe",
"double",
"dreamer",
"driver",
"dwell",
"evening",
"flare",
"flicker",
"grandma",
"guitar",
"harm",
"horrible",
"hungry",
"indeed",
"lace",
"melody",
"monkey",
"nation",
"object",
"obviously",
"rainbow",
"salt",
"scratch",
"shown",
"shy",
"stage",
"stun",
"third",
"tickle",
"useless",
"weakness",
"worship",
"worthless",
"afternoon",
"beard",
"boyfriend",
"bubble",
"busy",
"certain",
"chin",
"concrete",
"desk",
"diamond",
"doom",
"drawn",
"due",
"felicity",
"freeze",
"frost",
"garden",
"glide",
"harmony",
"hopefully",
"hunt",
"jealous",
"lightning",
"mama",
"mercy",
"peel",
"physical",
"position",
"pulse",
"punch",
"quit",
"rant",
"respond",
"salty",
"sane",
"satisfy",
"savior",
"sheep",
"slept",
"social",
"sport",
"tuck",
"utter",
"valley",
"wolf",
"aim",
"alas",
"alter",
"arrow",
"awaken",
"beaten",
"belief",
"brand",
"ceiling",
"cheese",
"clue",
"confidence",
"connection",
"daily",
"disguise",
"eager",
"erase",
"essence",
"everytime",
"expression",
"fan",
"flag",
"flirt",
"foul",
"fur",
"giggle",
"glorious",
"ignorance",
"law",
"lifeless",
"measure",
"mighty",
"muse",
"north",
"opposite",
"paradise",
"patience",
"patient",
"pencil",
"petal",
"plate",
"ponder",
"possibly",
"practice",
"slice",
"spell",
"stock",
"strife",
"strip",
"suffocate",
"suit",
"tender",
"tool",
"trade",
"velvet",
"verse",
"waist",
"witch",
"aunt",
"bench",
"bold",
"cap",
"certainly",
"click",
"companion",
"creator",
"dart",
"delicate",
"determine",
"dish",
"dragon",
"drama",
"drum",
"dude",
"everybody",
"feast",
"forehead",
"former",
"fright",
"fully",
"gas",
"hook",
"hurl",
"invite",
"juice",
"manage",
"moral",
"possess",
"raw",
"rebel",
"royal",
"scale",
"scary",
"several",
"slight",
"stubborn",
"swell",
"talent",
"tea",
"terrible",
"thread",
"torment",
"trickle",
"usually",
"vast",
"violence",
"weave",
"acid",
"agony",
"ashamed",
"awe",
"belly",
"blend",
"blush",
"character",
"cheat",
"common",
"company",
"coward",
"creak",
"danger",
"deadly",
"defense",
"define",
"depend",
"desperate",
"destination",
"dew",
"duck",
"dusty",
"embarrass",
"engine",
"example",
"explore",
"foe",
"freely",
"frustrate",
"generation",
"glove",
"guilty",
"health",
"hurry",
"idiot",
"impossible",
"inhale",
"jaw",
"kingdom",
"mention",
"mist",
"moan",
"mumble",
"mutter",
"observe",
"ode",
"pathetic",
"pattern",
"pie",
"prefer",
"puff",
"rape",
"rare",
"revenge",
"rude",
"scrape",
"spiral",
"squeeze",
"strain",
"sunset",
"suspend",
"sympathy",
"thigh",
"throne",
"total",
"unseen",
"weapon",
"weary"
]
n = 1626
# Note about US patent no 5892470: Here each word does not represent a given digit.
# Instead, the digit represented by a word is variable; it depends on the previous word.
def mn_encode( message ):
assert len(message) % 8 == 0
out = []
for i in range(len(message)/8):
word = message[8*i:8*i+8]
x = int(word, 16)
w1 = (x%n)
w2 = ((x/n) + w1)%n
w3 = ((x/n/n) + w2)%n
out += [ words[w1], words[w2], words[w3] ]
return out
def mn_decode( wlist ):
out = ''
for i in range(len(wlist)/3):
word1, word2, word3 = wlist[3*i:3*i+3]
w1 = words.index(word1)
w2 = (words.index(word2))%n
w3 = (words.index(word3))%n
x = w1 +n*((w2-w1)%n) +n*n*((w3-w2)%n)
out += '%08x'%x
return out
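# Hedged example (added for illustration; the seed value is made up). Every 8 hex
# characters map to three words and mn_decode reverses the mapping, so the round
# trip below holds in this module's Python 2 context.
def _mnemonic_roundtrip_example():
    seed_hex = "89012345aabbccdd"
    wlist = mn_encode(seed_hex)
    assert len(wlist) == 6
    assert mn_decode(wlist) == seed_hex
    return wlist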
if __name__ == '__main__':
import sys
if len( sys.argv ) == 1:
print 'I need arguments: a hex string to encode, or a list of words to decode'
elif len( sys.argv ) == 2:
print ' '.join(mn_encode(sys.argv[1]))
else:
print mn_decode(sys.argv[1:]) | PypiClean |
/CLAM-3.2.6.tar.gz/CLAM-3.2.6/clam/config/multiplier.py |
###############################################################
# CLAM: Computational Linguistics Application Mediator
# -- Service Configuration File (Template) --
# by Maarten van Gompel (proycon)
# Centre for Language Studies
# Radboud University Nijmegen
#
# Induction for Linguistic Knowledge Research Group
# Universiteit van Tilburg
#
# http://proycon.github.com/clam
#
# Licensed under GPLv3
#
###############################################################
#This is an example for CLAM showing the use of Actions
from clam.common.parameters import *
from clam.common.formats import *
from clam.common.converters import *
from clam.common.viewers import *
from clam.common.data import *
from clam.common.digestauth import pwhash
import sys
REQUIRE_VERSION = "0.99"
# ======== GENERAL INFORMATION ===========
# General information concerning your system.
#The System ID, a short alphanumeric identifier for internal use only
SYSTEM_ID = "multiplier"
#System name, the way the system is presented to the world
SYSTEM_NAME = "Multiplier"
#An informative description for this system (this should be fairly short, about one paragraph, and may not contain HTML)
SYSTEM_DESCRIPTION = "Example of CLAM Actions, simple multiplication of two numbers"
# ======== LOCATION ===========
#The root directory for CLAM, all project files, (input & output) and
#pre-installed corpora will be stored here. Set to an absolute path:
ROOT = "/tmp/clammultiplier.projects/"
#The URL of the system (If you start clam with the built-in webserver, you can override this with -P)
PORT= 8080
#The hostname of the system. Will be automatically determined if not set. (If you start clam with the built-in webserver, you can override this with -H)
#Users *must* make use of this hostname and no other (even if it points to the same IP) for the web application to work.
#HOST = 'localhost'
#If the webservice runs in another webserver (e.g. apache, nginx, lighttpd), and it
#doesn't run at the root of the server, you can specify a URL prefix here:
#URLPREFIX = "/myservice/"
#Optionally, you can force the full URL CLAM has to use, rather than rely on any autodetected measures:
#FORCEURL = "http://myclamservice.com"
#The location of where CLAM is installed (will be determined automatically if not set)
#CLAMDIR = "/path/to/clam"
# ======== AUTHENTICATION & SECURITY ===========
#Users and passwords
#set security realm, a required component for hashing passwords (will default to SYSTEM_ID if not set)
#REALM = SYSTEM_ID
USERS = None #no user authentication/security (this is not recommended for production environments!)
ADMINS = None #List of usernames that are administrator and can access the administrative web-interface (on URL /admin/)
#If you want to enable user-based security, you can define a dictionary
#of users and (hashed) passwords here. The actual authentication will proceed
#as HTTP Digest Authentication. Although being a convenient shortcut,
#using pwhash and plaintext password in this code is not secure!!
#USERS = { 'user1': '4f8dh8337e2a5a83734b', 'user2': pwhash('username', REALM, 'secret') }
#Amount of free memory required prior to starting a new process (in MB!), Free Memory + Cached (without swap!). Set to 0 to disable this check (not recommended)
REQUIREMEMORY = 10
#Maximum load average at which processes are still started (first number reported by 'uptime'). Set to 0 to disable this check (not recommended)
MAXLOADAVG = 1.0
#Minimum amount of free diskspace in MB. Set to 0 to disable this check (not recommended)
DISK = '/dev/sda1' #set this to the disk where ROOT is on
MINDISKSPACE = 10
# ======== WEB-APPLICATION STYLING =============
#Choose a style (has to be defined as a CSS file in clam/style/ ). You can copy, rename and adapt it to make your own style
STYLE = 'classic'
# ======== PROFILE DEFINITIONS ===========
#No profiles, we only use actions
PROFILES = []
# ======== COMMAND ===========
#No command is used, we only use actions
COMMAND = None
# ======== PARAMETER DEFINITIONS ===========
#No global parameters, we only use actions
#The parameters are subdivided into several groups. In the form of a list of (groupname, parameters) tuples. The parameters are a list of instances from common/parameters.py
PARAMETERS = []
def multiply(x,y):
return x * y
# ======== ACTIONS ===========
ACTIONS = [
Action(id="multiply",name="Multiplier",description="Multiply two numbers", function=multiply, parameters=[
IntegerParameter(id="x", name="First value", required=True),
IntegerParameter(id="y", name="Second value", required=True)
])
]
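# Hedged usage note (added for illustration). The action wraps a plain Python
# function, so it can be sanity-checked directly:
assert multiply(6, 7) == 42
# Once the service is running, CLAM exposes actions over HTTP; the exact URL layout
# below is an assumption, not taken from this config:
#   http://localhost:8080/actions/multiply/?x=6&y=7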
# ======== DISPATCHING (ADVANCED! YOU CAN SAFELY SKIP THIS!) ========
#The dispatcher to use (defaults to clamdispatcher.py), you almost never want to change this
#DISPATCHER = 'clamdispatcher.py'
#DISPATCHER_POLLINTERVAL = 30 #interval at which the dispatcher polls for resource consumption (default: 30 secs)
#DISPATCHER_MAXRESMEM = 0 #maximum consumption of resident memory (in megabytes), processes that exceed this will be automatically aborted. (0 = unlimited, default)
#DISPATCHER_MAXTIME = 0 #maximum number of seconds a process may run, it will be aborted if this duration is exceeded. (0=unlimited, default)
#DISPATCHER_PYTHONPATH = [] #list of extra directories to add to the python path prior to launch of dispatcher
#Run background process on a remote host? Then set the following (leave the lambda in):
#REMOTEHOST = lambda: return 'some.remote.host'
#REMOTEUSER = 'username'
#For this to work, the user under which CLAM runs must have (passwordless) ssh access (use ssh keys) to the remote host using the specified username (ssh REMOTEUSER@REMOTEHOST)
#Moreover, both systems must have access to the same filesystem (ROOT) under the same mountpoint. | PypiClean |
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/ebmlib/clipboard.py | __author__ = "Hasan Aljudy"
__cvsid__ = "$Id: clipboard.py 67123 2011-03-04 00:02:35Z CJP $"
__revision__ = "$Revision: 67123 $"
__all__ = [ 'Clipboard', 'ClipboardException']
#-----------------------------------------------------------------------------#
# Imports
import wx
#-----------------------------------------------------------------------------#
class ClipboardException(Exception):
"""Thrown for errors in the Clipboard class"""
pass
#-----------------------------------------------------------------------------#
class Clipboard(object):
"""Multiple clipboards as named registers (as per vim)
" is an alias for system clipboard and is also the default clipboard.
@note: The only way to access multiple clipboards right now is through
Normal mode when Vi(m) emulation is enabled.
"""
NAMES = list(u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_')
registers = {}
current = u'"'
@classmethod
def ClearAll(cls):
"""Clear all registers"""
for reg in cls.registers:
cls.registers[reg] = u''
@classmethod
def DeleteAll(cls):
"""Delete all registers"""
cls.registers.clear()
@classmethod
def Switch(cls, reg):
"""Switch to register
@param reg: char
"""
if reg in cls.NAMES or reg == u'"':
cls.current = reg
else:
raise ClipboardException(u"Switched to invalid register name")
@classmethod
def NextFree(cls):
"""Switch to the next free register. If current register is free, no
switching happens.
A free register is one that's either unused or has no content
@note: This is not used yet.
"""
if cls.Get() == u'':
return
for name in cls.NAMES:
if cls.registers.get(name, u'') == u'':
cls.Switch(name)
break
@classmethod
def AllUsed(cls):
"""Get a dictionary mapping all used clipboards (plus the system
clipboard) to their content.
@note: This is not used yet.
@return: dict
"""
cmd_map = { u'"': cls.SystemGet() }
for name in cls.NAMES:
if cls.registers.get(name, u''):
cmd_map[name] = cls.registers[name]
return cmd_map
@classmethod
def Get(cls):
"""Get the content of the current register. Used for pasting"""
if cls.current == u'"':
return cls.SystemGet()
else:
return cls.registers.get(cls.current, u'')
@classmethod
def Set(cls, text):
"""Set the content of the current register
@param text: string
"""
if cls.current == u'"':
return cls.SystemSet(text)
else:
cls.registers[cls.current] = text
@classmethod
def SystemGet(cls):
"""Get text from the system clipboard
@return: string
"""
text = None
if wx.TheClipboard.Open():
if wx.TheClipboard.IsSupported(wx.DataFormat(wx.DF_TEXT)):
text = wx.TextDataObject()
wx.TheClipboard.GetData(text)
wx.TheClipboard.Close()
if text is not None:
return text.GetText()
else:
return u''
@classmethod
def SystemSet(cls, text):
"""Set text into the system clipboard
@param text: string
@return: bool
"""
ok = False
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(wx.TextDataObject(text))
wx.TheClipboard.Close()
ok = True
return ok | PypiClean |
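#-----------------------------------------------------------------------------#
# Hedged example (added for illustration, not part of the original class).
# Named registers live in Clipboard.registers, while '"' maps to the system
# clipboard (which needs a running wx.App).
def _clipboard_register_example():
    Clipboard.Switch(u'a')   # use named register 'a'
    Clipboard.Set(u'hello')  # stored in the in-memory register dict
    text = Clipboard.Get()   # -> u'hello'
    Clipboard.Switch(u'"')   # back to the system clipboard
    return text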
/BEAT_Guang-1.0.1-py3-none-any.whl/econml/iv/dml/_dml.py | import numpy as np
from sklearn.base import clone
from sklearn.linear_model import LinearRegression, LogisticRegressionCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from itertools import product
from ..._ortho_learner import _OrthoLearner
from ..._cate_estimator import LinearModelFinalCateEstimatorMixin, StatsModelsCateEstimatorMixin, LinearCateEstimator
from ...inference import StatsModelsInference, GenericSingleTreatmentModelFinalInference
from ...sklearn_extensions.linear_model import StatsModels2SLS, StatsModelsLinearRegression, WeightedLassoCVWrapper
from ...sklearn_extensions.model_selection import WeightedStratifiedKFold
from ...utilities import (_deprecate_positional, get_feature_names_or_default, filter_none_kwargs, add_intercept,
cross_product, broadcast_unit_treatments, reshape_treatmentwise_effects, shape,
parse_final_model_params, deprecated, Summary)
from ...dml.dml import _FirstStageWrapper, _FinalWrapper
from ...dml._rlearner import _ModelFinal
from ..._shap import _shap_explain_joint_linear_model_cate, _shap_explain_model_cate
class _OrthoIVModelNuisance:
def __init__(self, model_y_xw, model_t_xw, model_z, projection):
self._model_y_xw = model_y_xw
self._model_t_xw = model_t_xw
self._projection = projection
if self._projection:
self._model_t_xwz = model_z
else:
self._model_z_xw = model_z
def _combine(self, W, Z, n_samples):
if Z is not None:
Z = Z.reshape(n_samples, -1)
return Z if W is None else np.hstack([W, Z])
return None if W is None else W
def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
self._model_y_xw.fit(X=X, W=W, Target=Y, sample_weight=sample_weight, groups=groups)
self._model_t_xw.fit(X=X, W=W, Target=T, sample_weight=sample_weight, groups=groups)
if self._projection:
# concat W and Z
WZ = self._combine(W, Z, Y.shape[0])
self._model_t_xwz.fit(X=X, W=WZ, Target=T, sample_weight=sample_weight, groups=groups)
else:
self._model_z_xw.fit(X=X, W=W, Target=Z, sample_weight=sample_weight, groups=groups)
return self
def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, group=None):
if hasattr(self._model_y_xw, 'score'):
Y_X_score = self._model_y_xw.score(X=X, W=W, Target=Y, sample_weight=sample_weight)
else:
Y_X_score = None
if hasattr(self._model_t_xw, 'score'):
T_X_score = self._model_t_xw.score(X=X, W=W, Target=T, sample_weight=sample_weight)
else:
T_X_score = None
if self._projection:
# concat W and Z
WZ = self._combine(W, Z, Y.shape[0])
if hasattr(self._model_t_xwz, 'score'):
T_XZ_score = self._model_t_xwz.score(X=X, W=WZ, Target=T, sample_weight=sample_weight)
else:
T_XZ_score = None
return Y_X_score, T_X_score, T_XZ_score
else:
if hasattr(self._model_z_xw, 'score'):
Z_X_score = self._model_z_xw.score(X=X, W=W, Target=Z, sample_weight=sample_weight)
else:
Z_X_score = None
return Y_X_score, T_X_score, Z_X_score
def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None, group=None):
Y_pred = self._model_y_xw.predict(X=X, W=W)
T_pred = self._model_t_xw.predict(X=X, W=W)
if self._projection:
# concat W and Z
WZ = self._combine(W, Z, Y.shape[0])
T_proj = self._model_t_xwz.predict(X, WZ)
else:
Z_pred = self._model_z_xw.predict(X=X, W=W)
if (X is None) and (W is None): # In this case predict above returns a single row
Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
T_pred = np.tile(T_pred.reshape(1, -1), (T.shape[0], 1))
if not self._projection:
Z_pred = np.tile(Z_pred.reshape(1, -1), (Z.shape[0], 1))
Y_res = Y - Y_pred.reshape(Y.shape)
T_res = T - T_pred.reshape(T.shape)
if self._projection:
Z_res = T_proj.reshape(T.shape) - T_pred.reshape(T.shape)
else:
Z_res = Z - Z_pred.reshape(Z.shape)
return Y_res, T_res, Z_res
class _OrthoIVModelFinal:
def __init__(self, model_final, featurizer, fit_cate_intercept):
self._model_final = clone(model_final, safe=False)
self._original_featurizer = clone(featurizer, safe=False)
self._fit_cate_intercept = fit_cate_intercept
if self._fit_cate_intercept:
add_intercept_trans = FunctionTransformer(add_intercept,
validate=True)
if featurizer:
self._featurizer = Pipeline([('featurize', self._original_featurizer),
('add_intercept', add_intercept_trans)])
else:
self._featurizer = add_intercept_trans
else:
self._featurizer = self._original_featurizer
def _combine(self, X, T, fitting=True):
if X is not None:
if self._featurizer is not None:
F = self._featurizer.fit_transform(X) if fitting else self._featurizer.transform(X)
else:
F = X
else:
if not self._fit_cate_intercept:
raise AttributeError("Cannot have X=None and also not allow for a CATE intercept!")
F = np.ones((T.shape[0], 1))
return cross_product(F, T)
def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None,
sample_weight=None, freq_weight=None, sample_var=None, groups=None):
Y_res, T_res, Z_res = nuisances
# Track training dimensions to see if Y or T is a vector instead of a 2-dimensional array
self._d_t = shape(T_res)[1:]
self._d_y = shape(Y_res)[1:]
XT_res = self._combine(X, T_res)
XZ_res = self._combine(X, Z_res)
filtered_kwargs = filter_none_kwargs(sample_weight=sample_weight,
freq_weight=freq_weight, sample_var=sample_var)
self._model_final.fit(XZ_res, XT_res, Y_res, **filtered_kwargs)
return self
def predict(self, X=None):
X2, T = broadcast_unit_treatments(X if X is not None else np.empty((1, 0)),
self._d_t[0] if self._d_t else 1)
XT = self._combine(None if X is None else X2, T, fitting=False)
prediction = self._model_final.predict(XT)
return reshape_treatmentwise_effects(prediction,
self._d_t, self._d_y)
def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, groups=None):
Y_res, T_res, Z_res = nuisances
if Y_res.ndim == 1:
Y_res = Y_res.reshape((-1, 1))
if T_res.ndim == 1:
T_res = T_res.reshape((-1, 1))
effects = self.predict(X).reshape((-1, Y_res.shape[1], T_res.shape[1]))
Y_res_pred = np.einsum('ijk,ik->ij', effects, T_res).reshape(Y_res.shape)
if sample_weight is not None:
return np.linalg.norm(np.average(cross_product(Z_res, Y_res - Y_res_pred), weights=sample_weight, axis=0),
ord=2)
else:
return np.linalg.norm(np.mean(cross_product(Z_res, Y_res - Y_res_pred), axis=0), ord=2)
class OrthoIV(LinearModelFinalCateEstimatorMixin, _OrthoLearner):
"""
Implementation of the orthogonal/double ml method for CATE estimation with
IV as described in section 4.2:
Double/Debiased Machine Learning for Treatment and Causal Parameters
Victor Chernozhukov, Denis Chetverikov, Mert Demirer, Esther Duflo, Christian Hansen, Whitney Newey, James Robins
https://arxiv.org/abs/1608.00060
Solve the following moment equation:
.. math::
\\E[(Y-\\E[Y|X]-\\theta(X) * (T-\\E[T|X]))(Z-\\E[Z|X])] = 0
Parameters
----------
model_y_xw : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods.
If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.
model_t_xw : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods.
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV`
will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
model_t_xwz : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods.
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV`
will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
model_z_xw : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[Z | X, W]`. Must support `fit` and `predict` methods.
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV`
will be applied for discrete instrument,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous instrument.
projection: bool, optional, default False
        If True, we fit a slight variant of OrthoIV where we use E[T | X, W, Z] as the instrument as opposed to Z;
        in that case model_z_xw will be disabled. If False, model_t_xwz will be disabled.
featurizer : :term:`transformer`, optional, default None
Must support fit_transform and transform. Used to create composite features in the final CATE regression.
It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
If featurizer=None, then CATE is trained on X.
fit_cate_intercept : bool, optional, default False
Whether the linear CATE model should have a constant term.
discrete_treatment: bool, optional, default False
Whether the treatment values should be treated as categorical, rather than continuous, quantities
discrete_instrument: bool, optional, default False
Whether the instrument values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
cv: int, cross-validation generator or an iterable, optional, default 2
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
mc_iters: int, optional (default=None)
The number of times to rerun the first stage models to reduce the variance of the nuisances.
mc_agg: {'mean', 'median'}, optional (default='mean')
How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
cross-fitting.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
"""
def __init__(self, *,
model_y_xw="auto",
model_t_xw="auto",
model_t_xwz="auto",
model_z_xw="auto",
projection=False,
featurizer=None,
fit_cate_intercept=True,
discrete_treatment=False,
discrete_instrument=False,
categories='auto',
cv=2,
mc_iters=None,
mc_agg='mean',
random_state=None):
self.model_y_xw = clone(model_y_xw, safe=False)
self.model_t_xw = clone(model_t_xw, safe=False)
self.model_t_xwz = clone(model_t_xwz, safe=False)
self.model_z_xw = clone(model_z_xw, safe=False)
self.projection = projection
self.featurizer = clone(featurizer, safe=False)
self.fit_cate_intercept = fit_cate_intercept
super().__init__(discrete_instrument=discrete_instrument,
discrete_treatment=discrete_treatment,
categories=categories,
cv=cv,
mc_iters=mc_iters,
mc_agg=mc_agg,
random_state=random_state)
def _gen_featurizer(self):
return clone(self.featurizer, safe=False)
def _gen_model_final(self):
return StatsModels2SLS(cov_type="HC0")
def _gen_ortho_learner_model_final(self):
return _OrthoIVModelFinal(self._gen_model_final(), self._gen_featurizer(), self.fit_cate_intercept)
def _gen_ortho_learner_model_nuisance(self):
if self.model_y_xw == 'auto':
model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_y_xw = clone(self.model_y_xw, safe=False)
if self.model_t_xw == 'auto':
if self.discrete_treatment:
model_t_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t_xw = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t_xw = clone(self.model_t_xw, safe=False)
if self.projection:
# train E[T|X,W,Z]
if self.model_t_xwz == 'auto':
if self.discrete_treatment:
model_t_xwz = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t_xwz = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t_xwz = clone(self.model_t_xwz, safe=False)
return _OrthoIVModelNuisance(_FirstStageWrapper(clone(model_y_xw, safe=False), True,
self._gen_featurizer(), False, False),
_FirstStageWrapper(clone(model_t_xw, safe=False), False,
self._gen_featurizer(), False, self.discrete_treatment),
_FirstStageWrapper(clone(model_t_xwz, safe=False), False,
self._gen_featurizer(), False, self.discrete_treatment),
self.projection)
else:
# train E[Z|X,W]
if self.model_z_xw == "auto":
if self.discrete_instrument:
model_z_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_z_xw = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_z_xw = clone(self.model_z_xw, safe=False)
return _OrthoIVModelNuisance(_FirstStageWrapper(clone(model_y_xw, safe=False), True,
self._gen_featurizer(), False, False),
_FirstStageWrapper(clone(model_t_xw, safe=False), False,
self._gen_featurizer(), False, self.discrete_treatment),
_FirstStageWrapper(clone(model_z_xw, safe=False), False,
self._gen_featurizer(), False, self.discrete_instrument),
self.projection)
def fit(self, Y, T, *, Z, X=None, W=None, sample_weight=None, freq_weight=None, sample_var=None, groups=None,
cache_values=False, inference="auto"):
"""
Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
Z: (n, d_z) matrix
Instruments for each sample
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample
sample_weight : (n,) array like, default None
Individual weights for each sample. If None, it assumes equal weight.
freq_weight: (n,) array like of integers, default None
Weight for the observation. Observation i is treated as the mean
outcome of freq_weight[i] independent observations.
When ``sample_var`` is not None, this should be provided.
sample_var : {(n,), (n, d_y)} nd array like, default None
Variance of the outcome(s) of the original freq_weight[i] observations that were used to
compute the mean outcome represented by observation i.
groups: (n,) vector, optional
All rows corresponding to the same group will be kept together during splitting.
If groups is not None, the `cv` argument passed to this class's initializer
must support a 'groups' argument to its split method.
cache_values: bool, default False
Whether to cache inputs and first stage results, which will allow refitting a different final model
inference: string, :class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`) and 'auto'
(or an instance of :class:`.LinearModelFinalInference`)
Returns
-------
self: OrthoIV instance
"""
if self.projection:
assert self.model_z_xw == "auto", ("When projection=True, model_z_xw will not be fitted; "
"leave it as 'auto' when initializing the estimator!")
else:
assert self.model_t_xwz == "auto", ("When projection=False, model_t_xwz will not be fitted; "
"leave it as 'auto' when initializing the estimator!")
# Replacing fit from _OrthoLearner, to reorder arguments and improve the docstring
return super().fit(Y, T, X=X, W=W, Z=Z,
sample_weight=sample_weight, freq_weight=freq_weight, sample_var=sample_var, groups=groups,
cache_values=cache_values, inference=inference)
def refit_final(self, *, inference='auto'):
return super().refit_final(inference=inference)
refit_final.__doc__ = _OrthoLearner.refit_final.__doc__
def score(self, Y, T, Z, X=None, W=None, sample_weight=None):
"""
Score the fitted CATE model on a new data set. Generates nuisance parameters
for the new data set based on the fitted residual nuisance models created at fit time.
It uses the mean prediction of the models fitted by the different crossfit folds.
Then calculates the MSE of the final residual Y on residual T regression.
If model_final does not have a score method, then it raises an :exc:`.AttributeError`
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
Z: (n, d_z) matrix
Instruments for each sample
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample
sample_weight: optional(n,) vector or None (Default=None)
Weights for each sample
Returns
-------
score: float
The MSE of the final CATE model on the new data.
"""
# Replacing score from _OrthoLearner, to enforce Z to be required and improve the docstring
return super().score(Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight)
@property
def featurizer_(self):
"""
Get the fitted featurizer.
Returns
-------
featurizer: object of type(`featurizer`)
An instance of the fitted featurizer that was used to preprocess X in the final CATE model training.
Available only when featurizer is not None and X is not None.
"""
return self.ortho_learner_model_final_._featurizer
@property
def original_featurizer(self):
# NOTE: important to use the ortho_learner_model_final_ attribute instead of the
# attribute so that the trained featurizer will be passed through
return self.ortho_learner_model_final_._original_featurizer
def cate_feature_names(self, feature_names=None):
"""
Get the output feature names.
Parameters
----------
feature_names: list of strings of length X.shape[1] or None
The names of the input features. If None and X is a dataframe, it defaults to the column names
from the dataframe.
Returns
-------
out_feature_names: list of strings or None
The names of the output features :math:`\\phi(X)`, i.e. the features with respect to which the
final CATE model for each treatment is linear. It is the names of the features that are associated
with each entry of the :meth:`coef_` parameter. Available only when the featurizer is not None and has
a method: `get_feature_names(feature_names)`. Otherwise None is returned.
"""
if self._d_x is None:
# Handles the corner case when X=None but featurizer might be not None
return None
if feature_names is None:
feature_names = self._input_names["feature_names"]
if self.original_featurizer is None:
return feature_names
return get_feature_names_or_default(self.original_featurizer, feature_names)
@property
def model_final_(self):
# NOTE This is used by the inference methods and is more for internal use to the library
return self.ortho_learner_model_final_._model_final
@property
def model_cate(self):
"""
Get the fitted final CATE model.
Returns
-------
model_cate: object of type(model_final)
An instance of the model_final object that was fitted after calling fit which corresponds
to the constant marginal CATE model.
"""
return self.ortho_learner_model_final_._model_final
@property
def models_y_xw(self):
"""
Get the fitted models for :math:`\\E[Y | X, W]`.
Returns
-------
models_y_xw: nested list of objects of type(`model_y_xw`)
A nested list of instances of the `model_y_xw` object. The number of sublists equals the number of
monte carlo iterations; each element in a sublist corresponds to a cross-fitting
fold and is the model instance that was fitted for that training fold.
"""
return [[mdl._model_y_xw._model for mdl in mdls] for mdls in super().models_nuisance_]
@property
def models_t_xw(self):
"""
Get the fitted models for :math:`\\E[T | X, W]`.
Returns
-------
models_t_xw: nested list of objects of type(`model_t_xw`)
A nested list of instances of the `model_t_xw` object. The number of sublists equals the number of
monte carlo iterations; each element in a sublist corresponds to a cross-fitting
fold and is the model instance that was fitted for that training fold.
"""
return [[mdl._model_t_xw._model for mdl in mdls] for mdls in super().models_nuisance_]
@property
def models_z_xw(self):
"""
Get the fitted models for :math:`\\E[Z | X, W]`.
Returns
-------
models_z_xw: nested list of objects of type(`model_z_xw`)
A nested list of instances of the `model_z_xw` object. The number of sublists equals the number of
monte carlo iterations; each element in a sublist corresponds to a cross-fitting
fold and is the model instance that was fitted for that training fold.
"""
if self.projection:
raise AttributeError("Projection model is fitted for instrument! Use models_t_xwz.")
return [[mdl._model_z_xw._model for mdl in mdls] for mdls in super().models_nuisance_]
@property
def models_t_xwz(self):
"""
Get the fitted models for :math:`\\E[T | X, W, Z]`.
Returns
-------
models_t_xwz: nested list of objects of type(`model_t_xwz`)
A nested list of instances of the `model_t_xwz` object. The number of sublists equals the number of
monte carlo iterations; each element in a sublist corresponds to a cross-fitting
fold and is the model instance that was fitted for that training fold.
"""
if not self.projection:
raise AttributeError("Direct model is fitted for instrument! Use models_z_xw.")
return [[mdl._model_t_xwz._model for mdl in mdls] for mdls in super().models_nuisance_]
@property
def nuisance_scores_y_xw(self):
"""
Get the scores for y_xw model on the out-of-sample training data
"""
return self.nuisance_scores_[0]
@property
def nuisance_scores_t_xw(self):
"""
Get the scores for t_xw model on the out-of-sample training data
"""
return self.nuisance_scores_[1]
@property
def nuisance_scores_z_xw(self):
"""
Get the scores for z_xw model on the out-of-sample training data
"""
if self.projection:
raise AttributeError("Projection model is fitted for instrument! Use nuisance_scores_t_xwz.")
return self.nuisance_scores_[2]
@property
def nuisance_scores_t_xwz(self):
"""
Get the scores for t_xwz model on the out-of-sample training data
"""
if not self.projection:
raise AttributeError("Direct model is fitted for instrument! Use nuisance_scores_z_xw.")
return self.nuisance_scores_[2]
@property
def fit_cate_intercept_(self):
return self.ortho_learner_model_final_._fit_cate_intercept
@property
def bias_part_of_coef(self):
return self.ortho_learner_model_final_._fit_cate_intercept
@property
def model_final(self):
return self._gen_model_final()
@model_final.setter
def model_final(self, model):
if model is not None:
raise ValueError("Parameter `model_final` cannot be altered for this estimator!")
@property
def residuals_(self):
"""
A tuple (y_res, T_res, Z_res, X, W, Z) of the residuals from the first stage estimation
along with the associated X, W and Z. Samples are not guaranteed to be in the same
order as the input order.
"""
if not hasattr(self, '_cached_values'):
raise AttributeError("Estimator is not fitted yet!")
if self._cached_values is None:
raise AttributeError("`fit` was called with `cache_values=False`. "
"Set to `True` to enable residual storage.")
Y_res, T_res, Z_res = self._cached_values.nuisances
return Y_res, T_res, Z_res, self._cached_values.X, self._cached_values.W, self._cached_values.Z
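# Illustrative usage sketch (not part of the original module). The synthetic data and
# hyperparameters below are hypothetical, and the `effect` method is assumed to be
# inherited from the CATE-estimator base classes defined elsewhere in this package.
def _example_orthoiv_usage():
    n = 1000
    X = np.random.normal(size=(n, 2))
    W = np.random.normal(size=(n, 3))
    Z = np.random.binomial(1, 0.5, size=(n,))         # binary instrument
    T = np.random.binomial(1, 0.3 + 0.4 * Z)          # treatment partly driven by Z
    Y = (1 + X[:, 0]) * T + W[:, 0] + np.random.normal(size=(n,))
    est = OrthoIV(projection=False, discrete_treatment=True, discrete_instrument=True, cv=2)
    est.fit(Y, T, Z=Z, X=X, W=W)
    return est.effect(X)                              # CATE evaluated at the training X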
class _BaseDMLIVModelNuisance:
"""
Nuisance model fits the three models at fit time and at predict time
returns :math:`Y-\\E[Y|X]` and :math:`\\E[T|X,Z]-\\E[T|X]` as residuals.
"""
def __init__(self, model_y_xw, model_t_xw, model_t_xwz):
self._model_y_xw = clone(model_y_xw, safe=False)
self._model_t_xw = clone(model_t_xw, safe=False)
self._model_t_xwz = clone(model_t_xwz, safe=False)
def _combine(self, W, Z, n_samples):
if Z is not None:
Z = Z.reshape(n_samples, -1)
return Z if W is None else np.hstack([W, Z])
return None if W is None else W
def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
self._model_y_xw.fit(X, W, Y, **filter_none_kwargs(sample_weight=sample_weight, groups=groups))
self._model_t_xw.fit(X, W, T, **filter_none_kwargs(sample_weight=sample_weight, groups=groups))
# concat W and Z
WZ = self._combine(W, Z, Y.shape[0])
self._model_t_xwz.fit(X, WZ, T, **filter_none_kwargs(sample_weight=sample_weight, groups=groups))
return self
def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
# note that groups are not passed to score because they are only used for fitting
if hasattr(self._model_y_xw, 'score'):
Y_X_score = self._model_y_xw.score(X, W, Y, **filter_none_kwargs(sample_weight=sample_weight))
else:
Y_X_score = None
if hasattr(self._model_t_xw, 'score'):
T_X_score = self._model_t_xw.score(X, W, T, **filter_none_kwargs(sample_weight=sample_weight))
else:
T_X_score = None
if hasattr(self._model_t_xwz, 'score'):
# concat W and Z
WZ = self._combine(W, Z, Y.shape[0])
T_XZ_score = self._model_t_xwz.score(X, WZ, T, **filter_none_kwargs(sample_weight=sample_weight))
else:
T_XZ_score = None
return Y_X_score, T_X_score, T_XZ_score
def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
# note that sample_weight and groups are not passed to predict because they are only used for fitting
Y_pred = self._model_y_xw.predict(X, W)
# concat W and Z
WZ = self._combine(W, Z, Y.shape[0])
TXZ_pred = self._model_t_xwz.predict(X, WZ)
TX_pred = self._model_t_xw.predict(X, W)
if (X is None) and (W is None): # In this case predict above returns a single row
Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
TX_pred = np.tile(TX_pred.reshape(1, -1), (T.shape[0], 1))
Y_res = Y - Y_pred.reshape(Y.shape)
T_res = TXZ_pred.reshape(T.shape) - TX_pred.reshape(T.shape)
return Y_res, T_res
class _BaseDMLIVModelFinal(_ModelFinal):
"""
Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
that depends on X, i.e.
.. math ::
Y - \\E[Y | X] = \\theta(X) \\cdot (\\E[T | X, Z] - \\E[T | X]) + \\epsilon
and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
residual on residual regression.
"""
pass
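# Minimal numeric sketch (not part of the original module) of the residual-on-residual
# objective described in the docstring above, assuming a scalar treatment and a theta(X)
# that is linear in X; it bypasses the _ModelFinal machinery entirely.
def _example_residual_on_residual(Y, Y_hat, T_hat_xz, T_hat_x, X):
    from sklearn.linear_model import LinearRegression
    Y_res = Y - Y_hat                         # Y - E[Y|X,W]
    T_res = T_hat_xz - T_hat_x                # E[T|X,W,Z] - E[T|X,W]
    # with theta(X) = X @ beta, the model Y_res ~ theta(X) * T_res is linear in (X * T_res)
    design = X * T_res.reshape(-1, 1)
    beta = LinearRegression(fit_intercept=False).fit(design, Y_res).coef_
    return beta                               # linear parametrization of theta(X)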
class _BaseDMLIV(_OrthoLearner):
# A helper class that accesses all the internal fitted objects of a DMLIV Cate Estimator.
# Used by both Parametric and Non Parametric DMLIV.
# override only so that we can enforce Z to be required
def fit(self, Y, T, *, Z, X=None, W=None, sample_weight=None, freq_weight=None, sample_var=None, groups=None,
cache_values=False, inference=None):
"""
Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
Z: (n, d_z) matrix
Instruments for each sample
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample
W: optional (n, d_w) matrix or None (Default=None)
Controls for each sample
sample_weight : (n,) array like, default None
Individual weights for each sample. If None, it assumes equal weight.
freq_weight: (n,) array like of integers, default None
Weight for the observation. Observation i is treated as the mean
outcome of freq_weight[i] independent observations.
When ``sample_var`` is not None, this should be provided.
sample_var : {(n,), (n, d_y)} nd array like, default None
Variance of the outcome(s) of the original freq_weight[i] observations that were used to
compute the mean outcome represented by observation i.
groups: (n,) vector, optional
All rows corresponding to the same group will be kept together during splitting.
If groups is not None, the `cv` argument passed to this class's initializer
must support a 'groups' argument to its split method.
cache_values: bool, default False
Whether to cache inputs and first stage results, which will allow refitting a different final model
inference: string, :class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`)
Returns
-------
self
"""
return super().fit(Y, T, X=X, W=W, Z=Z,
sample_weight=sample_weight, freq_weight=freq_weight, sample_var=sample_var, groups=groups,
cache_values=cache_values, inference=inference)
def score(self, Y, T, Z, X=None, W=None, sample_weight=None):
"""
Score the fitted CATE model on a new data set. Generates nuisance parameters
for the new data set based on the fitted residual nuisance models created at fit time.
It uses the mean prediction of the models fitted by the different crossfit folds.
Then calculates the MSE of the final residual Y on residual T regression.
If model_final does not have a score method, then it raises an :exc:`.AttributeError`
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
Z: (n, d_z) matrix
Instruments for each sample
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample
sample_weight: optional(n,) vector or None (Default=None)
Weights for each sample
Returns
-------
score: float
The MSE of the final CATE model on the new data.
"""
# Replacing score from _OrthoLearner, to enforce Z to be required and improve the docstring
return super().score(Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight)
@property
def original_featurizer(self):
return self.ortho_learner_model_final_._model_final._original_featurizer
@property
def featurizer_(self):
# NOTE This is used by the inference methods and has to be the overall featurizer. intended
# for internal use by the library
return self.ortho_learner_model_final_._model_final._featurizer
@property
def model_final_(self):
# NOTE This is used by the inference methods and is more for internal use to the library
return self.ortho_learner_model_final_._model_final._model
@property
def model_cate(self):
"""
Get the fitted final CATE model.
Returns
-------
model_cate: object of type(model_final)
An instance of the model_final object that was fitted after calling fit which corresponds
to the constant marginal CATE model.
"""
return self.ortho_learner_model_final_._model_final._model
@property
def models_y_xw(self):
"""
Get the fitted models for :math:`\\E[Y | X, W]`.
Returns
-------
models_y_xw: nested list of objects of type(`model_y_xw`)
A nested list of instances of the `model_y_xw` object. The number of sublists equals the number of
monte carlo iterations; each element in a sublist corresponds to a cross-fitting
fold and is the model instance that was fitted for that training fold.
"""
return [[mdl._model_y_xw._model for mdl in mdls] for mdls in super().models_nuisance_]
@property
def models_t_xw(self):
"""
Get the fitted models for :math:`\\E[T | X, W]`.
Returns
-------
models_t_xw: nested list of objects of type(`model_t_xw`)
A nested list of instances of the `model_t_xw` object. The number of sublists equals the number of
monte carlo iterations; each element in a sublist corresponds to a cross-fitting
fold and is the model instance that was fitted for that training fold.
"""
return [[mdl._model_t_xw._model for mdl in mdls] for mdls in super().models_nuisance_]
@property
def models_t_xwz(self):
"""
Get the fitted models for :math:`\\E[T | X, W, Z]`.
Returns
-------
models_t_xwz: nested list of objects of type(`model_t_xwz`)
A nested list of instances of the `model_t_xwz` object. The number of sublists equals the number of
monte carlo iterations; each element in a sublist corresponds to a cross-fitting
fold and is the model instance that was fitted for that training fold.
"""
return [[mdl._model_t_xwz._model for mdl in mdls] for mdls in super().models_nuisance_]
@property
def nuisance_scores_y_xw(self):
"""
Get the scores for y_xw model on the out-of-sample training data
"""
return self.nuisance_scores_[0]
@property
def nuisance_scores_t_xw(self):
"""
Get the scores for t_xw model on the out-of-sample training data
"""
return self.nuisance_scores_[1]
@property
def nuisance_scores_t_xwz(self):
"""
Get the scores for t_xwz model on the out-of-sample training data
"""
return self.nuisance_scores_[2]
@property
def residuals_(self):
"""
A tuple (y_res, T_res, X, W, Z), of the residuals from the first stage estimation
along with the associated X, W and Z. Samples are not guaranteed to be in the same
order as the input order.
"""
if not hasattr(self, '_cached_values'):
raise AttributeError("Estimator is not fitted yet!")
if self._cached_values is None:
raise AttributeError("`fit` was called with `cache_values=False`. "
"Set to `True` to enable residual storage.")
Y_res, T_res = self._cached_values.nuisances
return Y_res, T_res, self._cached_values.X, self._cached_values.W, self._cached_values.Z
def cate_feature_names(self, feature_names=None):
"""
Get the output feature names.
Parameters
----------
feature_names: list of strings of length X.shape[1] or None
The names of the input features. If None and X is a dataframe, it defaults to the column names
from the dataframe.
Returns
-------
out_feature_names: list of strings or None
The names of the output features :math:`\\phi(X)`, i.e. the features with respect to which the
final constant marginal CATE model is linear. It is the names of the features that are associated
with each entry of the :meth:`coef_` parameter. Available only when the featurizer is not None and
has a method: `get_feature_names(feature_names)`. Otherwise None is returned.
"""
if self._d_x is None:
# Handles the corner case when X=None but featurizer might be not None
return None
if feature_names is None:
feature_names = self._input_names["feature_names"]
if self.original_featurizer is None:
return feature_names
return get_feature_names_or_default(self.original_featurizer, feature_names)
class DMLIV(_BaseDMLIV):
"""
The base class for parametric DMLIV estimators to estimate a CATE. It accepts three generic machine
learning models as nuisance functions:
1) model_y_xw that estimates :math:`\\E[Y | X, W]`
2) model_t_xw that estimates :math:`\\E[T | X, W]`
3) model_t_xwz that estimates :math:`\\E[T | X, W, Z]`
These are estimated in a cross-fitting manner for each sample in the training set.
Then it minimizes the square loss:
.. math::
\\sum_i (Y_i - \\E[Y|X_i] - \\theta(X_i) \\cdot (\\E[T|X_i, Z_i] - \\E[T|X_i]))^2
This loss is minimized by the model_final class, which is passed as an input.
Parameters
----------
model_y_xw : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods.
If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.
model_t_xw : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods.
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV`
will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
model_t_xwz : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods.
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV`
will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
model_final : estimator (default is :class:`.StatsModelsLinearRegression`)
final model that at fit time takes as input :math:`(Y-\\E[Y|X])`, :math:`(\\E[T|X,Z]-\\E[T|X])` and X
and supports method predict(X) that produces the CATE at X
featurizer: transformer
The transformer used to featurize the raw features when fitting the final model. Must implement
a `fit_transform` method.
fit_cate_intercept : bool, optional, default True
Whether the linear CATE model should have a constant term.
discrete_instrument: bool, optional, default False
Whether the instrument values should be treated as categorical, rather than continuous, quantities
discrete_treatment: bool, optional, default False
Whether the treatment values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
cv: int, cross-validation generator or an iterable, optional, default 2
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
mc_iters: int, optional (default=None)
The number of times to rerun the first stage models to reduce the variance of the nuisances.
mc_agg: {'mean', 'median'}, optional (default='mean')
How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
cross-fitting.
"""
def __init__(self, *,
model_y_xw="auto",
model_t_xw="auto",
model_t_xwz="auto",
model_final=StatsModelsLinearRegression(fit_intercept=False),
featurizer=None,
fit_cate_intercept=True,
discrete_treatment=False,
discrete_instrument=False,
categories='auto',
cv=2,
mc_iters=None,
mc_agg='mean',
random_state=None):
self.model_y_xw = clone(model_y_xw, safe=False)
self.model_t_xw = clone(model_t_xw, safe=False)
self.model_t_xwz = clone(model_t_xwz, safe=False)
self.model_final = clone(model_final, safe=False)
self.featurizer = clone(featurizer, safe=False)
self.fit_cate_intercept = fit_cate_intercept
super().__init__(discrete_treatment=discrete_treatment,
discrete_instrument=discrete_instrument,
categories=categories,
cv=cv,
mc_iters=mc_iters,
mc_agg=mc_agg,
random_state=random_state)
def _gen_featurizer(self):
return clone(self.featurizer, safe=False)
def _gen_model_y_xw(self):
if self.model_y_xw == 'auto':
model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_y_xw = clone(self.model_y_xw, safe=False)
return _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(),
False, False)
def _gen_model_t_xw(self):
if self.model_t_xw == 'auto':
if self.discrete_treatment:
model_t_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t_xw = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t_xw = clone(self.model_t_xw, safe=False)
return _FirstStageWrapper(model_t_xw, False, self._gen_featurizer(),
False, self.discrete_treatment)
def _gen_model_t_xwz(self):
if self.model_t_xwz == 'auto':
if self.discrete_treatment:
model_t_xwz = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t_xwz = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t_xwz = clone(self.model_t_xwz, safe=False)
return _FirstStageWrapper(model_t_xwz, False, self._gen_featurizer(),
False, self.discrete_treatment)
def _gen_model_final(self):
return clone(self.model_final, safe=False)
def _gen_ortho_learner_model_nuisance(self):
return _BaseDMLIVModelNuisance(self._gen_model_y_xw(), self._gen_model_t_xw(), self._gen_model_t_xwz())
def _gen_ortho_learner_model_final(self):
return _BaseDMLIVModelFinal(_FinalWrapper(self._gen_model_final(),
self.fit_cate_intercept,
self._gen_featurizer(),
False))
@property
def bias_part_of_coef(self):
return self.ortho_learner_model_final_._model_final._fit_cate_intercept
@property
def fit_cate_intercept_(self):
return self.ortho_learner_model_final_._model_final._fit_cate_intercept
def shap_values(self, X, *, feature_names=None, treatment_names=None, output_names=None, background_samples=100):
if hasattr(self, "featurizer_") and self.featurizer_ is not None:
X = self.featurizer_.transform(X)
feature_names = self.cate_feature_names(feature_names)
return _shap_explain_joint_linear_model_cate(self.model_final_, X, self._d_t, self._d_y,
self.bias_part_of_coef,
feature_names=feature_names, treatment_names=treatment_names,
output_names=output_names,
input_names=self._input_names,
background_samples=background_samples)
shap_values.__doc__ = LinearCateEstimator.shap_values.__doc__
@property
def coef_(self):
""" The coefficients in the linear model of the constant marginal treatment
effect.
Returns
-------
coef: (n_x,) or (n_t, n_x) or (n_y, n_t, n_x) array like
Where n_x is the number of features that enter the final model (either the
dimension of X or the dimension of featurizer.fit_transform(X) if the CATE
estimator has a featurizer.), n_t is the number of treatments, n_y is
the number of outcomes. Dimensions are omitted if the original input was
a vector and not a 2D array. For binary treatment the n_t dimension is
also omitted.
"""
return parse_final_model_params(self.model_final_.coef_, self.model_final_.intercept_,
self._d_y, self._d_t, self._d_t_in, self.bias_part_of_coef,
self.fit_cate_intercept_)[0]
@property
def intercept_(self):
""" The intercept in the linear model of the constant marginal treatment
effect.
Returns
-------
intercept: float or (n_y,) or (n_y, n_t) array like
Where n_t is the number of treatments, n_y is
the number of outcomes. Dimensions are omitted if the original input was
a vector and not a 2D array. For binary treatment the n_t dimension is
also omitted.
"""
if not self.fit_cate_intercept_:
raise AttributeError("No intercept was fitted!")
return parse_final_model_params(self.model_final_.coef_, self.model_final_.intercept_,
self._d_y, self._d_t, self._d_t_in, self.bias_part_of_coef,
self.fit_cate_intercept_)[1]
def summary(self, decimals=3, feature_names=None, treatment_names=None, output_names=None):
""" The summary of coefficient and intercept in the linear model of the constant marginal treatment
effect.
Parameters
----------
decimals: optional int (default=3)
Number of decimal places to round each column to.
feature_names: optional list of strings or None (default is None)
The input of the feature names
treatment_names: optional list of strings or None (default is None)
The names of the treatments
output_names: optional list of strings or None (default is None)
The names of the outputs
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
"""
# Get input names
treatment_names = self.cate_treatment_names(treatment_names)
output_names = self.cate_output_names(output_names)
feature_names = self.cate_feature_names(feature_names)
# Summary
smry = Summary()
smry.add_extra_txt(["<sub>A linear parametric conditional average treatment effect (CATE) model was fitted:",
"$Y = \\Theta(X)\\cdot T + g(X, W) + \\epsilon$",
"where for every outcome $i$ and treatment $j$ the CATE $\\Theta_{ij}(X)$ has the form:",
"$\\Theta_{ij}(X) = \\phi(X)' coef_{ij} + cate\\_intercept_{ij}$",
"where $\\phi(X)$ is the output of the `featurizer` or $X$ if `featurizer`=None. "
"Coefficient Results table portrays the $coef_{ij}$ parameter vector for "
"each outcome $i$ and treatment $j$. "
"Intercept Results table portrays the $cate\\_intercept_{ij}$ parameter.</sub>"])
d_t = self._d_t[0] if self._d_t else 1
d_y = self._d_y[0] if self._d_y else 1
def _reshape_array(arr, type):
if np.isscalar(arr):
arr = np.array([arr])
if type == 'coefficient':
arr = np.moveaxis(arr, -1, 0)
arr = arr.reshape(-1, 1)
return arr
# coefficient
try:
if self.coef_.size == 0: # X is None
raise AttributeError("X is None, please call intercept_inference to learn the constant!")
else:
coef_array = np.round(_reshape_array(self.coef_, "coefficient"), decimals)
coef_headers = ["point_estimate"]
if d_t > 1 and d_y > 1:
index = list(product(feature_names, output_names, treatment_names))
elif d_t > 1:
index = list(product(feature_names, treatment_names))
elif d_y > 1:
index = list(product(feature_names, output_names))
else:
index = list(product(feature_names))
coef_stubs = ["|".join(ind_value) for ind_value in index]
coef_title = 'Coefficient Results'
smry.add_table(coef_array, coef_headers, coef_stubs, coef_title)
except Exception as e:
print("Coefficient Results: ", str(e))
# intercept
try:
if not self.fit_cate_intercept:
raise AttributeError("No intercept was fitted!")
else:
intercept_array = np.round(_reshape_array(self.intercept_, "intercept"), decimals)
intercept_headers = ["point_estimate"]
if d_t > 1 and d_y > 1:
index = list(product(["cate_intercept"], output_names, treatment_names))
elif d_t > 1:
index = list(product(["cate_intercept"], treatment_names))
elif d_y > 1:
index = list(product(["cate_intercept"], output_names))
else:
index = list(product(["cate_intercept"]))
intercept_stubs = ["|".join(ind_value) for ind_value in index]
intercept_title = 'CATE Intercept Results'
smry.add_table(intercept_array, intercept_headers, intercept_stubs, intercept_title)
except Exception as e:
print("CATE Intercept Results: ", str(e))
if len(smry.tables) > 0:
return smry
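# Illustrative sketch (not part of the original module): fitting DMLIV with gradient
# boosting first stages and inspecting the linear final model. The data arguments and
# model choices here are hypothetical.
def _example_dmliv_usage(Y, T, Z, X):
    from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
    est = DMLIV(model_y_xw=GradientBoostingRegressor(),
                model_t_xw=GradientBoostingClassifier(),
                model_t_xwz=GradientBoostingClassifier(),
                discrete_treatment=True, discrete_instrument=True, cv=3)
    est.fit(Y, T, Z=Z, X=X)
    print(est.summary())                      # coefficient and CATE-intercept tables
    return est.coef_, est.intercept_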
class NonParamDMLIV(_BaseDMLIV):
"""
The base class for non-parametric DMLIV that allows for an arbitrary square loss based ML
method in the final stage of the DMLIV algorithm. The method has to support
sample weights, i.e. its fit method must accept a sample_weight argument (e.g. random forests):
fit(X, y, sample_weight=None)
It achieves this by re-writing the final stage square loss of the DMLIV algorithm as:
.. math ::
\\sum_i (\\E[T|X_i, Z_i] - \\E[T|X_i])^2 \\cdot ((Y_i - \\E[Y|X_i])/(\\E[T|X_i, Z_i] - \\E[T|X_i]) - \\theta(X_i))^2
Then this can be viewed as a weighted square loss regression, where the target label is
.. math ::
\\tilde{Y}_i = (Y_i - \\E[Y|X_i])/(\\E[T|X_i, Z_i] - \\E[T|X_i])
and each sample has a weight of
.. math ::
V(X_i) = (\\E[T|X_i, Z_i] - \\E[T|X_i])^2
Thus we can call any regression model with inputs:
fit(X, :math:`\\tilde{Y}_i`, sample_weight= :math:`V(X_i)`)
Parameters
----------
model_y_xw : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods.
If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.
model_t_xw : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[T | X, W]`. Must support `fit` and either `predict` or `predict_proba` methods,
depending on whether the treatment is discrete.
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV`
will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
model_t_xwz : estimator or 'auto' (default is 'auto')
model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and either `predict` or `predict_proba`
methods, depending on whether the treatment is discrete.
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV`
will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
model_final : estimator
final model for predicting :math:`\\tilde{Y}` from X with sample weights V(X)
featurizer: transformer
The transformer used to featurize the raw features when fitting the final model. Must implement
a `fit_transform` method.
discrete_treatment: bool, optional, default False
Whether the treatment values should be treated as categorical, rather than continuous, quantities
discrete_instrument: bool, optional, default False
Whether the instrument values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
cv: int, cross-validation generator or an iterable, optional, default 2
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
mc_iters: int, optional (default=None)
The number of times to rerun the first stage models to reduce the variance of the nuisances.
mc_agg: {'mean', 'median'}, optional (default='mean')
How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
cross-fitting.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
"""
def __init__(self, *,
model_y_xw="auto",
model_t_xw="auto",
model_t_xwz="auto",
model_final,
discrete_treatment=False,
discrete_instrument=False,
featurizer=None,
categories='auto',
cv=2,
mc_iters=None,
mc_agg='mean',
random_state=None):
self.model_y_xw = clone(model_y_xw, safe=False)
self.model_t_xw = clone(model_t_xw, safe=False)
self.model_t_xwz = clone(model_t_xwz, safe=False)
self.model_final = clone(model_final, safe=False)
self.featurizer = clone(featurizer, safe=False)
super().__init__(discrete_treatment=discrete_treatment,
discrete_instrument=discrete_instrument,
categories=categories,
cv=cv,
mc_iters=mc_iters,
mc_agg=mc_agg,
random_state=random_state)
def _gen_featurizer(self):
return clone(self.featurizer, safe=False)
def _gen_model_y_xw(self):
if self.model_y_xw == 'auto':
model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_y_xw = clone(self.model_y_xw, safe=False)
return _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(),
False, False)
def _gen_model_t_xw(self):
if self.model_t_xw == 'auto':
if self.discrete_treatment:
model_t_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t_xw = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t_xw = clone(self.model_t_xw, safe=False)
return _FirstStageWrapper(model_t_xw, False, self._gen_featurizer(),
False, self.discrete_treatment)
def _gen_model_t_xwz(self):
if self.model_t_xwz == 'auto':
if self.discrete_treatment:
model_t_xwz = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t_xwz = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t_xwz = clone(self.model_t_xwz, safe=False)
return _FirstStageWrapper(model_t_xwz, False, self._gen_featurizer(),
False, self.discrete_treatment)
def _gen_model_final(self):
return clone(self.model_final, safe=False)
def _gen_ortho_learner_model_nuisance(self):
return _BaseDMLIVModelNuisance(self._gen_model_y_xw(), self._gen_model_t_xw(), self._gen_model_t_xwz())
def _gen_ortho_learner_model_final(self):
return _BaseDMLIVModelFinal(_FinalWrapper(self._gen_model_final(),
False,
self._gen_featurizer(),
True))
def shap_values(self, X, *, feature_names=None, treatment_names=None, output_names=None, background_samples=100):
return _shap_explain_model_cate(self.const_marginal_effect, self.model_cate, X, self._d_t, self._d_y,
featurizer=self.featurizer_,
feature_names=feature_names,
treatment_names=treatment_names,
output_names=output_names,
input_names=self._input_names,
background_samples=background_samples)
shap_values.__doc__ = LinearCateEstimator.shap_values.__doc__
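# Small numeric sketch (not part of the original module) of the label/weight transformation
# described in the NonParamDMLIV docstring above, assuming the instrument strength
# E[T|X,Z] - E[T|X] is bounded away from zero.
def _example_nonparam_weighting(Y, Y_hat, T_hat_xz, T_hat_x, X):
    from sklearn.ensemble import RandomForestRegressor
    delta = T_hat_xz - T_hat_x                # E[T|X,Z] - E[T|X]
    Y_tilde = (Y - Y_hat) / delta             # transformed regression target
    V = delta ** 2                            # per-sample weight
    final = RandomForestRegressor().fit(X, Y_tilde, sample_weight=V)
    return final.predict(X)                   # pointwise estimate of theta(X)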
@deprecated("The DMLATEIV class has been deprecated by OrthoIV class with parameter `projection=False`, "
"an upcoming release will remove support for the old name")
def DMLATEIV(model_Y_W,
model_T_W,
model_Z_W,
discrete_treatment=False,
discrete_instrument=False,
categories='auto',
cv=2,
mc_iters=None,
mc_agg='mean',
random_state=None):
return OrthoIV(model_y_xw=model_Y_W,
model_t_xw=model_T_W,
model_z_xw=model_Z_W,
projection=False,
featurizer=None,
fit_cate_intercept=True,
discrete_treatment=discrete_treatment,
discrete_instrument=discrete_instrument,
categories=categories,
cv=cv,
mc_iters=mc_iters,
mc_agg=mc_agg,
random_state=random_state)
@deprecated("The DMLATEIV class has been deprecated by OrthoIV class with parameter `projection=True`, "
"an upcoming release will remove support for the old name")
def ProjectedDMLATEIV(model_Y_W,
model_T_W,
model_T_WZ,
discrete_treatment=False,
discrete_instrument=False,
categories='auto',
cv=2,
mc_iters=None,
mc_agg='mean',
random_state=None):
return OrthoIV(model_y_xw=model_Y_W,
model_t_xw=model_T_W,
model_t_xwz=model_T_WZ,
projection=True,
featurizer=None,
fit_cate_intercept=True,
discrete_treatment=discrete_treatment,
discrete_instrument=discrete_instrument,
categories=categories,
cv=cv,
mc_iters=mc_iters,
mc_agg=mc_agg,
random_state=random_state)
# File: Adyen-9.0.2/test/ModificationTest.py
import Adyen
import unittest
try:
from BaseTest import BaseTest
except ImportError:
from .BaseTest import BaseTest
class TestModifications(unittest.TestCase):
ady = Adyen.Adyen()
client = ady.client
test = BaseTest(ady)
client.username = "YourWSUser"
client.password = "YourWSPassword"
client.platform = "test"
def test_capture_success(self):
request = {}
request['merchantAccount'] = "YourMerchantAccount"
request['reference'] = "YourReference"
request['modificationAmount'] = {"value": "1234", "currency": "EUR"}
request['originalReference'] = "YourOriginalReference"
self.ady.client = self.test.create_client_from_file(200, request,
'test/mocks/'
'capture-success'
'.json')
result = self.ady.payment.modifications_api.capture(request)
self.assertEqual("[capture-received]", result.message['response'])
def test_capture_error_167(self):
request = {}
request['merchantAccount'] = "YourMerchantAccount"
request['reference'] = "YourReference"
request['modificationAmount'] = {"value": "1234", "currency": "EUR"}
request['originalReference'] = "YourOriginalReference"
self.ady.client = self.test.create_client_from_file(422, request,
'test/mocks/'
'capture-error-167'
'.json')
self.assertRaisesRegex(
Adyen.AdyenAPIUnprocessableEntity,
"AdyenAPIUnprocessableEntity:{'status': 422, 'errorCode': '167', 'message': 'Original pspReference required for this operation', 'errorType': 'validation'}",
self.ady.payment.modifications_api.capture,
request
)
def test_cancel_or_refund_received(self):
request = {}
request['merchantAccount'] = "YourMerchantAccount"
request['reference'] = "YourReference"
request['originalReference'] = "YourOriginalReference"
self.ady.client = self.test.create_client_from_file(200, request,
'test/mocks/'
'cancelOrRefund'
'-received.json')
result = self.ady.payment.modifications_api.cancel_or_refund(request)
self.assertEqual("[cancelOrRefund-received]",
result.message['response'])
def test_refund_received(self):
request = {}
request['merchantAccount'] = "YourMerchantAccount"
request['reference'] = "YourReference"
request['originalReference'] = "YourOriginalReference"
request['modificationAmount'] = {"value": "1234", "currency": "EUR"}
self.ady.client = self.test.create_client_from_file(200, request,
'test/mocks/'
'refund-received'
'.json')
result = self.ady.payment.modifications_api.refund(request)
self.assertEqual("[refund-received]", result.message['response'])
def test_cancel_received(self):
request = {}
request['merchantAccount'] = "YourMerchantAccount"
request['reference'] = "YourReference"
request['originalReference'] = "YourOriginalReference"
self.ady.client = self.test.create_client_from_file(200, request,
'test/mocks/'
'cancel-received'
'.json')
result = self.ady.payment.modifications_api.cancel(request)
self.assertEqual("[cancel-received]", result.message['response'])
def test_adjust_authorisation_received(self):
request = {}
request['merchantAccount'] = "YourMerchantAccount"
request['reference'] = "YourReference"
request['modificationAmount'] = {"value": "1234", "currency": "EUR"}
request['originalReference'] = "YourOriginalReference"
self.ady.client = self.test.create_client_from_file(200, request,
'test/mocks/'
'adjust-'
'authorisation-'
'received.json')
result = self.ady.payment.modifications_api.adjust_authorisation(request)
self.assertEqual("[adjustAuthorisation-received]",
result.message['response'])
TestModifications.client.http_force = "requests"
suite = unittest.TestLoader().loadTestsFromTestCase(TestModifications)
unittest.TextTestRunner(verbosity=2).run(suite)
TestModifications.client.http_force = "pycurl"
TestModifications.client.http_init = False
suite = unittest.TestLoader().loadTestsFromTestCase(TestModifications)
unittest.TextTestRunner(verbosity=2).run(suite)
TestModifications.client.http_force = "other"
TestModifications.client.http_init = False
suite = unittest.TestLoader().loadTestsFromTestCase(TestModifications)
unittest.TextTestRunner(verbosity=2).run(suite)
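# Illustrative sketch (not part of the original tests): issuing a capture call with the
# same client configuration the mocked tests use above, but without a mocked HTTP layer.
# The credentials and references are placeholders.
def _example_live_capture():
    ady = Adyen.Adyen()
    ady.client.username = "YourWSUser"
    ady.client.password = "YourWSPassword"
    ady.client.platform = "test"
    request = {
        "merchantAccount": "YourMerchantAccount",
        "reference": "YourReference",
        "modificationAmount": {"value": "1234", "currency": "EUR"},
        "originalReference": "YourOriginalReference",
    }
    result = ady.payment.modifications_api.capture(request)
    return result.message.get("response")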
# File: MNN/tools/mnn_fb/PriorBox.py (from MNN_FMA-1.0.1)
# namespace: MNN
import flatbuffers
class PriorBox(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsPriorBox(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PriorBox()
x.Init(buf, n + offset)
return x
# PriorBox
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# PriorBox
def MinSizes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# PriorBox
def MinSizesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# PriorBox
def MinSizesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PriorBox
def MaxSizes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# PriorBox
def MaxSizesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# PriorBox
def MaxSizesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PriorBox
def AspectRatios(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# PriorBox
def AspectRatiosAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# PriorBox
def AspectRatiosLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PriorBox
def Variances(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# PriorBox
def VariancesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# PriorBox
def VariancesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PriorBox
def Flip(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PriorBox
def Clip(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PriorBox
def ImageWidth(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# PriorBox
def ImageHeight(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# PriorBox
def StepWidth(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# PriorBox
def StepHeight(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# PriorBox
def Offset(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
def PriorBoxStart(builder): builder.StartObject(11)
def PriorBoxAddMinSizes(builder, minSizes): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(minSizes), 0)
def PriorBoxStartMinSizesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def PriorBoxAddMaxSizes(builder, maxSizes): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(maxSizes), 0)
def PriorBoxStartMaxSizesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def PriorBoxAddAspectRatios(builder, aspectRatios): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(aspectRatios), 0)
def PriorBoxStartAspectRatiosVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def PriorBoxAddVariances(builder, variances): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(variances), 0)
def PriorBoxStartVariancesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def PriorBoxAddFlip(builder, flip): builder.PrependBoolSlot(4, flip, 0)
def PriorBoxAddClip(builder, clip): builder.PrependBoolSlot(5, clip, 0)
def PriorBoxAddImageWidth(builder, imageWidth): builder.PrependInt32Slot(6, imageWidth, 0)
def PriorBoxAddImageHeight(builder, imageHeight): builder.PrependInt32Slot(7, imageHeight, 0)
def PriorBoxAddStepWidth(builder, stepWidth): builder.PrependInt32Slot(8, stepWidth, 0)
def PriorBoxAddStepHeight(builder, stepHeight): builder.PrependInt32Slot(9, stepHeight, 0)
def PriorBoxAddOffset(builder, offset): builder.PrependFloat32Slot(10, offset, 0.0)
def PriorBoxEnd(builder): return builder.EndObject()
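# Illustrative sketch (not generated code): serializing a PriorBox table with the helper
# functions above and reading it back. The flatbuffers 1.x Python builder API is assumed
# (in that version EndVector takes the element count).
def _example_build_prior_box():
    builder = flatbuffers.Builder(0)
    PriorBoxStartMinSizesVector(builder, 2)
    builder.PrependFloat32(60.0)
    builder.PrependFloat32(30.0)
    min_sizes = builder.EndVector(2)
    PriorBoxStart(builder)
    PriorBoxAddMinSizes(builder, min_sizes)
    PriorBoxAddFlip(builder, True)
    PriorBoxAddOffset(builder, 0.5)
    builder.Finish(PriorBoxEnd(builder))
    buf = builder.Output()
    prior_box = PriorBox.GetRootAsPriorBox(bytearray(buf), 0)
    return prior_box.MinSizesLength(), prior_box.Flip(), prior_box.Offset()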
# File: lipydomics/identification/train_lipid_rt_pred.py (from LiPydomics-1.6.8)
from sqlite3 import connect
import os
import pickle
import numpy as np
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import mean_squared_error
from ..util import print_and_log
from .encoder_params import rt_lipid_classes, rt_fa_mods
def prep_encoders():
"""
prep_encoders
description:
fits and returns encoders for lipid_class, fa_mod, and adduct
returns:
c_encoder, f_encoder (sklearn.preprocessing.OneHotEncoder) -- encoders for lipid_class and fa_mod
"""
lipid_classes = [[_] for _ in rt_lipid_classes]
c_encoder = OneHotEncoder(sparse=False, handle_unknown='ignore').fit(lipid_classes)
fa_mods = [[_] for _ in rt_fa_mods]
f_encoder = OneHotEncoder(sparse=False, handle_unknown='ignore').fit(fa_mods)
return c_encoder, f_encoder
def featurize(lipid_class, lipid_nc, lipid_nu, fa_mod, c_encoder, f_encoder):
"""
featurize
description:
generates a numerical representation for a given lipid
parameters:
lipid_class (str) -- lipid class
lipid_nc (int) -- sum composition: number of carbons
lipid_nu (int) -- sum composition: number of unsaturations
fa_mod (str) -- fatty acid modifiers
c_encoder, f_encoder (sklearn.preprocessing.OneHotEncoder) -- encoders for lipid_class and fa_mod
returns:
(np.array(float)) -- feature vector
"""
lc_enc = c_encoder.transform([[lipid_class]])[0]
fm_enc = f_encoder.transform([[fa_mod]])[0]
lnc = np.array([float(lipid_nc)])
lnu = np.array([float(lipid_nu)])
return np.concatenate([lc_enc, fm_enc, lnc, lnu])
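# Illustrative sketch (not part of the original module): building one feature vector.
# The lipid class 'PC' and sum composition used here are hypothetical; the one-hot part
# is all zeros unless the class appears in encoder_params.rt_lipid_classes. Passing
# fa_mod=None mirrors how the training query calls featurize.
def _example_featurize():
    c_encoder, f_encoder = prep_encoders()
    x = featurize('PC', 34, 1, None, c_encoder, f_encoder)
    # layout: [one-hot lipid class | one-hot fa_mod | n_carbons | n_unsaturations]
    return x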
def train_new_model(cursor, bl):
"""
train_new_model
description:
trains a predictive model
parameters:
cursor (sqlite3.cursor) -- cursor for querying lipids.db
bl (file) -- build log
returns:
mdl, scaler -- trained predictive model and input scaler instances
"""
# prepare encoders
c_encoder, f_encoder = prep_encoders()
# get the raw data and featurize (encode lipid_class, fa_mod, and adduct)
qry = 'SELECT lipid_class, lipid_nc, lipid_nu, fa_mod, rt FROM measured WHERE rt IS NOT NULL'
X, y = [], []
for lc, lnc, lnu, fam, c in cursor.execute(qry).fetchall():
# only use the classes and fa_mods that are explicitly encoded
lc_ok = lc in rt_lipid_classes
fam_ok = fam is None or fam in rt_fa_mods
if lc_ok and fam_ok:
X.append(featurize(lc, lnc, lnu, fam, c_encoder, f_encoder))
y.append(float(c))
X, y = np.array(X), np.array(y)
print_and_log('X: {}'.format(X.shape), bl)
print_and_log('y: {}'.format(y.shape), bl)
# split into test/train sets, scale data (do not center)
print_and_log('splitting data into training and test sets', bl)
SSplit = ShuffleSplit(n_splits=1, test_size=0.2, random_state=1236)
for train_index, test_index in SSplit.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
print_and_log('X_train: {}'.format(X_train.shape), bl)
print_and_log('y_train: {}'.format(y_train.shape), bl)
print_and_log('X_test: {}'.format(X_test.shape), bl)
print_and_log('y_test: {}'.format(y_test.shape), bl)
print_and_log('scaling input data', bl)
scaler = StandardScaler(with_mean=False)
X_train_s = scaler.fit_transform(X_train)
print_and_log('X_train_s: {}'.format(X_train_s.shape), bl)
# train model
print_and_log('training model', bl)
model = LinearRegression()
model.fit(X_train_s, y_train)
# performance on training set
print_and_log('TRAINING SET PERFORMANCE', bl)
y_train_pred = model.predict(X_train_s)
y_train_abs_err = np.abs(y_train_pred - y_train)
print_and_log('mean absolute error: {:.2f} min'.format(np.mean(y_train_abs_err)), bl)
print_and_log('median absolute error: {:.2f} min'.format(np.median(y_train_abs_err)), bl)
print_and_log('RMSE: {:.2f} min'.format(np.sqrt(mean_squared_error(y_train, y_train_pred))), bl)
# performance on test set
print_and_log('TEST SET PERFORMANCE', bl)
y_test_pred = model.predict(scaler.transform(X_test))
y_test_abs_err = np.abs(y_test_pred - y_test)
print_and_log('mean absolute error: {:.2f} min'.format(np.mean(y_test_abs_err)), bl)
print_and_log('median absolute error: {:.2f} min'.format(np.median(y_test_abs_err)), bl)
print_and_log('RMSE: {:.2f} min'.format(np.sqrt(mean_squared_error(y_test, y_test_pred))), bl)
# save the model and the scaler
this_dir = os.path.dirname(__file__)
model_path = os.path.join(this_dir, 'lipid_rt_pred.pickle')
scaler_path = os.path.join(this_dir, 'lipid_rt_scale.pickle')
with open(model_path, 'wb') as pf1, open(scaler_path, 'wb') as pf2:
pickle.dump(model, pf1)
pickle.dump(scaler, pf2)
# return model and scaler
return model, scaler
def dump_split_data_to_files(savedir):
"""
dump_split_data_to_files
description:
assembles training/test datasets just as would be done for actual model training then dumps those into
separate .csv files: 'train.csv' and 'test.csv'
parameters:
savedir (str) -- directory to save the dumped files into
"""
# connect to database
db_path = os.path.join(os.path.dirname(__file__), 'lipids.db')
con = connect(db_path)
cur = con.cursor()
# prepare encoders
c_encoder, f_encoder = prep_encoders()
# get the raw data and featurize (encode lipid_class, fa_mod, and adduct)
qry = 'SELECT lipid_class, lipid_nc, lipid_nu, fa_mod, rt FROM measured WHERE rt IS NOT NULL'
X, y, l = [], [], []
for lc, lnc, lnu, fam, c in cur.execute(qry).fetchall():
# only use the classes and fa_mods that are explicitly encoded
lc_ok = lc in rt_lipid_classes
fam_ok = fam is None or fam in rt_fa_mods
if lc_ok and fam_ok:
X.append(featurize(lc, lnc, lnu, fam, c_encoder, f_encoder))
y.append(float(c))
l.append('{}({}{}:{})'.format(lc, fam if fam is not None else '', lnc, lnu))
X, y, l = np.array(X), np.array(y), np.array(l)
# split into test/train sets, scale data (do not center)
SSplit = ShuffleSplit(n_splits=1, test_size=0.2, random_state=1236)
for train_index, test_index in SSplit.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
l_train, l_test = l[train_index], l[test_index]
# dump each array to file
np.savetxt(os.path.join(savedir, 'X_train.csv'), X_train, delimiter=',')
np.savetxt(os.path.join(savedir, 'y_train.csv'), y_train, delimiter=',')
np.savetxt(os.path.join(savedir, 'l_train.csv'), l_train, delimiter=',', fmt='%s')
np.savetxt(os.path.join(savedir, 'X_test.csv'), X_test, delimiter=',')
np.savetxt(os.path.join(savedir, 'y_test.csv'), y_test, delimiter=',')
np.savetxt(os.path.join(savedir, 'l_test.csv'), l_test, delimiter=',', fmt='%s')
# close db connection
con.close()
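# Example call (hypothetical output directory, which must already exist; np.savetxt does
# not create folders):
#
#     dump_split_data_to_files('/tmp/lipid_rt_split')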
def main(tstamp):
""" main build function """
# connect to database
db_path = os.path.join(os.path.dirname(__file__), 'lipids.db')
con = connect(db_path)
cur = con.cursor()
# prepare encoders
c_encoder, f_encoder = prep_encoders()
build_log = os.path.join(os.path.dirname(__file__), 'builds/build_log_{}.txt'.format(tstamp))
with open(build_log, 'a') as bl:
# train a new model
print_and_log('training new predictive RT model (and input scaler) ...', bl)
model, scaler = train_new_model(cur, bl)
print_and_log('... ok', bl)
# add predicted RT to the database
print_and_log('\nadding predicted RT to database ...', bl, end=' ')
qry = 'SELECT t_id, lipid_class, lipid_nc, lipid_nu, fa_mod FROM predicted_mz'
tid_to_rt = {}
for tid, lc, lnc, lnu, fam in cur.execute(qry).fetchall():
if int(sum(c_encoder.transform([[lc]])[0])) != 0: # make sure lipid class is encodable
x = np.array([featurize(lc, lnc, lnu, fam, c_encoder, f_encoder)]).reshape(1, -1)
tid_to_rt[int(tid)] = model.predict(scaler.transform(x))[0]
qry = 'INSERT INTO predicted_rt VALUES (?, ?)'
for tid in tid_to_rt:
cur.execute(qry, (tid, tid_to_rt[tid]))
print_and_log('ok\n', bl)
# commit changes to the database and close connection
con.commit()
con.close() | PypiClean |
/Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/util/lang.py | import inspect
import threading
def to_str(obj, *fields):
"""该函数用于方便生成__repr__和__str__方法的返回内容
:param obj: 目标对象
:param fields: 目标属性,如果不指定,则会返回所有
"""
# if fields is empty, auto get fields
if not fields:
try:
fields = obj.__dict__.keys()
except AttributeError:
# maybe slots class
fields = obj.__slots__
str_buf = [
"{class_name}@{id_} <",
]
for idx, field in enumerate(fields):
if isinstance(field, str):
            # plain string: format the attribute value directly
str_buf.append("{field}={value}".format(
field=field, value=getattr(obj, field)))
elif isinstance(field, tuple):
            # (field, callback) tuple: run the value through the callback first
field, callback = field
str_buf.append("{field}={value}".format(
field=field, value=callback(getattr(obj, field))))
else:
            # other field types are not supported
            raise AttributeError("Unsupported field type: '{clazz}'".format(
clazz=field.__class__.__name__))
if idx < len(fields) - 1:
str_buf.append(", ")
str_buf.append(">")
return "".join(str_buf).format(
class_name=obj.__class__.__name__,
id_=id(obj)
)
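# Usage sketch (hypothetical Point class) -- plain field names are formatted directly,
# while (field, callback) tuples pass the value through the callback first:
#
#     class Point:
#         def __init__(self):
#             self.x, self.y = 1, [1, 2, 3]
#
#     to_str(Point(), 'x', ('y', len))   # e.g. "Point@140... <x=1, y=3>"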
def get_from_nested_dict(d, *keys, default=None):
"""从嵌套字典中获取值,如果在某一层级获取不到,则返回None
:param d: 目标字典
:param keys: 层级key列表
"""
for k in keys:
try:
d = d[k]
except KeyError:
return default
return d
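# Examples:
#
#     get_from_nested_dict({'a': {'b': 1}}, 'a', 'b')             # -> 1
#     get_from_nested_dict({'a': {'b': 1}}, 'a', 'c', default=0)  # -> 0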
def get_full_class_name(cls):
"""获取一个类的完整名称 包名.类名
"""
return cls.__module__ + "." + cls.__name__
_source_cache = {}
_source_cache_lock = threading.Lock()
def get_source_code(obj):
"""获取对象源码,inspect的这个效率啊,真是比鸭嘴笔还要efficiency!
"""
if obj not in _source_cache:
with _source_cache_lock:
if obj not in _source_cache:
_source_cache[obj] = inspect.getsource(obj)
return _source_cache[obj]
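# Note: the cache uses a double-checked locking pattern -- the key is tested again after
# acquiring the lock, so concurrent callers do not call inspect.getsource twice for the
# same object.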
def trim_paragraph(text):
if not text:
return ""
buf = []
for line in text.splitlines():
buf.append(line.strip())
return "\n".join(buf) | PypiClean |
/DiscoPlot-1.0.2.tar.gz/DiscoPlot-1.0.2/docs/_build/html/_static/underscore.js | (function(){function q(a,c,d){if(a===c)return a!==0||1/a==1/c;if(a==null||c==null)return a===c;if(a._chain)a=a._wrapped;if(c._chain)c=c._wrapped;if(a.isEqual&&b.isFunction(a.isEqual))return a.isEqual(c);if(c.isEqual&&b.isFunction(c.isEqual))return c.isEqual(a);var e=l.call(a);if(e!=l.call(c))return false;switch(e){case "[object String]":return a==String(c);case "[object Number]":return a!=+a?c!=+c:a==0?1/a==1/c:a==+c;case "[object Date]":case "[object Boolean]":return+a==+c;case "[object RegExp]":return a.source==
c.source&&a.global==c.global&&a.multiline==c.multiline&&a.ignoreCase==c.ignoreCase}if(typeof a!="object"||typeof c!="object")return false;for(var f=d.length;f--;)if(d[f]==a)return true;d.push(a);var f=0,g=true;if(e=="[object Array]"){if(f=a.length,g=f==c.length)for(;f--;)if(!(g=f in a==f in c&&q(a[f],c[f],d)))break}else{if("constructor"in a!="constructor"in c||a.constructor!=c.constructor)return false;for(var h in a)if(b.has(a,h)&&(f++,!(g=b.has(c,h)&&q(a[h],c[h],d))))break;if(g){for(h in c)if(b.has(c,
h)&&!f--)break;g=!f}}d.pop();return g}var r=this,G=r._,n={},k=Array.prototype,o=Object.prototype,i=k.slice,H=k.unshift,l=o.toString,I=o.hasOwnProperty,w=k.forEach,x=k.map,y=k.reduce,z=k.reduceRight,A=k.filter,B=k.every,C=k.some,p=k.indexOf,D=k.lastIndexOf,o=Array.isArray,J=Object.keys,s=Function.prototype.bind,b=function(a){return new m(a)};if(typeof exports!=="undefined"){if(typeof module!=="undefined"&&module.exports)exports=module.exports=b;exports._=b}else r._=b;b.VERSION="1.3.1";var j=b.each=
b.forEach=function(a,c,d){if(a!=null)if(w&&a.forEach===w)a.forEach(c,d);else if(a.length===+a.length)for(var e=0,f=a.length;e<f;e++){if(e in a&&c.call(d,a[e],e,a)===n)break}else for(e in a)if(b.has(a,e)&&c.call(d,a[e],e,a)===n)break};b.map=b.collect=function(a,c,b){var e=[];if(a==null)return e;if(x&&a.map===x)return a.map(c,b);j(a,function(a,g,h){e[e.length]=c.call(b,a,g,h)});if(a.length===+a.length)e.length=a.length;return e};b.reduce=b.foldl=b.inject=function(a,c,d,e){var f=arguments.length>2;a==
null&&(a=[]);if(y&&a.reduce===y)return e&&(c=b.bind(c,e)),f?a.reduce(c,d):a.reduce(c);j(a,function(a,b,i){f?d=c.call(e,d,a,b,i):(d=a,f=true)});if(!f)throw new TypeError("Reduce of empty array with no initial value");return d};b.reduceRight=b.foldr=function(a,c,d,e){var f=arguments.length>2;a==null&&(a=[]);if(z&&a.reduceRight===z)return e&&(c=b.bind(c,e)),f?a.reduceRight(c,d):a.reduceRight(c);var g=b.toArray(a).reverse();e&&!f&&(c=b.bind(c,e));return f?b.reduce(g,c,d,e):b.reduce(g,c)};b.find=b.detect=
function(a,c,b){var e;E(a,function(a,g,h){if(c.call(b,a,g,h))return e=a,true});return e};b.filter=b.select=function(a,c,b){var e=[];if(a==null)return e;if(A&&a.filter===A)return a.filter(c,b);j(a,function(a,g,h){c.call(b,a,g,h)&&(e[e.length]=a)});return e};b.reject=function(a,c,b){var e=[];if(a==null)return e;j(a,function(a,g,h){c.call(b,a,g,h)||(e[e.length]=a)});return e};b.every=b.all=function(a,c,b){var e=true;if(a==null)return e;if(B&&a.every===B)return a.every(c,b);j(a,function(a,g,h){if(!(e=
e&&c.call(b,a,g,h)))return n});return e};var E=b.some=b.any=function(a,c,d){c||(c=b.identity);var e=false;if(a==null)return e;if(C&&a.some===C)return a.some(c,d);j(a,function(a,b,h){if(e||(e=c.call(d,a,b,h)))return n});return!!e};b.include=b.contains=function(a,c){var b=false;if(a==null)return b;return p&&a.indexOf===p?a.indexOf(c)!=-1:b=E(a,function(a){return a===c})};b.invoke=function(a,c){var d=i.call(arguments,2);return b.map(a,function(a){return(b.isFunction(c)?c||a:a[c]).apply(a,d)})};b.pluck=
function(a,c){return b.map(a,function(a){return a[c]})};b.max=function(a,c,d){if(!c&&b.isArray(a))return Math.max.apply(Math,a);if(!c&&b.isEmpty(a))return-Infinity;var e={computed:-Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;b>=e.computed&&(e={value:a,computed:b})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);if(!c&&b.isEmpty(a))return Infinity;var e={computed:Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;b<e.computed&&(e={value:a,computed:b})});
return e.value};b.shuffle=function(a){var b=[],d;j(a,function(a,f){f==0?b[0]=a:(d=Math.floor(Math.random()*(f+1)),b[f]=b[d],b[d]=a)});return b};b.sortBy=function(a,c,d){return b.pluck(b.map(a,function(a,b,g){return{value:a,criteria:c.call(d,a,b,g)}}).sort(function(a,b){var c=a.criteria,d=b.criteria;return c<d?-1:c>d?1:0}),"value")};b.groupBy=function(a,c){var d={},e=b.isFunction(c)?c:function(a){return a[c]};j(a,function(a,b){var c=e(a,b);(d[c]||(d[c]=[])).push(a)});return d};b.sortedIndex=function(a,
c,d){d||(d=b.identity);for(var e=0,f=a.length;e<f;){var g=e+f>>1;d(a[g])<d(c)?e=g+1:f=g}return e};b.toArray=function(a){return!a?[]:a.toArray?a.toArray():b.isArray(a)?i.call(a):b.isArguments(a)?i.call(a):b.values(a)};b.size=function(a){return b.toArray(a).length};b.first=b.head=function(a,b,d){return b!=null&&!d?i.call(a,0,b):a[0]};b.initial=function(a,b,d){return i.call(a,0,a.length-(b==null||d?1:b))};b.last=function(a,b,d){return b!=null&&!d?i.call(a,Math.max(a.length-b,0)):a[a.length-1]};b.rest=
b.tail=function(a,b,d){return i.call(a,b==null||d?1:b)};b.compact=function(a){return b.filter(a,function(a){return!!a})};b.flatten=function(a,c){return b.reduce(a,function(a,e){if(b.isArray(e))return a.concat(c?e:b.flatten(e));a[a.length]=e;return a},[])};b.without=function(a){return b.difference(a,i.call(arguments,1))};b.uniq=b.unique=function(a,c,d){var d=d?b.map(a,d):a,e=[];b.reduce(d,function(d,g,h){if(0==h||(c===true?b.last(d)!=g:!b.include(d,g)))d[d.length]=g,e[e.length]=a[h];return d},[]);
return e};b.union=function(){return b.uniq(b.flatten(arguments,true))};b.intersection=b.intersect=function(a){var c=i.call(arguments,1);return b.filter(b.uniq(a),function(a){return b.every(c,function(c){return b.indexOf(c,a)>=0})})};b.difference=function(a){var c=b.flatten(i.call(arguments,1));return b.filter(a,function(a){return!b.include(c,a)})};b.zip=function(){for(var a=i.call(arguments),c=b.max(b.pluck(a,"length")),d=Array(c),e=0;e<c;e++)d[e]=b.pluck(a,""+e);return d};b.indexOf=function(a,c,
d){if(a==null)return-1;var e;if(d)return d=b.sortedIndex(a,c),a[d]===c?d:-1;if(p&&a.indexOf===p)return a.indexOf(c);for(d=0,e=a.length;d<e;d++)if(d in a&&a[d]===c)return d;return-1};b.lastIndexOf=function(a,b){if(a==null)return-1;if(D&&a.lastIndexOf===D)return a.lastIndexOf(b);for(var d=a.length;d--;)if(d in a&&a[d]===b)return d;return-1};b.range=function(a,b,d){arguments.length<=1&&(b=a||0,a=0);for(var d=arguments[2]||1,e=Math.max(Math.ceil((b-a)/d),0),f=0,g=Array(e);f<e;)g[f++]=a,a+=d;return g};
var F=function(){};b.bind=function(a,c){var d,e;if(a.bind===s&&s)return s.apply(a,i.call(arguments,1));if(!b.isFunction(a))throw new TypeError;e=i.call(arguments,2);return d=function(){if(!(this instanceof d))return a.apply(c,e.concat(i.call(arguments)));F.prototype=a.prototype;var b=new F,g=a.apply(b,e.concat(i.call(arguments)));return Object(g)===g?g:b}};b.bindAll=function(a){var c=i.call(arguments,1);c.length==0&&(c=b.functions(a));j(c,function(c){a[c]=b.bind(a[c],a)});return a};b.memoize=function(a,
c){var d={};c||(c=b.identity);return function(){var e=c.apply(this,arguments);return b.has(d,e)?d[e]:d[e]=a.apply(this,arguments)}};b.delay=function(a,b){var d=i.call(arguments,2);return setTimeout(function(){return a.apply(a,d)},b)};b.defer=function(a){return b.delay.apply(b,[a,1].concat(i.call(arguments,1)))};b.throttle=function(a,c){var d,e,f,g,h,i=b.debounce(function(){h=g=false},c);return function(){d=this;e=arguments;var b;f||(f=setTimeout(function(){f=null;h&&a.apply(d,e);i()},c));g?h=true:
a.apply(d,e);i();g=true}};b.debounce=function(a,b){var d;return function(){var e=this,f=arguments;clearTimeout(d);d=setTimeout(function(){d=null;a.apply(e,f)},b)}};b.once=function(a){var b=false,d;return function(){if(b)return d;b=true;return d=a.apply(this,arguments)}};b.wrap=function(a,b){return function(){var d=[a].concat(i.call(arguments,0));return b.apply(this,d)}};b.compose=function(){var a=arguments;return function(){for(var b=arguments,d=a.length-1;d>=0;d--)b=[a[d].apply(this,b)];return b[0]}};
b.after=function(a,b){return a<=0?b():function(){if(--a<1)return b.apply(this,arguments)}};b.keys=J||function(a){if(a!==Object(a))throw new TypeError("Invalid object");var c=[],d;for(d in a)b.has(a,d)&&(c[c.length]=d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=b.methods=function(a){var c=[],d;for(d in a)b.isFunction(a[d])&&c.push(d);return c.sort()};b.extend=function(a){j(i.call(arguments,1),function(b){for(var d in b)a[d]=b[d]});return a};b.defaults=function(a){j(i.call(arguments,
1),function(b){for(var d in b)a[d]==null&&(a[d]=b[d])});return a};b.clone=function(a){return!b.isObject(a)?a:b.isArray(a)?a.slice():b.extend({},a)};b.tap=function(a,b){b(a);return a};b.isEqual=function(a,b){return q(a,b,[])};b.isEmpty=function(a){if(b.isArray(a)||b.isString(a))return a.length===0;for(var c in a)if(b.has(a,c))return false;return true};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=o||function(a){return l.call(a)=="[object Array]"};b.isObject=function(a){return a===Object(a)};
b.isArguments=function(a){return l.call(a)=="[object Arguments]"};if(!b.isArguments(arguments))b.isArguments=function(a){return!(!a||!b.has(a,"callee"))};b.isFunction=function(a){return l.call(a)=="[object Function]"};b.isString=function(a){return l.call(a)=="[object String]"};b.isNumber=function(a){return l.call(a)=="[object Number]"};b.isNaN=function(a){return a!==a};b.isBoolean=function(a){return a===true||a===false||l.call(a)=="[object Boolean]"};b.isDate=function(a){return l.call(a)=="[object Date]"};
b.isRegExp=function(a){return l.call(a)=="[object RegExp]"};b.isNull=function(a){return a===null};b.isUndefined=function(a){return a===void 0};b.has=function(a,b){return I.call(a,b)};b.noConflict=function(){r._=G;return this};b.identity=function(a){return a};b.times=function(a,b,d){for(var e=0;e<a;e++)b.call(d,e)};b.escape=function(a){return(""+a).replace(/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,""").replace(/'/g,"'").replace(/\//g,"/")};b.mixin=function(a){j(b.functions(a),
function(c){K(c,b[c]=a[c])})};var L=0;b.uniqueId=function(a){var b=L++;return a?a+b:b};b.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g};var t=/.^/,u=function(a){return a.replace(/\\\\/g,"\\").replace(/\\'/g,"'")};b.template=function(a,c){var d=b.templateSettings,d="var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push('"+a.replace(/\\/g,"\\\\").replace(/'/g,"\\'").replace(d.escape||t,function(a,b){return"',_.escape("+
u(b)+"),'"}).replace(d.interpolate||t,function(a,b){return"',"+u(b)+",'"}).replace(d.evaluate||t,function(a,b){return"');"+u(b).replace(/[\r\n\t]/g," ")+";__p.push('"}).replace(/\r/g,"\\r").replace(/\n/g,"\\n").replace(/\t/g,"\\t")+"');}return __p.join('');",e=new Function("obj","_",d);return c?e(c,b):function(a){return e.call(this,a,b)}};b.chain=function(a){return b(a).chain()};var m=function(a){this._wrapped=a};b.prototype=m.prototype;var v=function(a,c){return c?b(a).chain():a},K=function(a,c){m.prototype[a]=
function(){var a=i.call(arguments);H.call(a,this._wrapped);return v(c.apply(b,a),this._chain)}};b.mixin(b);j("pop,push,reverse,shift,sort,splice,unshift".split(","),function(a){var b=k[a];m.prototype[a]=function(){var d=this._wrapped;b.apply(d,arguments);var e=d.length;(a=="shift"||a=="splice")&&e===0&&delete d[0];return v(d,this._chain)}});j(["concat","join","slice"],function(a){var b=k[a];m.prototype[a]=function(){return v(b.apply(this._wrapped,arguments),this._chain)}});m.prototype.chain=function(){this._chain=
true;return this};m.prototype.value=function(){return this._wrapped}}).call(this); | PypiClean |
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Lang/cs-CZ.js | * @requires OpenLayers/Lang.js
*/
/**
* Namespace: OpenLayers.Lang["cs-CZ"]
* Dictionary for Česky. Keys for entries are used in calls to
* <OpenLayers.Lang.translate>. Entry bodies are normal strings or
* strings formatted for use with <OpenLayers.String.format> calls.
*/
OpenLayers.Lang["cs-CZ"] = OpenLayers.Util.applyDefaults({
'unhandledRequest': "Nezpracovaná návratová hodnota ${statusText}",
'Permalink': "Trvalý odkaz",
'Overlays': "Překryvné vrstvy",
'Base Layer': "Podkladové vrstvy",
'noFID': "Nelze aktualizovat prvek, pro který neexistuje FID.",
'browserNotSupported': "Váš prohlížeč nepodporuje vykreslování vektorů. Momentálně podporované nástroje jsou::\n${renderers}",
'minZoomLevelError': "Vlastnost minZoomLevel by se měla používat pouze s potomky FixedZoomLevels vrstvami. To znamená, že vrstva wfs kontroluje, zda-li minZoomLevel není zbytek z minulosti.Nelze to ovšem vyjmout bez možnosti, že bychom rozbili aplikace postavené na OL, které by na tom mohly záviset. Proto tuto vlastnost nedoporučujeme používat -- kontrola minZoomLevel bude odstraněna ve verzi 3.0. Použijte prosím raději nastavení min/max podle příkaldu popsaného na: http://trac.openlayers.org/wiki/SettingZoomLevels",
'commitSuccess': "WFS Transaction: ÚSPĚCH ${response}",
'commitFailed': "WFS Transaction: CHYBA ${response}",
'googleWarning': "Nepodařilo se správně načíst vrstvu Google.\x3cbr\x3e\x3cbr\x3eAbyste se zbavili této zprávy, zvolte jinou základní vrstvu v přepínači vrstev.\x3cbr\x3e\x3cbr\x3eTo se většinou stává, pokud nebyl načten skript, nebo neobsahuje správný klíč pro API pro tuto stránku.\x3cbr\x3e\x3cbr\x3eVývojáři: Pro pomoc, aby tohle fungovalo , \x3ca href=\'http://trac.openlayers.org/wiki/Google\' target=\'_blank\'\x3eklikněte sem\x3c/a\x3e",
'getLayerWarning': "The ${layerType} Layer was unable to load correctly.\x3cbr\x3e\x3cbr\x3eTo get rid of this message, select a new BaseLayer in the layer switcher in the upper-right corner.\x3cbr\x3e\x3cbr\x3eMost likely, this is because the ${layerLib} library script was either not correctly included.\x3cbr\x3e\x3cbr\x3eDevelopers: For help getting this working correctly, \x3ca href=\'http://trac.openlayers.org/wiki/${layerLib}\' target=\'_blank\'\x3eclick here\x3c/a\x3e",
'Scale = 1 : ${scaleDenom}': "Měřítko = 1 : ${scaleDenom}",
'reprojectDeprecated': "Použil jste volbu \'reproject\' ve vrstvě ${layerName}. Tato volba není doporučená: byla zde proto, aby bylo možno zobrazovat data z okomerčních serverů, ale tato funkce je nyní zajištěna pomocí podpory Spherical Mercator. Více informací naleznete na http://trac.openlayers.org/wiki/SphericalMercator.",
'methodDeprecated': "Tato metoda je zavržená a bude ve verzi 3.0 odstraněna. Prosím, použijte raději ${newMethod}."
}); | PypiClean |
/JWaves-0.0.1.tar.gz/JWaves-0.0.1/src/RPA.py | import numpy as np
import warnings
import time
def cosd(angle):
return np.cos(np.deg2rad(angle))
def sind(angle):
return np.sin(np.deg2rad(angle))
class Units:
    kb = 1.38065e-23  # Boltzmann constant, in J/K
    muB_J = 9.274e-24  # Bohr magneton, in J/T
    meV = 1.602176565e-19/1000  # 1 meV, in J
    mu0 = 4*np.pi*1e-7  # vacuum permeability, in T*m/A
def calculateKelvinToMeV(T):
return np.reciprocal(Units.kb*T)
def getTimeUnit(timeRemaining):
if timeRemaining>60*60*100:
timeRemaining*=1./(60.0*60*24)
unit = 'days.. go home!'
elif timeRemaining>60*100: # convert to hours due to more than 100 minutes
timeRemaining*=1./(60.0*60)
unit = 'hr'
elif timeRemaining>100:
timeRemaining*=1./60.0
unit = 'min'
else:
unit = 'sec'
return timeRemaining, unit
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
if hasattr(printProgressBar,'_iterations'):
if iteration == printProgressBar._iterations[-1]:
return
if iteration == 0 or (iteration == 1 and not hasattr(printProgressBar,'_time')):
printProgressBar._time = [time.time()]
printProgressBar._iterations = [iteration]
timeEstimate = ''
else:
printProgressBar._time.append(time.time())
printProgressBar._iterations.append(iteration)
timeDelta = np.diff(-np.asarray(printProgressBar._time))
iterationDelta = np.diff(-np.asarray(printProgressBar._iterations))
timeRemaining = np.mean(timeDelta/iterationDelta)*(total-iteration)
timeRemaining,unit = getTimeUnit(timeRemaining)
timeEstimate = '({:.2f} {:})'.format(timeRemaining,unit)
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {timeEstimate} {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
totalTime = printProgressBar._time[-1]-printProgressBar._time[0]
totalTime,unit = getTimeUnit(totalTime)
tt = '({:.2f} {:})'.format(totalTime,unit)
print(f'\r{prefix} |{bar}| DONE {tt} {suffix}', end = printEnd)
print()
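# Usage sketch: report progress over a fixed number of iterations (do_work is a
# placeholder for the real workload):
#
#     for i in range(100):
#         do_work()
#         printProgressBar(i + 1, 100, prefix='Progress', length=50)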
class Coupling():
def __init__(self,atom1,atom2,atom1Pos,atom2Pos,dl,distanceVector,distance,exchange=None,doubleCounted = False, label = None):
"""Coupling class
Args:
- atom1 (int): Atom index of first atom
- atom2 (int): Atom index of second atom
- atom1Pos (list): Fractional position of atom 1 within unit cell
- atom2Pos (list): Fractional position of atom 2 within unit cell
- dl (list): Lattice displacement to second atom
- distanceVector (list): Distance vector in AA
- distance (float): Real space distance between atoms
Kwargs:
- exchange (list): Exchange matrix (default None)
        - doubleCounted (bool): Flag marking the coupling as already double counted (default False)
        - label (str): Optional label for the coupling (default None)
"""
self.atom1 = atom1
self.atom2 = atom2
self.atom1Pos = atom1Pos
self.atom2Pos = atom2Pos
self.dl = dl
self.distanceVector = distanceVector
self.distance = distance
self.exchange = exchange
self.label = label
self.isDouble = doubleCounted # double counting flag
@property
def exchange(self):
return self._exchange
@exchange.getter
def exchange(self):
return self._exchange
@exchange.setter
def exchange(self,newExchange):
self._exchange = newExchange
if newExchange is None:
self.type = ''
elif np.all(np.isclose(np.diag(np.diag(newExchange)),newExchange)):# exchange is diagonal
self.type = 'Heisenberg'
elif np.all(np.isclose(newExchange,newExchange.T)):
self.type = 'Symmetric'
elif np.all(np.isclose(newExchange,-newExchange.T)):
self.type = 'Antisymmetric'
else:
self.type = ''
def __eq__(self,other):
"""Check equivalence of couplings"""
# All is the same
idxTestDirect = self.atom1 == other.atom1 and self.atom2 == other.atom2
idxTestDirect *= np.all(np.isclose(self.dl,other.dl))
# opposite direction
        idxTestOpposite = self.atom1 == other.atom2 and self.atom2 == other.atom1
idxTestOpposite *= np.all(np.isclose(self.dl,-other.dl))
if not (idxTestDirect or idxTestOpposite):
#print('idxtest')
#print(idxTestDirect)
#print(idxTestOpposite)
return False
if (self.exchange is None) ^ (other.exchange is None):
# One of the exchanges is zero
#print('single None')
return False
if (self.exchange is None) and (other.exchange is None):
#print('Both none')
return True
if np.all(np.isclose(self.exchange,other.exchange)):
#print('exchange is the same')
return True
return False
def __str__(self):
return "Coupling between {:} and {:} (distance {:} - dl = {:})".format(self.atom1,self.atom2,self.distance,self.dl)+(not self.exchange is None)*(" with exchange\n"+str(self.exchange))
class Lattice: # From Add_ChainS1N1 (without Active, site, label and equivalence)
def __init__(self,S=None,g=None, positions = None, active=None, lattice = None,
label=None,site=None,equivalence = None):
self.g = g
self.S = S
self.active = active
self.r = np.asarray(positions).reshape(-1,3)
self.Natom = len(self.S)
self.label = label
self.site = site
if equivalence is None:
self.equivalence = np.zeros(len(positions),dtype=int)
else:
self.equivalence = equivalence
self.couplings = []
lattice = np.asarray(lattice)
if len(lattice.shape)>1:
self.A = lattice
else:
self.generateLattice(*lattice)
self.calculateB()
def generateLattice(self,a,b,c,alpha,beta,gamma):
self.a = a
self.b = b
self.c = c
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.realVectorA = np.array([self.a,0,0])
self.realVectorB = self.b*np.array([cosd(self.gamma),sind(self.gamma),0.0])#np.dot(np.array([self.b,0,0]),rotationMatrix(0,0,self.gamma))
self.realVectorC = self.c*np.array([cosd(self.beta),(cosd(self.alpha)-cosd(self.beta)*cosd(self.gamma))/sind(self.gamma),
np.sqrt(1-cosd(self.beta)**2-((cosd(self.alpha)-cosd(self.beta)*cosd(self.gamma))/sind(self.gamma))**2)])
self.A = np.asarray([self.realVectorA,self.realVectorB,self.realVectorC]).T
def calculateB(self):
vol = np.dot(self.A[2],np.cross(self.A[0],self.A[1]))
self.B = np.pi*2*np.asarray([np.cross(a1,a2)/vol for a1,a2 in zip([self.A[1],self.A[2],self.A[0]],[self.A[2],self.A[0],self.A[1]])])
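    # calculateB uses the standard reciprocal-lattice construction
    #     b_i = 2*pi * (a_j x a_k) / V,   with V = a_1 . (a_2 x a_3),
    # which yields b_i . a_j = 2*pi * delta_ij.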
def generateCouplings(self,maxDistance):
# Find number of unit cells along a, b, and c for calculation
norm = np.linalg.norm(self.A,axis=1)
NVector = np.ceil(maxDistance/norm).astype(int)+1
couplings = []
for c in range(-NVector[2],NVector[2]+1):
for b in range(-NVector[1],NVector[1]+1):
for a in range(-NVector[0],NVector[0]+1):#range(NVector[2]):#r
for I,atom1 in enumerate(self.r):
for J,atom2 in enumerate(self.r):
atom2Pos = np.dot(self.A,atom2+[a,b,c])#+np.dot([a,b,c],self.A)
dl = np.array([a,b,c])
atom1Pos = np.dot(self.A,atom1)
d = atom2Pos-atom1Pos
normD = np.linalg.norm(d)
if np.isclose(normD,0):
continue
if normD<maxDistance:
# check if opposite coupling is already present
idxes = [idx for idx in couplings if idx.atom1==J and idx.atom2 == I and (np.all(idx.dl==dl) or np.all(idx.dl==-dl))]
if not np.any([np.isclose(normD,idx.distance) for idx in idxes]):
couplings.append(Coupling(I,J,atom1Pos,atom2Pos,dl,d,normD))
# Sort couplings in increasing distance
couplings.sort(key=lambda coupling: coupling.distance)
self.couplings = couplings
def checkDoublCounting(self):
testMatrix = np.full((len(self.couplings),len(self.couplings)),False)
for i,c1 in enumerate(self.couplings):
for j,c2 in enumerate(self.couplings):#[:i]):
testMatrix[i,j] = c1==c2
return testMatrix
def addExchangeInteractions(self,Js,distances,labels=None, atol=0.001):
"""add exchange interactions
Args:
- Js (list): List of J-matrices
- distances (list): Distance of J coupling
Kwargs:
- labels (list): List of labels corresponding to the provided couplings (default None)
        - atol (float): absolute tolerance for comparing distances (default 0.001)
"""
if not hasattr(self,'couplings'):
raise AttributeError('Lattice has no couplings generated. Please invoke .generateCouplings(maxDistance)')
if not len(Js) == len(distances):
raise AttributeError('Provided list of Js does not match length of distances....')
if not labels is None:
labels = np.asarray(labels)
Js = np.asarray(Js)
for coupling in self.couplings:
if self.site[coupling.atom1] == self.site[coupling.atom2]: # Lattice.Site(atom1)==Lattice.Site(atom2)
if self.site[coupling.atom1] == 1: # Not sure what this checks....
# Find J corresponding to current coupling, from distance
comparison = np.isclose(coupling.distance,distances,atol=atol)
#print(coupling)
if np.sum(comparison) == 0:
J = None
#if not np.sum(comparison) == 1:
# raise AttributeError('Number of couplings found is not equal 1. Found',np.sum(comparison))
else:
J = Js[comparison][0] ## TODO: move exchange to coupling
if not labels is None:
label = labels[comparison][0]
else:
label = None
coupling.exchange = J
coupling.label = label
self.couplings = [coupling for coupling in self.couplings if not coupling.exchange is None]
def buildDipolarInteractionMatrix(self):
for i,coupling in enumerate(self.couplings):
atom1 = coupling.atom1
atom2 = coupling.atom2
dl = coupling.dl
r1 = coupling.atom1Pos
r2 = r1+np.dot(self.A,dl)
g1 = self.g[atom1]
g2 = self.g[atom2]
J,NormR,DiffR = DipolarMatrix(g1, g2, Units.muB_J, Units.mu0, Units.meV, r1, r2)
coupling.exchange[:3,:3]+=J
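# A typical lattice-setup sketch (hypothetical spin-chain values; the operator/energy
# inputs needed later by System are prepared elsewhere):
#
#     lat = Lattice(S=[0.5], g=[2.0], positions=[[0, 0, 0]], active=[True],
#                   lattice=[3.0, 3.0, 3.0, 90, 90, 90], site=[1], label=['A'])
#     lat.generateCouplings(maxDistance=3.5)
#     lat.addExchangeInteractions([np.eye(3) * 0.2], [3.0])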
class System:
"""Class to manage the RPA calculations"""
    epsilon = 0.05 # Small imaginary part to add to ensure invertibility of matrices
def __init__(self,temperature = None, magneticField = [0.0,0.0,0.0], lattice = None):
self.verbose = True
self.temperature = temperature
self.magneticField = np.asarray(magneticField)
self.lattice = lattice
def getProperty(self,propertyName,inputValue):
        # store the value if it is new (or the attribute is missing); otherwise fall back
        # to the previously stored value
        if inputValue is not None or not hasattr(self, propertyName):
            setattr(self, propertyName, inputValue)
            return inputValue
        return getattr(self, propertyName, None)
@property
def NOperators(self):
return self.operators.shape[-1]
@NOperators.getter
def NOperators(self):
if not hasattr(self,'operators'):
raise AttributeError('System does not have any operators!')
return self.operators.shape[-1]
def solveSelfConsistency(self,ConvergenceCriteria = 0.005, fullConfiguration=None,operators=None,energy=None,limit=100):
fullConfiguration = self.getProperty('fullConfiguration',fullConfiguration)
operators = self.getProperty('operators', operators)
energy = self.getProperty('energy',energy)
states_energy = self.energies#np.repeat(self.energies[np.newaxis],repeats=self.lattice.Natom,axis=0)
if fullConfiguration is None:
raise AttributeError('fullConfiguration is not set!')
convergence = 1
expectedJ = fullConfiguration
totalRounds = 0
while convergence >= ConvergenceCriteria and totalRounds<limit:#
##################
# Calculate_Heff
expectedJ_previous = expectedJ
Hamiltonian_Heff = np.zeros((self.NOperators,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
for c in range(self.lattice.nExt[2]):
for b in range(self.lattice.nExt[1]):
for a in range(self.lattice.nExt[0]): # loop through extended unit cells
for atomID in np.arange(self.lattice.Natom): # loop through all atoms in a unit cell
total = np.zeros((1,self.NOperators),dtype=np.complex)
for coupling in self.lattice.couplings:
if atomID==coupling.atom1:
atom2ID = coupling.atom2
cellRelative = (np.mod((coupling.dl+np.array([a,b,c]))/self.lattice.nExt,1)*self.lattice.nExt).astype(int)
relIdx = relativeIdx(cellRelative,self.lattice.nExt,self.lattice.Natom)
Sj = fullConfiguration[:,atom2ID+relIdx]
Jij = coupling.exchange
total+=np.dot(Jij,Sj)
if atomID==coupling.atom2:
atom2ID = coupling.atom1
cellRelative = (np.mod((-coupling.dl+np.array([a,b,c]))/self.lattice.nExt,1)*self.lattice.nExt).astype(int)
relIdx = relativeIdx(cellRelative,self.lattice.nExt,self.lattice.Natom)
Sj = fullConfiguration[:,atom2ID+relIdx]
Jij = coupling.exchange
total+=np.dot(Jij,Sj)
idxAtom1 = relativeIdx([a,b,c],self.lattice.nExt,self.lattice.Natom)
Hamiltonian_Heff[:,atomID+idxAtom1] = total
###################
# Solve_MF_Hamiltonian
MatSize = len(self.energies[0])
            # Initialize the needed matrices
Hamiltonian_Hcf=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
Hamiltonian_Hfield=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
Hamiltonian_Hint1=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
Hamiltonian_Hint2=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
Hamiltonian_Hfull=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
Hamiltonian_Hdiag=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
Hamiltonian_eigenV=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell),dtype=np.complex);
Hamiltonian_Gs=np.zeros((MatSize,MatSize,self.lattice.Natom*self.lattice.NCell,self.NOperators),dtype=np.complex);
expectedJ = np.zeros((self.NOperators,self.lattice.Natom*self.lattice.NCell),dtype=np.complex)
for c in range(self.lattice.nExt[2]):
for b in range(self.lattice.nExt[1]):
for a in range(self.lattice.nExt[0]): # loop through extended unit cells
for j in range(self.lattice.Natom): # loop over atoms
if self.lattice.active[j]: # Only if the atom is active
equivalenceID = self.lattice.equivalence[j]
trueIndex = relativeIdx([a,b,c],self.lattice.nExt,self.lattice.Natom)+j
HField = np.zeros((MatSize,MatSize,3),dtype=np.complex);
Si = fullConfiguration[:,trueIndex]
# Hamiltonian from Crystal Electric Field (Solved independently)
Hamiltonian_Hcf[:,:,trueIndex]=np.diag(states_energy[equivalenceID]);
                                # Hamiltonian for field, assuming first three operators are Jx, Jy, and Jz!
for k in range(3):
HField[:,:,k]=self.operators[equivalenceID,:,:,k].T*self.lattice.g[equivalenceID]*Units.muB_J*self.magneticField[k]/Units.meV;
Hamiltonian_Hfield[:,:,trueIndex] = -np.sum(HField,axis=-1)
#Hamiltonian for first part of interaction
for op,eff in zip(operators[equivalenceID].transpose(2,1,0),Hamiltonian_Heff[:,trueIndex]):
Hamiltonian_Hint1[:,:,trueIndex]+=-1.0*op*eff;
#Hamiltonian for second part of interaction
int2=0.5*np.dot(Si,Hamiltonian_Heff[:,trueIndex]);
Hamiltonian_Hint2[:,:,trueIndex]=int2*np.eye(MatSize);
Hamiltonian_Hfull[:,:,trueIndex]=Hamiltonian_Hcf[:,:,trueIndex]\
+Hamiltonian_Hfield[:,:,trueIndex] \
+Hamiltonian_Hint1[:,:,trueIndex] \
+Hamiltonian_Hint2[:,:,trueIndex];
eigenValues,eigenVectors = np.linalg.eig(Hamiltonian_Hfull[:,:,trueIndex])
eigenValues = np.real(eigenValues)
Hamiltonian_Hdiag[:,:,trueIndex]=np.diag(eigenValues)
Hamiltonian_eigenV[:,:,trueIndex] = eigenVectors
for m in range(MatSize):
for n in range(MatSize):
for k,op in enumerate(operators[equivalenceID].transpose(2,1,0)):
Hamiltonian_Gs[m,n,trueIndex,k]= np.dot(np.conj(Hamiltonian_eigenV[:,n,trueIndex]),np.dot(op.T,Hamiltonian_eigenV[:,m,trueIndex].T))
expectedJ[:,trueIndex] = np.einsum('i,iik->k',population(eigenValues,self.temperature),Hamiltonian_Gs[:,:,trueIndex])
# Make sure the magnetic moment of the expected J is real
if np.abs(np.imag(expectedJ[:3])).max()>1e-2:
raise AttributeError('The imaginary part of the expectation value of J is larger than 1e-2. Was',np.imag(expectedJ[:3]))
expectedJ[:3]=np.real(expectedJ[:3])
# use found J's as new input
fullConfiguration = expectedJ ## Take care of inactive ions
self.Hamiltonian_Hcf=Hamiltonian_Hcf
self.Hamiltonian_Hfield=Hamiltonian_Hfield
self.Hamiltonian_Hfull=Hamiltonian_Hfull
self.Hamiltonian_Hdiag=Hamiltonian_Hdiag
self.Hamiltonian_Gs=Hamiltonian_Gs
self.fullConfiguration = fullConfiguration
convergence = np.max(np.abs(np.diff([expectedJ,expectedJ_previous],axis=0)))
if self.verbose: print('Convergence ('+str(totalRounds)+'):',convergence)
totalRounds+=1
if self.verbose and convergence<=ConvergenceCriteria: print('Self-consistency equations solved')
else:
warnings.warn('Self-consistency equations are not solve! Solution might be dodgy...')
def calculateChi0(self,omega,ElasticThreshold=0.01):
self.Chi0_elastic = np.zeros((self.NOperators,self.NOperators,self.lattice.Natom*self.lattice.NCell),dtype=np.complex)
self.Chi0_inelastic = np.zeros((self.NOperators,self.NOperators,self.lattice.Natom*self.lattice.NCell,len(omega)),dtype=np.complex)
self.omega = omega
MegaG=self.Hamiltonian_Gs
MatSize = len(self.energies[0])
if self.verbose: print(r'Calculating Chi0')
for c in range(self.lattice.nExt[2]):
for b in range(self.lattice.nExt[1]):
for a in range(self.lattice.nExt[0]): # loop through extended unit cells
for j in range(self.lattice.Natom): # loop over atoms
if self.lattice.active[j]: # Only if the atom is active
trueIndex = relativeIdx([a,b,c],self.lattice.nExt,self.lattice.Natom)+j
Energies=np.diag(self.Hamiltonian_Hdiag[:,:,trueIndex])
pop=population(Energies,self.temperature)
for x in np.arange(self.NOperators):# enumerate(self.operators.transpose(2,1,0)):
for y in np.arange(self.NOperators):#enumerate(self.operators.transpose(2,1,0)):
for m in range(MatSize):
for n in range(MatSize):
deltaE = Energies[m]-Energies[n]
if abs(deltaE)<ElasticThreshold: # We are elastic
self.Chi0_elastic[x,y,trueIndex]+=Units.calculateKelvinToMeV(self.temperature)*Units.meV*pop[m]*\
MegaG[m,n,trueIndex,x]*MegaG[n,m,trueIndex,y]
else: # the inelastic case
self.Chi0_inelastic[x,y,trueIndex,:]+=MegaG[m,n,trueIndex,x]*MegaG[n,m,trueIndex,y]*\
(pop[m]-pop[n])/(Energies[n]-Energies[m]-(self.omega+self.epsilon*1j))
def calculateJQ(self,QRLU=None):
QRLU = self.getProperty('QRLU', QRLU)
self.QPoints = np.asarray([np.dot(self.lattice.B,q) for q in QRLU.T])
self.Chi0_JQ = np.zeros((self.NOperators,self.NOperators,len(self.QPoints),self.lattice.Natom*self.lattice.NCell,self.lattice.Natom*self.lattice.NCell),dtype=np.complex)
if self.verbose: print('Calculating J(Q)')
for qidx,Q in enumerate(self.QPoints):
for c in range(self.lattice.nExt[2]):
for b in range(self.lattice.nExt[1]):
for a in range(self.lattice.nExt[0]): # loop through extended unit cells
for atomID in np.arange(self.lattice.Natom): # loop through all atoms in a unit cell
for coupling in self.lattice.couplings:
if atomID in [coupling.atom1,coupling.atom2]:#coupling[0] == atomID or coupling[1] == atomID:
if atomID == coupling.atom1:
atom1 = atomID
atom2 = coupling.atom2
dl = coupling.dl
shift = dl+np.asarray([a,b,c])
indices = (np.mod(shift/self.lattice.nExt,1)*self.lattice.nExt).astype(int)
atom2 += relativeIdx(indices,self.lattice.nExt,self.lattice.Natom)
atom1 += relativeIdx([a,b,c],self.lattice.nExt,self.lattice.Natom)
deltaR = -coupling.distanceVector
Jij = coupling.exchange.T
self.Chi0_JQ[:,:,qidx,atom1,atom2]+=+Jij*np.exp(-1j*np.dot(Q,deltaR))
if atomID == coupling.atom2:
atom2 = atomID
atom1 = coupling.atom1
dl = coupling.dl
shift = dl+np.asarray([a,b,c])
indices = (np.mod(shift/self.lattice.nExt,1)*self.lattice.nExt).astype(int)
atom2 += relativeIdx(indices,self.lattice.nExt,self.lattice.Natom)
atom1 += relativeIdx([a,b,c],self.lattice.nExt,self.lattice.Natom)
deltaR = -coupling.distanceVector
Jij = coupling.exchange
self.Chi0_JQ[:,:,qidx,atom2,atom1]+=Jij*np.exp(-1j*np.dot(Q,-deltaR))
def calculateChi(self,ElasticThreshold=0.01, epsilon = 0.0):
"""
Calculate the magnetic Susceptibility
Kwargs:
- ElasticThreshold (float): Distance to the elastic line within which the elastic susceptibility is used (default 0.01 meV)
        - epsilon (float): Regularization parameter used to ensure invertibility of Chi0_inelastic (default 0, but use 1e-8)
"""
active = np.repeat(self.lattice.active,axis=0,repeats=self.lattice.NCell).astype(bool)
equivalent = np.arange(self.lattice.Natom*self.lattice.NCell)[active]
totalActiveAtoms = len(equivalent)
self.Chi = np.zeros((self.NOperators,self.NOperators,totalActiveAtoms,totalActiveAtoms,len(self.QPoints),len(self.omega)),dtype=np.complex)
self.Chi_total = np.zeros((self.NOperators,self.NOperators,len(self.QPoints),len(self.omega)),dtype=np.complex)
# print('Calculating Chi and Chi_total')
if self.verbose: printProgressBar(0,len(self.QPoints),prefix='Calculating Chi and Chi_total',length=71)
for qidx,q in enumerate(self.QPoints):
try:
regularization = np.eye(len(self.Chi0_inelastic))*epsilon
for omegaidx,om in enumerate(self.omega):
for i in range(totalActiveAtoms):
Mat = np.zeros((totalActiveAtoms,self.NOperators,totalActiveAtoms,self.NOperators),dtype=np.complex)
aa,bb = np.meshgrid(equivalent,equivalent)
Mat[aa,:,bb,:]=-self.Chi0_JQ[:,:,qidx,aa,bb].transpose(2,3,0,1)
if np.abs(om) < ElasticThreshold:
chi0 = np.asarray([np.linalg.inv(self.Chi0_elastic[:,:,a]+self.Chi0_inelastic[:,:,a,omegaidx]+regularization) for a in equivalent])
else:
chi0 = np.asarray([np.linalg.inv(self.Chi0_inelastic[:,:,a,omegaidx]+regularization) for a in equivalent])
Mat[equivalent,:,equivalent,:]+=chi0
Vec = np.zeros((self.NOperators*totalActiveAtoms,self.NOperators),dtype=np.complex)
Vec[i*self.NOperators:(i+1)*self.NOperators,:] = np.eye(self.NOperators)
Mat = Mat.reshape(totalActiveAtoms*self.NOperators,totalActiveAtoms*self.NOperators)
Var = np.linalg.solve(Mat,Vec)
iEqui = equivalent[i]
for j in equivalent:
self.Chi[:,:,j,iEqui,qidx,omegaidx] = Var[j*self.NOperators:(j+1)*self.NOperators,:]
self.Chi_total[:,:,qidx,omegaidx]+=np.sum(Var.reshape(totalActiveAtoms,self.NOperators,self.NOperators),axis=0)
except np.linalg.LinAlgError as e:
print(q,i,om)
# print(Mat,Vec)
raise e
if self.verbose: printProgressBar(qidx+1,len(self.QPoints),prefix='Calculating Chi and Chi_total',length=71)
self.Chi_total = self.Chi_total.transpose([2,3,0,1])
def calculateSperp(self):
# Assuming that the first three operators are Sx, Sy, Sz
with warnings.catch_warnings():
warnings.simplefilter("ignore")
QPerp = np.array([1.0,1.0,1.0]).reshape(1,3)-np.abs(self.QPoints)/(np.linalg.norm(self.QPoints,axis=1).reshape(-1,1))
QPerp[np.isnan(QPerp).any(axis=1),:] = 0.0
self.Sperp = np.einsum('ijkk,ik->ij',np.imag(self.Chi_total[:,:,:3,:3]),QPerp)
def relativeIdx(cellRelative,nExt,nAtoms):
"""calculate relative indices between different unit cells
Args:
cellRelative (list of ints): vector connecting cells
nExt (list): list of unit cell extensions in [a,b,c]
nAtoms (int): number of atoms in unit cell
"""
# The relative index between different unit cells is
# nAtoms*deltaC+nExt[2]*nAtoms*deltaB+nExt[2]*nExt[1]*nAtoms*deltaA
return nAtoms*cellRelative[2]+nExt[2]*nAtoms*cellRelative[1]+nExt[2]*nExt[1]*nAtoms*cellRelative[0]
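# Worked example: with nExt = [2, 2, 2] and nAtoms = 3, shifting one cell along a gives
#     relativeIdx([1, 0, 0], [2, 2, 2], 3) == 2*2*3*1 == 12
# i.e. the atom indices of that cell start 12 positions further into the flattened arrays.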
def population(energies,temperature):
"""Calculate population of a set of states given energy [meV] and temperature [K]"""
Energies=energies-np.min(np.real(energies))
expPart = np.exp(-Units.calculateKelvinToMeV(temperature)*Energies*Units.meV)
return expPart/np.sum(expPart)#
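# Example: two levels split by 1 meV at T = 11.6 K (where k_B*T is roughly 1 meV) come out
# close to [0.73, 0.27]:
#
#     population(np.array([0.0, 1.0]), 11.6)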
def DipolarMatrix(g1,g2,muB,mu0,meV,r1,r2):
# calculate the dipolar interaction matrix
#
# [DipMat, NormR, DiffR]=DipolarMatrix(g1,g2,muB,mu0,meV,r1,r2)
#
# Output:
# DipMat Dipolar matrix (3x3)
# NormR Norm of R, distance (r2-r1)
# DiffR Normalized r2-r1
#
# N.Gauthier, 2017/09/22
NormR=np.linalg.norm(r2-r1);
DiffR=(r2-r1)/NormR;
    # Positive = ferro, negative = antiferro
C= g1*g2*np.power(muB,2.0)*mu0/(4*np.pi*np.power(NormR/1e10,3.0))/meV;
DipMat= C * np.asarray([[3*DiffR[0]**2-1,3*DiffR[0]*DiffR[1],3*DiffR[0]*DiffR[2]],
[3*DiffR[0]*DiffR[1],3*DiffR[1]**2-1,3*DiffR[1]*DiffR[2]],
[3*DiffR[0]*DiffR[2],3*DiffR[1]*DiffR[2],3*DiffR[2]**2-1]])
return [DipMat, NormR, DiffR]
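# Usage sketch (hypothetical geometry: two g = 2 ions 3 Angstrom apart along z; positions
# are given in the Angstrom-based units the 1e10 factor above assumes):
#
#     J, normR, diffR = DipolarMatrix(2, 2, Units.muB_J, Units.mu0, Units.meV,
#                                     np.array([0, 0, 0.0]), np.array([0, 0, 3.0]))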
def ClebschGordan(j1,j2,m1,m2,J,M):
if not M == m1+m2:
return 0
f1 = np.sqrt((2*J+1)*np.math.factorial(J+j1-j2)*np.math.factorial(J-j1+j2)*np.math.factorial(j1+j2-J)/np.math.factorial(j1+j2+J+1))
f2 = np.sqrt(np.math.factorial(J+M)*np.math.factorial(J-M)*np.math.factorial(j1-m1)*np.math.factorial(j1+m1)*np.math.factorial(j2-m2)*np.math.factorial(j2+m2))
s = 0
kmax =2*(j1+j1)+1
for k in range(-kmax,kmax+1):
try:
s+=(-1)**k/(np.math.factorial(k)*np.math.factorial(j1+j2-J-k)*np.math.factorial(j1-m1-k)*\
np.math.factorial(j2+m2-k)*np.math.factorial(J-j2+m1+k)*np.math.factorial(J-j1-m2+k))
#print(k)
except:
continue
#print(f1,f2,s)
return f1*f2*s
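# Sanity check against standard tables: coupling two spin-1/2 states with m1 = 1/2 and
# m2 = -1/2 into |J=1, M=0> should give 1/sqrt(2) ~ 0.707:
#
#     ClebschGordan(0.5, 0.5, 0.5, -0.5, 1, 0)
#
# (Note: newer Python versions reject float arguments to math.factorial, so half-integer
# quantum numbers may need extra care there.)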
def conversionMatrix(L,S):
JMax = L+S
JMin = L-S
jStates = np.arange(JMin,JMax+1)
mStates = [x*2+1 for x in jStates]
lStates =(2*L+1)
sStates = (2*S+1)
States = int(lStates*sStates)
matrix = np.zeros((States,States))
for l in np.arange(-L,L+1):
for s in np.arange(-S,S+1):
idx = (l*sStates+s+np.floor(States/2)).astype(int)
for I,j in enumerate(jStates):
for J,m in enumerate(np.arange(-j,j+1)):
idx2 = int(np.sum(mStates[:I]).astype('int')+J)
f = ClebschGordan(L,S,l,s,j,m)
#if not np.isclose(f,0):
#print('<{} {} ; {} {} | {} {} >= {}'.format(L,S,l,s,j,m,f))
matrix[idx,idx2] = f
return matrix | PypiClean |
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20.20/lib/util/arrays.js | define([],function(){
'use strict'
/**
* Implements unique() using the browser's sort().
*
* @param a
* The array to sort and strip of duplicate values.
* Warning: this array will be modified in-place.
* @param compFn
* A custom comparison function that accepts two values a and
* b from the given array and returns -1, 0, 1 depending on
* whether a < b, a == b, a > b respectively.
*
* If no compFn is provided, the algorithm will use the
* browsers default sort behaviour and loose comparison to
* detect duplicates.
* @return
* The given array.
*/
function sortUnique(a, compFn){
var i;
if (compFn) {
a.sort(compFn);
for (i = 1; i < a.length; i++) {
if (0 === compFn(a[i], a[i - 1])) {
a.splice(i--, 1);
}
}
} else {
a.sort();
for (i = 1; i < a.length; i++) {
// Use loosely typed comparsion if no compFn is given
// to avoid sortUnique( [6, "6", 6] ) => [6, "6", 6]
if (a[i] == a[i - 1]) {
a.splice(i--, 1);
}
}
}
return a;
}
/**
* Shallow comparison of two arrays.
*
* @param a, b
* The arrays to compare.
* @param equalFn
* A custom comparison function that accepts two values a and
* b from the given arrays and returns true or false for
* equal and not equal respectively.
*
* If no equalFn is provided, the algorithm will use the strict
* equals operator.
* @return
* True if all items in a and b are equal, false if not.
*/
function equal(a, b, equalFn) {
var i = 0, len = a.length;
if (len !== b.length) {
return false;
}
if (equalFn) {
for (; i < len; i++) {
if (!equalFn(a[i], b[i])) {
return false;
}
}
} else {
for (; i < len; i++) {
if (a[i] !== b[i]) {
return false;
}
}
}
return true;
}
/**
* ECMAScript map replacement
* See https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Array/map
* And http://es5.github.com/#x15.4.4.19
* It's not exactly according to standard, but it does exactly what one expects.
*/
function map(a, fn) {
var i, len, result = [];
for (i = 0, len = a.length; i < len; i++) {
result.push(fn(a[i]));
}
return result;
}
function mapNative(a, fn) {
// Call map directly on the object instead of going through
// Array.prototype.map. This avoids the problem that we may get
// passed an array-like object (NodeList) which may cause an
// error if the implementation of Array.prototype.map can only
// deal with arrays (Array.prototype.map may be native or
// provided by a javscript framework).
return a.map(fn);
}
return {
sortUnique: sortUnique,
equal: equal,
map: Array.prototype.map ? mapNative : map
};
}); | PypiClean |
/FoundryDataBrowser-190903.1.tar.gz/FoundryDataBrowser-190903.1/viewers/picoquant_histogram_h5.py | from ScopeFoundry.data_browser import DataBrowser, DataBrowserView
import pyqtgraph as pg
from qtpy import QtWidgets, QtCore
import numpy as np
import h5py
from pyqtgraph import dockarea
from ScopeFoundry.widgets import RegionSlicer
from scipy.optimize import least_squares
from ScopeFoundry.logged_quantity import LQCollection
class PicoquantHistogramH5View(DataBrowserView):
name = 'picoquant_histogram_h5'
def is_file_supported(self, fname):
if "picoharp_histogram.h5" in fname:
self.m_base = 'measurement/{}/'.format('picoharp_histogram')
self.h_base = 'hardware/{}/'.format('picoharp')
return True
elif "hydraharp_histogram.h5" in fname:
self.m_base = 'measurement/{}/'.format('hydraharp_histogram')
self.h_base = 'hardware/{}/'.format('hydraharp')
return True
else:
return False
def setup(self):
self.ui = self.dockarea = dockarea.DockArea()
# graph_dock
self.graph_layout = pg.GraphicsLayoutWidget()
self.plotdock = self.dockarea.addDock(name='Histograms', position='right',
widget=self.graph_layout)
self.plot = self.graph_layout.addPlot()
self.plot.setLogMode(False, True)
self.plotdata = self.plot.plot(pen='r')
self.fit_line = self.plot.plot(pen='g')
#settings
self.settings.New('fit_option',str,initial='tau_x_calc',
choices = ('poly_fit','tau_x_calc','biexponential'))
self.settings.New('chan', dtype=int, initial=0)
self.settings.New('binning', dtype=int, initial=1, vmin=1)
self.settings.New('time_unit', dtype=str, initial='ns')
self.settings.New('norm_data', bool, initial = False)
self.settings.fit_option.add_listener(self.fit_xy)
self.settings.chan.add_listener(self.update_display)
self.settings.binning.add_listener(self.update_display)
self.settings.norm_data.add_listener(self.update_display)
# data slicers
self.x_slicer = RegionSlicer(self.plotdata,slicer_updated_func=self.update_display,
name='x_slicer', initial=[10,20], activated=True)
self.bg_slicer = RegionSlicer(self.plotdata,slicer_updated_func=self.update_display,
name='bg_subtract', initial=[0,10], activated=False)
#settings_dock
self.setdock = self.dockarea.addDock(name='Settings', position='left',
widget=self.settings.New_UI())
self.setdock.layout.addWidget(self.x_slicer.New_UI())
self.setdock.layout.addWidget(self.bg_slicer.New_UI())
# Metadata from file
self.posible_meta_data = ['ElapsedMeasTime','Tacq','Resolution','CountRate0','CountRate1',
'Binning','SyncRate','SyncDivider','count_rate0','count_rate1',
'elapsed_meas_time']
self.meta_data_settings = LQCollection()
for lqname in self.posible_meta_data:
self.meta_data_settings.New(lqname, ro=True)
self.meta_data_settings.New('sample', dtype=str, ro=True)
self.meta_data_ui = self.meta_data_settings.New_UI()
self.setdock.layout.addWidget(QtWidgets.QLabel('<b>Meta data found</b>'))
self.setdock.layout.addWidget(self.meta_data_ui)
def update_display(self):
x,y = self.get_xy(apply_use_x_slice=False)
self.plotdata.setData(x,y)
self.fit_xy()
def on_change_data_filename(self, fname):
try:
self.dat = h5py.File(fname, 'r')
self.meas = H = self.dat[self.m_base]
self.time_array = H['time_array'][:] * 1e-3 #ns
self.histograms = H['time_histogram'][:].reshape(-1, len(self.time_array))
n_chan = self.histograms.shape[0]
self.settings.chan.change_min_max(0, n_chan-1)
self.update_metadata()
self.dat.close()
self.update_display()
except Exception as err:
self.databrowser.ui.statusbar.showMessage("failed to load %s:\n%s" %(fname, err))
raise(err)
def update_metadata(self):
'''if a possible meta data setting is found in h5_file it will be displayed'''
h5_hw_settings = self.dat[self.h_base+'/settings'].attrs
h5_meas_settings = self.dat[self.m_base+'/settings'].attrs
for i,lqname in enumerate(self.posible_meta_data):
self.meta_data_ui.layout().itemAt(i,0).widget().show()
self.meta_data_ui.layout().itemAt(i,1).widget().show()
if lqname in h5_hw_settings.keys():
self.meta_data_settings[lqname] = h5_hw_settings[lqname]
elif lqname in h5_meas_settings.keys():
self.meta_data_settings[lqname] = h5_meas_settings[lqname]
else:
self.meta_data_ui.layout().itemAt(i,0).widget().hide()
self.meta_data_ui.layout().itemAt(i,1).widget().hide()
try:
            self.meta_data_settings['sample'] = self.dat['app/settings'].attrs['sample']
except KeyError:
pass
def get_xy(self, apply_use_x_slice=True):
'''
returns data for fitting.
'''
y = 1.0*self.histograms[self.settings['chan']]
x = self.time_array
if self.bg_slicer.activated.val:
bg = y[self.bg_slicer.s_].mean()
y -= bg
binning = self.settings['binning']
if binning> 1:
x,y = bin_y_average_x(x, y, binning, -1, datapoints_lost_warning=False)
if apply_use_x_slice:
x = x[self.x_slicer.s_]
y = y[self.x_slicer.s_]
if self.settings['norm_data']:
y = norm(y)
return (x,y)
@QtCore.Slot()
def fit_xy(self):
x,y = self.get_xy(apply_use_x_slice=True)
print(x.shape, y.shape)
fit_func_dict = {'poly_fit': self.poly_fit_xy,
'tau_x_calc': self.tau_x_calc_xy,
'biexponential': self.fit_biexponential_xy}
fit_option = self.settings['fit_option']
self.xf,self.yf = fit_func_dict[fit_option](x,y)
def tau_x_calc_xy(self,x,y):
t = x.copy()
t -= t.min()
tau = tau_x_calc(y, t)
self.fit_line.setData(x,y)
#gather result
quantities = ['$\\tau_e$']
numbers = '{0:1.1f}'.format(tau).split(" ")
units = [self.settings['time_unit']]
self.res_data_table = [[quantity, number, unit] for quantity, number, unit in zip(quantities,numbers,units)]
self.x_slicer.set_label(_table2html(self.res_data_table, strip_latex=True), title='tau_x_calc')
return x,y
def poly_fit_xy(self,x,y,deg=1):
coefs = poly_fit(x=x, y=y)
t = x - x.min()
fit = np.exp( np.poly1d(coefs)(t) )
self.fit_line.setData(x,fit)
#gather result
quantities = ['$A$','$\\tau$']
numbers = '{0:1.1f} {1:1.1f}'.format(coefs[1],-1/coefs[0]).split(" ")
units = ['-', self.settings['time_unit']]
self.res_data_table = [[quantity, number, unit] for quantity, number, unit in zip(quantities,numbers,units)]
self.x_slicer.set_label(_table2html(self.res_data_table, strip_latex=True), title='poly_fit')
return x,fit
def fit_biexponential_xy(self,x,y):
#bounds = self.biexponential_fit_bounds
#bi_initial = self.biexponential_fit_initials
bi_initial = [10,0.1,1,100]
t = x - x.min()
bi_res = least_squares(fun = biexponential_residuals,
#bounds = bounds,
x0 = bi_initial,
args = (t, y))
A0,tau0,A1,tau1 = bi_res.x
A0,tau0,A1,tau1 = sort_biexponential_components(A0, tau0, A1, tau1)
A0_norm,A1_norm = A0/(A0 + A1),A1/(A0 + A1)
tau_m = A0_norm*tau0 + A1_norm*tau1
fit = biexponential(bi_res.x, t)
self.fit_line.setData(x,fit)
#self.current_bi_exp_fit_res = bi_res.x
quantities = ['$\\tau_0$','$\\tau_1$','$A_0$','$A_1$','$\\tau_m$']
numbers = '{0:1.1f} {1:1.1f} {2:1.0f} {3:1.0f} {4:1.1f}'.format(tau0,tau1,A0_norm*100,A1_norm*100,tau_m).split(" ")
time_unit = ''
units = [time_unit, time_unit, '%', '%', time_unit]
self.res_data_table = [[quantity, number, unit] for quantity, number, unit in zip(quantities,numbers,units)]
self.x_slicer.set_label(_table2html(self.res_data_table, strip_latex=True),title='biexponential fit')
return x,fit
def poly_fit(y,x,deg=1):
mask = y > 0
x = x[mask]
y = y[mask]
t = x.copy()
t -= t.min()
coefs = np.polyfit(t,np.log(y),deg)
return coefs
def tau_x_calc(time_trace, time_array, x=0.6321205588300001):
t = time_trace
return time_array[np.argmin(np.abs(np.cumsum(t)/np.sum(t)-x))]
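# Note: the default x = 0.6321... is 1 - 1/e, so tau_x_calc returns the time at which the
# cumulative histogram reaches (1 - 1/e) of its total counts -- an effective 1/e decay
# time that requires no fit model.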
def biexponential(params, t):
'''
params = [ A0, tau0, A1, tau1]
'''
return params[0]*np.exp(-t/params[1]) + params[2]*np.exp(-t/params[3])
def biexponential_residuals(params, t, data):
return biexponential(params,t) - data
def fit_biexpontial(y, t, bi_initial, bounds):
bi_res = least_squares(fun = biexponential_residuals,
bounds = bounds,
x0 = bi_initial,
args = (t, y))
return bi_res.x
def sort_biexponential_components(A0,tau0,A1,tau1):
'''
ensures that tau0 < tau1, also swaps values in A1 and A0 if necessary.
'''
A0 = np.atleast_1d(A0)
tau0 = np.atleast_1d(tau0)
A1 = np.atleast_1d(A1)
tau1 = np.atleast_1d(tau1)
mask = tau0 < tau1
mask_ = np.invert(mask)
new_tau0 = tau0.copy()
new_tau0[mask_] = tau1[mask_]
tau1[mask_] = tau0[mask_]
new_A0 = A0.copy()
new_A0[mask_] = A1[mask_]
A1[mask_] = A0[mask_]
try:
new_A0 = np.asscalar(new_A0)
new_tau0 = np.asscalar(new_tau0)
A1 = np.asscalar(A1)
tau1 = np.asscalar(tau1)
except ValueError:
pass
return new_A0,new_tau0,A1,tau1 #Note, generally A1,tau1 were also modified.
def norm(x):
x_max = x.max()
if x_max==0:
return x*0.0
else:
return x*1.0/x_max
def bin_y_average_x(x, y, binning = 2, axis = -1, datapoints_lost_warning = True):
'''
    y can be an n-dim array with length on axis `axis` equal to len(x)
'''
new_len = int(x.__len__()/binning) * binning
data_loss = x.__len__() - new_len
    if data_loss != 0 and datapoints_lost_warning:
        print('bin_y_average_x() warning: lost final', data_loss, 'datapoints')
def bin_1Darray(arr, binning=binning, new_len=new_len):
return arr[:new_len].reshape((-1,binning)).sum(1)
x_ = bin_1Darray(x) / binning
y_ = np.apply_along_axis(bin_1Darray,axis,y)
return x_, y_
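# Example: binning a 5-point trace by 2 keeps floor(5/2)*2 = 4 points and warns about the
# one dropped at the end:
#
#     x = np.arange(5.0); y = np.ones(5)
#     bin_y_average_x(x, y, binning=2)   # -> (array([0.5, 2.5]), array([2., 2.]))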
def _table2html(data_table, strip_latex = True):
text = '<table border="0">'
for line in data_table:
text += '<tr>'
for element in line:
text += '<td>{} </td>'.format(element)
text += '</tr>'
text += '</table>'
if strip_latex:
text = text.replace('\\','').replace('$','').replace('_','')
return text
if __name__ == '__main__':
import sys
app = DataBrowser(sys.argv)
app.load_view(PicoquantHistogramH5View(app))
sys.exit(app.exec_()) | PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas_rhino/uninstall.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import sys
import compas._os
import compas.plugins
import compas_rhino
from compas_rhino.install import _run_post_execution_steps
from compas_rhino.install import installable_rhino_packages
__all__ = [
"uninstall",
"after_rhino_uninstall",
]
def uninstall(version=None, packages=None):
"""Uninstall COMPAS from Rhino.
Parameters
----------
version : {'5.0', '6.0', '7.0', '8.0'}, optional
The version number of Rhino.
Default is ``'7.0'``.
packages : list of str, optional
List of packages to uninstall.
Default is to uninstall all packages installed by the COMPAS installer.
Examples
--------
.. code-block:: python
import compas_rhino
compas_rhino.uninstall()
.. code-block:: bash
python -m compas_rhino.uninstall
"""
version = compas_rhino._check_rhino_version(version)
# We install COMPAS packages in the scripts folder
# instead of directly as IPy module.
scripts_path = compas_rhino._get_rhino_scripts_path(version)
# This is for old installs
ipylib_path = compas_rhino._get_rhino_ironpython_lib_path(version)
# Filter the provided list of packages
# If no packages are provided
# this first collects all installable packages from the environment.
packages = _filter_installed_packages(version, packages)
# Also remove all broken symlinks
# because ... they're broken!
for name in os.listdir(scripts_path):
path = os.path.join(scripts_path, name)
if os.path.islink(path):
if not os.path.exists(path):
if name not in packages:
packages.append(name)
# Collect paths for removal based on package names
symlinks_to_uninstall = []
for package in packages:
symlink_path = os.path.join(scripts_path, package)
symlinks_to_uninstall.append(dict(name=package, link=symlink_path))
# Handle legacy install location
# This does not always work,
# and especially not in cases where it is in any case not necessary :)
if ipylib_path:
legacy_path = os.path.join(ipylib_path, package)
if os.path.exists(legacy_path):
symlinks_to_uninstall.append(dict(name=package, link=legacy_path))
# There is nothing to uninstall
if not symlinks_to_uninstall:
print("\nNo packages to uninstall from Rhino {0} scripts folder: \n{1}.".format(version, scripts_path))
return
# -------------------------
# Start uninstalling
# -------------------------
uninstalled_packages = []
results = []
exit_code = 0
symlinks = [link["link"] for link in symlinks_to_uninstall]
uninstall_results = compas._os.remove_symlinks(symlinks)
for uninstall_data, success in zip(symlinks_to_uninstall, uninstall_results):
if success:
uninstalled_packages.append(uninstall_data["name"])
result = "OK"
else:
result = "ERROR: Cannot remove symlink, try to run as administrator."
results.append((uninstall_data["name"], result))
if not all(uninstall_results):
exit_code = -1
if exit_code == -1:
results.append(
(
"compas_bootstrapper",
"WARNING: One or more packages failed, will not uninstall bootstrapper.",
)
)
else:
if compas_rhino._try_remove_bootstrapper(scripts_path):
results.append(("compas_bootstrapper", "OK"))
else:
results.append(
(
"compas_bootstrapper",
"ERROR: Cannot remove compas_bootstrapper, try to run as administrator.",
)
)
# Handle legacy bootstrapper
# Again, only if possible...
if ipylib_path:
if not compas_rhino._try_remove_bootstrapper(ipylib_path):
results.append(
(
"compas_bootstrapper",
"ERROR: Cannot remove legacy compas_bootstrapper, try to run as administrator.",
)
)
# -------------------------
# Output results
# -------------------------
print("Uninstalling COMPAS packages from Rhino {0} scripts folder: \n{1}".format(version, scripts_path))
print("\nThe following packages have been detected and will be uninstalled:\n")
for package, status in results:
print(" {} {}".format(package.ljust(20), status))
if status != "OK":
exit_code = -1
if exit_code == 0 and uninstalled_packages:
print("\nRunning post-uninstallation steps...\n")
if not _run_post_execution_steps(after_rhino_uninstall(uninstalled_packages)):
exit_code = -1
print("\nUninstall completed.")
if exit_code != 0:
sys.exit(exit_code)
def _filter_installed_packages(version, packages):
ipylib_path = compas_rhino._get_rhino_ironpython_lib_path(version)
scripts_path = compas_rhino._get_rhino_scripts_path(version)
compas_bootstrapper = compas_rhino._get_bootstrapper_path(scripts_path)
bootstrapper_data = compas_rhino._get_bootstrapper_data(compas_bootstrapper)
# Don't modify the original list if we have one
if packages:
packages = packages[:]
else:
packages = bootstrapper_data.get("INSTALLED_PACKAGES", None)
# No info, fall back to installable packages list
if packages is None:
packages = list(itertools.chain.from_iterable(installable_rhino_packages()))
# Handle legacy install
if ipylib_path:
legacy_bootstrapper = compas_rhino._get_bootstrapper_path(ipylib_path)
if os.path.exists(legacy_bootstrapper):
bootstrapper_data = compas_rhino._get_bootstrapper_data(legacy_bootstrapper)
legacy_packages = bootstrapper_data.get("INSTALLED_PACKAGES", None)
if legacy_packages:
packages.extend(legacy_packages)
return packages
@compas.plugins.pluggable(category="install", selector="collect_all")
def after_rhino_uninstall(uninstalled_packages):
"""Allows extensions to execute actions after uninstall from Rhino is done.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to perform
additional steps after the uninstall from Rhino has
been completed.
Parameters
----------
uninstalled_packages : :obj:`list` of :obj:`str`
List of packages that have been uninstalled.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def after_rhino_uninstall(uninstalled_packages):
... # Do something cleanup, eg remove copied files.
... return [('compas_ghpython', 'GH Components uninstalled', True)]
Returns
-------
:obj:`list` of 3-tuple (str, str, bool)
List containing a 3-tuple with component name, message and True/False success flag.
"""
pass
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--version",
choices=compas_rhino.SUPPORTED_VERSIONS,
default=compas_rhino.DEFAULT_VERSION,
help="The version of Rhino to install the packages in.",
)
parser.add_argument("-p", "--packages", nargs="+", help="The packages to uninstall.")
args = parser.parse_args()
uninstall(version=args.version, packages=args.packages) | PypiClean |
/InvokeAI-3.1.0-py3-none-any.whl/invokeai/backend/image_util/txt2mask.py | import numpy as np
import torch
from PIL import Image, ImageOps
from transformers import AutoProcessor, CLIPSegForImageSegmentation
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
CLIPSEG_SIZE = 352
config = InvokeAIAppConfig.get_config()
class SegmentedGrayscale(object):
def __init__(self, image: Image, heatmap: torch.Tensor):
self.heatmap = heatmap
self.image = image
def to_grayscale(self, invert: bool = False) -> Image:
return self._rescale(Image.fromarray(np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255)))
def to_mask(self, threshold: float = 0.5) -> Image:
discrete_heatmap = self.heatmap.lt(threshold).int()
return self._rescale(Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L"))
def to_transparent(self, invert: bool = False) -> Image:
transparent_image = self.image.copy()
# For img2img, we want the selected regions to be transparent,
# but to_grayscale() returns the opposite. Thus invert.
gs = self.to_grayscale(not invert)
transparent_image.putalpha(gs)
return transparent_image
# unscales and uncrops the 352x352 heatmap so that it matches the image again
def _rescale(self, heatmap: Image) -> Image:
size = self.image.width if (self.image.width > self.image.height) else self.image.height
resized_image = heatmap.resize((size, size), resample=Image.Resampling.LANCZOS)
return resized_image.crop((0, 0, self.image.width, self.image.height))
class Txt2Mask(object):
"""
Create new Txt2Mask object. The optional device argument can be one of
'cuda', 'mps' or 'cpu'.
"""
def __init__(self, device="cpu", refined=False):
logger.info("Initializing clipseg model for text to mask inference")
# BUG: we are not doing anything with the device option at this time
self.device = device
self.processor = AutoProcessor.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir)
self.model = CLIPSegForImageSegmentation.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir)
@torch.no_grad()
def segment(self, image, prompt: str) -> SegmentedGrayscale:
"""
Given a prompt string such as "a bagel", tries to identify the object in the
provided image and returns a SegmentedGrayscale object in which the brighter
pixels indicate where the object is inferred to be.
"""
if type(image) is str:
image = Image.open(image).convert("RGB")
image = ImageOps.exif_transpose(image)
img = self._scale_and_crop(image)
inputs = self.processor(text=[prompt], images=[img], padding=True, return_tensors="pt")
outputs = self.model(**inputs)
heatmap = torch.sigmoid(outputs.logits)
return SegmentedGrayscale(image, heatmap)
def _scale_and_crop(self, image: Image) -> Image:
scaled_image = Image.new("RGB", (CLIPSEG_SIZE, CLIPSEG_SIZE))
if image.width > image.height: # width is constraint
scale = CLIPSEG_SIZE / image.width
else:
scale = CLIPSEG_SIZE / image.height
scaled_image.paste(
image.resize(
(int(scale * image.width), int(scale * image.height)),
resample=Image.Resampling.LANCZOS,
),
box=(0, 0),
)
return scaled_image | PypiClean |
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/AppCenter/simpleui/static/admin/simpleui-x/elementui/locale/lang/ca.js | 'use strict';
exports.__esModule = true;
exports.default = {
el: {
colorpicker: {
confirm: 'Confirmar',
clear: 'Netejar'
},
datepicker: {
now: 'Ara',
today: 'Avui',
cancel: 'Cancel·lar',
clear: 'Netejar',
confirm: 'Confirmar',
selectDate: 'Seleccionar data',
selectTime: 'Seleccionar hora',
startDate: 'Data Inici',
startTime: 'Hora Inici',
endDate: 'Data Final',
endTime: 'Hora Final',
prevYear: 'Any anterior',
nextYear: 'Pròxim Any',
prevMonth: 'Mes anterior',
nextMonth: 'Pròxim Mes',
year: 'Any',
month1: 'Gener',
month2: 'Febrer',
month3: 'Març',
month4: 'Abril',
month5: 'Maig',
month6: 'Juny',
month7: 'Juliol',
month8: 'Agost',
month9: 'Setembre',
month10: 'Octubre',
month11: 'Novembre',
month12: 'Desembre',
// week: 'setmana',
weeks: {
sun: 'Dg',
mon: 'Dl',
tue: 'Dt',
wed: 'Dc',
thu: 'Dj',
fri: 'Dv',
sat: 'Ds'
},
months: {
jan: 'Gen',
feb: 'Febr',
mar: 'Març',
apr: 'Abr',
may: 'Maig',
jun: 'Juny',
jul: 'Jul',
aug: 'Ag',
sep: 'Set',
oct: 'Oct',
nov: 'Nov',
dec: 'Des'
}
},
select: {
loading: 'Carregant',
noMatch: 'No hi ha dades que coincideixin',
noData: 'Sense Dades',
placeholder: 'Seleccionar'
},
cascader: {
noMatch: 'No hi ha dades que coincideixin',
loading: 'Carregant',
placeholder: 'Seleccionar',
noData: 'Sense Dades'
},
pagination: {
goto: 'Anar a',
pagesize: '/pagina',
total: 'Total {total}',
pageClassifier: ''
},
messagebox: {
confirm: 'Acceptar',
cancel: 'Cancel·lar',
error: 'Entrada invàlida'
},
upload: {
deleteTip: 'premi eliminar per descartar',
delete: 'Eliminar',
preview: 'Vista Prèvia',
continue: 'Continuar'
},
table: {
emptyText: 'Sense Dades',
confirmFilter: 'Confirmar',
resetFilter: 'Netejar',
clearFilter: 'Tot',
sumText: 'Tot'
},
tree: {
emptyText: 'Sense Dades'
},
transfer: {
noMatch: 'No hi ha dades que coincideixin',
noData: 'Sense Dades',
titles: ['Llista 1', 'Llista 2'],
filterPlaceholder: 'Introdueix la paraula clau',
noCheckedFormat: '{total} ítems',
hasCheckedFormat: '{checked}/{total} seleccionats'
},
image: {
error: 'FAILED' // to be translated
},
pageHeader: {
title: 'Back' // to be translated
}
}
}; | PypiClean |
/ICQ_client_study-0.1.2-py3-none-any.whl/client/common/metaclasses.py | import dis
import logging
SERVER_LOGGER = logging.getLogger('serverlog')
CLIENT_LOGGER = logging.getLogger('clientlog')
class ClientVerifier(type):
"""Метакласс выпоняющий проверку,
что в клиентском классе нет неверных вызовов
"""
def __init__(cls, clsname, bases, clsdict):
methods = []
methods_2 = []
attrs = []
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
except TypeError:
pass
else:
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
elif i.opname == 'LOAD_METHOD':
if i.argval not in methods_2:
methods_2.append(i.argval)
elif i.opname == 'LOAD_ATTR':
if i.argval not in attrs:
attrs.append(i.argval)
for command in ('accept', 'listen', 'socket'):
if command in methods:
                CLIENT_LOGGER.error('Use of a forbidden method '
                                    'was detected in the class')
                raise TypeError('Use of a forbidden method '
                                'was detected in the class')
if 'get_message' in methods or 'send_message' in methods:
pass
else:
            CLIENT_LOGGER.error('No calls to functions that work '
                                'with sockets were found.')
            raise TypeError('No calls to functions that work '
                            'with sockets were found.')
super().__init__(clsname, bases, clsdict)
class ServerVerifier(type):
"""Метакласс выпоняющий проверку,
что в серверном классе нет неверных вызовов
"""
def __init__(cls, clsname, bases, clsdict):
methods = [] # 'LOAD_GLOBAL'
methods_2 = [] # 'LOAD_METHOD',
attrs = [] # 'LOAD_ATTR'
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
except TypeError:
pass
else:
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
elif i.opname == 'LOAD_METHOD':
if i.argval not in methods_2:
methods_2.append(i.argval)
elif i.opname == 'LOAD_ATTR':
if i.argval not in attrs:
attrs.append(i.argval)
if 'connect' in methods:
            SERVER_LOGGER.error('Use of the connect method is not '
                                'allowed in a server class')
            raise TypeError('Use of the connect method is not '
                            'allowed in a server class')
if not ('SOCK_STREAM' in methods and 'AF_INET' in methods):
            SERVER_LOGGER.error('Incorrect socket initialization.')
            raise TypeError('Incorrect socket initialization.')
super().__init__(clsname, bases, clsdict) | PypiClean |
/CRIkit2-0.4.4.tar.gz/CRIkit2-0.4.4/crikit/data/replicate.py | import numpy as _np
__all__ = ['Replicate']
class Replicate:
"""
Replicate class
Attributes
----------
data : 1D ndarray [size]
Replicate data
calib : list [(start), stop, (step size)]
Calibration descriptor. See Note.
units : str
Units of replicate data
size : int, read-only
Methods
-------
update_calib_from_data
Calculate and set calib parameter from data
update_data_from_calib
Calculate and set data from calib parameter
calib_data_agree
Return bool as to whether data and that derived from calib agree
Notes
-----
* input to calib can be a list or tuple or 1D ndarray or int or float
Setting calib can take up to 3 entries :
* 1 entry: stop = entry; start = 0, step size = 1
* 2 entries: start = entry[0], stop = entry[1], step size = 1
* 3 entries: [start, stop, step size]
"""
def __init__(self, data=None, calib=None, units=None, label=None):
self._data = None
self._calib = None
self._units = None
self._label = None
if data is not None:
self.data = data
if calib is not None:
self.calib = calib
if units is not None:
self.units = units
if label is not None:
self.label = label
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if isinstance(value, _np.ndarray):
if value.ndim == 1:
self._data = value
else:
raise TypeError('data must be 1D ndarray')
elif value is None:
self._data = None
else:
raise TypeError('data must be 1D ndarray')
@property
def size(self):
return self._data.size
@property
def calib(self):
return self._calib
@calib.setter
def calib(self, value):
if isinstance(value, _np.ndarray) or isinstance(value, list) or isinstance(value, tuple):
if len(value) == 3:
self._calib = list(value)
elif len(value) == 2:
temp = list(value)
temp.append(1)
self._calib = temp
elif len(value) == 1:
temp = [0]
temp.append(value[0])
temp.append(1)
self._calib = temp
else:
raise TypeError('calib should have 1-3 components: [(start), stop, (step size)]')
elif isinstance(value, int) or isinstance(value, float):
temp = [0]
temp.append(value)
temp.append(1)
self._calib = temp
else:
            raise TypeError('calib should be an int or float [stop]; or a '
                            '1D ndarray, tuple, or list with 1-3 entries: '
                            '[start, stop, step size]')
@property
def units(self):
return self._units
@units.setter
def units(self, value):
if isinstance(value, str) | (value is None):
self._units = value
else:
raise TypeError('units should be of type str')
@property
def label(self):
return self._label
@label.setter
def label(self, value):
if isinstance(value, str) | (value is None):
self._label = value
else:
raise TypeError('label should be of type str')
def update_data_from_calib(self):
"""
Calculate and set data from calib parameter
"""
if self._calib is not None:
self.data = _np.arange(self._calib[0],self._calib[1], self._calib[2])
else:
raise TypeError('calib is not set')
def update_calib_from_data(self):
"""
Calculate and set calib parameter from data. Note: assumes uniform \
spacing of data.
"""
if self._data is not None:
delta = self._data[1] - self._data[0]
self.calib = [self._data[0], self._data[-1]+delta, delta]
else:
raise TypeError('data is not set')
def calib_data_agree(self):
if self._data is None:
raise TypeError('data not set')
if self._calib is None:
raise TypeError('calib not set')
temp = _np.arange(self._calib[0],self._calib[1],self._calib[2])
if temp.size != self._data.size:
return False
else:
return _np.allclose(temp, self._data)
if __name__ == '__main__': # pragma: no cover
start = 0
stop = 10
step_size = .1
x = _np.arange(start, stop, step_size)
rep = Replicate(data=x,calib=[start, stop, step_size])
print('Calib and data agree: {}'.format(rep.calib_data_agree())) | PypiClean |
/Biryani-0.10.8-py3-none-any.whl/biryani/bsonconv.py |
# Biryani -- A conversion and validation toolbox
# By: Emmanuel Raviart <[email protected]>
#
# Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Emmanuel Raviart
# http://packages.python.org/Biryani/
#
# This file is part of Biryani.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MongoDB BSON related Converters
See http://api.mongodb.org/python/current/api/bson/index.html
"""
import re
import bson
from .baseconv import cleanup_line, first_match, function, pipe, test_isinstance
from . import states
__all__ = [
'anything_to_object_id',
'bson_to_json',
'input_to_object_id',
'input_to_object_id_str',
'json_to_bson',
'object_id_re',
'object_id_to_str',
'str_to_object_id',
'str_to_object_id_str',
]
object_id_re = re.compile(r'[\da-f]{24}$')
# Utility functions
def convert_bson_to_json(value):
"""Recursively convert a BSON value to JSON.
A MongoDB document can't have an item with a key containing a ".". So they are escaped with "%".
"""
if value is None:
return value
if isinstance(value, dict):
# Note: Use type(value) instead of dict, to support OrderedDict.
return type(value)(
(
item_key.replace('%2e', '.').replace('%25', '%'),
convert_bson_to_json(item_value),
)
for item_key, item_value in value.items()
)
if isinstance(value, list):
return [
convert_bson_to_json(item)
for item in value
]
return value
def convert_json_to_bson(value):
"""Recursively convert a JSON value to BSON.
A MongoDB document can't have an item with a key containing a ".". So they are escaped with "%".
"""
if value is None:
return value
if isinstance(value, dict):
# Note: Use type(value) instead of dict, to support OrderedDict.
return type(value)(
(
item_key.replace('%', '%25').replace('.', '%2e'),
convert_json_to_bson(item_value),
)
for item_key, item_value in value.items()
)
if isinstance(value, list):
return [
convert_json_to_bson(item)
for item in value
]
return value
# Level-1 Converters
bson_to_json = function(convert_bson_to_json)
"""Convert a BSON value to JSON.
A MongoDB document can't have an item with a key containing a ".". So they are escaped with "%".
>>> bson_to_json({'a': 1, 'b': [2, 3], 'c%2ed': {'e': 4}})
({'a': 1, 'c.d': {'e': 4}, 'b': [2, 3]}, None)
>>> bson_to_json({})
({}, None)
>>> bson_to_json(None)
(None, None)
"""
json_to_bson = function(convert_json_to_bson)
"""Convert a JSON value to BSON.
A MongoDB document can't have an item with a key containing a ".". So they are escaped with "%".
>>> json_to_bson({'a': 1, 'b': [2, 3], 'c.d': {'e': 4}})
({'a': 1, 'b': [2, 3], 'c%2ed': {'e': 4}}, None)
>>> json_to_bson({})
({}, None)
>>> json_to_bson(None)
(None, None)
"""
def object_id_to_str(value, state = None):
"""Convert a BSON ObjectId to unicode.
.. note:: To ensure that input value is an ObjectId, first use :func:`biryani.baseconv.test_isinstance`.
>>> from bson.objectid import ObjectId
>>> object_id_to_str(ObjectId('4e333f53ff42e928000007d8'))
(u'4e333f53ff42e928000007d8', None)
>>> object_id_to_str(u'4e333f53ff42e928000007d8')
(u'4e333f53ff42e928000007d8', None)
>>> from biryani import baseconv as conv
>>> conv.pipe(conv.test_isinstance(ObjectId), object_id_to_str)(ObjectId('4e333f53ff42e928000007d8'))
(u'4e333f53ff42e928000007d8', None)
>>> conv.pipe(conv.test_isinstance(ObjectId), object_id_to_str)(u'4e333f53ff42e928000007d8')
(u'4e333f53ff42e928000007d8', u"Value is not an instance of <class 'bson.objectid.ObjectId'>")
>>> object_id_to_str(None)
(None, None)
"""
if value is None:
return value, None
return unicode(value), None
def str_to_object_id_str(value, state = None):
"""Convert a clean string to a BSON ObjectId string.
.. note:: For a converter that doesn't require a clean string, see :func:`input_to_object_id_str`.
>>> str_to_object_id_str(u'4e333f53ff42e928000007d8')
(u'4e333f53ff42e928000007d8', None)
>>> str_to_object_id_str('4e333f53ff42e928000007d8')
('4e333f53ff42e928000007d8', None)
>>> str_to_object_id_str(u'4E333F53FF42E928000007D8')
(u'4e333f53ff42e928000007d8', None)
>>> from bson.objectid import ObjectId
>>> str_to_object_id_str(ObjectId('4e333f53ff42e928000007d8'))
Traceback (most recent call last):
AttributeError:
>>> str_to_object_id_str(u"ObjectId('4e333f53ff42e928000007d8')")
(u"ObjectId('4e333f53ff42e928000007d8')", u'Invalid value')
>>> str_to_object_id_str(None)
(None, None)
"""
if value is None:
return value, None
if state is None:
state = states.default_state
id = value.lower()
if object_id_re.match(id) is None:
return value, state._(u'Invalid value')
return id, None
# Level-2 Converters
input_to_object_id_str = pipe(cleanup_line, str_to_object_id_str)
"""Convert a string to a BSON ObjectId string.
>>> input_to_object_id_str(u'4e333f53ff42e928000007d8')
(u'4e333f53ff42e928000007d8', None)
>>> input_to_object_id_str('4e333f53ff42e928000007d8')
('4e333f53ff42e928000007d8', None)
>>> input_to_object_id_str(u'4E333F53FF42E928000007D8')
(u'4e333f53ff42e928000007d8', None)
>>> input_to_object_id_str(u' 4e333f53ff42e928000007d8 ')
(u'4e333f53ff42e928000007d8', None)
>>> from bson.objectid import ObjectId
>>> input_to_object_id_str(ObjectId('4e333f53ff42e928000007d8'))
Traceback (most recent call last):
AttributeError:
>>> input_to_object_id_str(u"ObjectId('4e333f53ff42e928000007d8')")
(u"ObjectId('4e333f53ff42e928000007d8')", u'Invalid value')
>>> input_to_object_id_str(u' ')
(None, None)
>>> input_to_object_id_str(None)
(None, None)
"""
str_to_object_id = pipe(str_to_object_id_str, function(bson.objectid.ObjectId))
"""Convert a clean string to a BSON ObjectId.
.. note:: For a converter that doesn't require a clean string, see :func:`input_to_object_id`.
.. note:: For a converter that doesn't fail when input data is already an ObjectId,
use :func:`anything_to_object_id`.
>>> str_to_object_id(u'4e333f53ff42e928000007d8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> str_to_object_id('4e333f53ff42e928000007d8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> str_to_object_id(u'4E333F53FF42E928000007D8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> from bson.objectid import ObjectId
>>> str_to_object_id(ObjectId('4e333f53ff42e928000007d8'))
Traceback (most recent call last):
AttributeError:
>>> str_to_object_id(u"ObjectId('4e333f53ff42e928000007d8')")
(u"ObjectId('4e333f53ff42e928000007d8')", u'Invalid value')
>>> str_to_object_id(None)
(None, None)
"""
# Level-3 Converters
input_to_object_id = pipe(cleanup_line, str_to_object_id)
"""Convert a string to a BSON ObjectId.
.. note:: For a converter that doesn't fail when input data is already an ObjectId,
use :func:`anything_to_object_id`.
>>> input_to_object_id(u'4e333f53ff42e928000007d8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> input_to_object_id('4e333f53ff42e928000007d8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> input_to_object_id(u'4E333F53FF42E928000007D8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> input_to_object_id(u' 4e333f53ff42e928000007d8 ')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> from bson.objectid import ObjectId
>>> input_to_object_id(ObjectId('4e333f53ff42e928000007d8'))
Traceback (most recent call last):
AttributeError:
>>> input_to_object_id(u"ObjectId('4e333f53ff42e928000007d8')")
(u"ObjectId('4e333f53ff42e928000007d8')", u'Invalid value')
>>> input_to_object_id(u' ')
(None, None)
>>> input_to_object_id(None)
(None, None)
"""
# Level-4 Converters
anything_to_object_id = first_match(
test_isinstance(bson.objectid.ObjectId),
pipe(
test_isinstance(basestring),
input_to_object_id,
),
)
"""Convert any compatible Python data to a BSON ObjectId.
>>> anything_to_object_id(u'4e333f53ff42e928000007d8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> anything_to_object_id('4e333f53ff42e928000007d8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> anything_to_object_id(u'4E333F53FF42E928000007D8')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> anything_to_object_id(u' 4e333f53ff42e928000007d8 ')
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> from bson.objectid import ObjectId
>>> anything_to_object_id(ObjectId('4e333f53ff42e928000007d8'))
(ObjectId('4e333f53ff42e928000007d8'), None)
>>> anything_to_object_id(u"ObjectId('4e333f53ff42e928000007d8')")
(u"ObjectId('4e333f53ff42e928000007d8')", u'Invalid value')
>>> anything_to_object_id(u' ')
(None, None)
>>> anything_to_object_id(None)
(None, None)
""" | PypiClean |
/Dpowers-0.1.5rc1.tar.gz/Dpowers-0.1.5rc1/README.md | Documentation see https://dpowers.readthedocs.io.
This package has not been tested properly, but it runs well on my PC
(Linux Mint). There are probably still some bugs. Help with testing and bug
reporting is appreciated.
If you have questions about a specific part or command of this project, let
me know. I can focus my work on the features that are needed most.
Ideas for the future:
- Extend the documentation. (Reference for all commands, more examples,
tutorials.)
- Add support for multiple systems/platforms.
- More testing and debugging.
- Explanations on how to add your own powers or backends. | PypiClean |
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/gleser.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def gleser(path):
"""Example data from Gleser, Cronbach and Rajaratnam (1965) to show basic pri
nciples of generalizability theory.
Gleser, Cronbach and Rajaratnam (1965) discuss the estimation of
variance components and their ratios as part of their introduction to
  generalizability theory. This is an adaptation of their "illustrative
data for a completely matched G study" (Table 3). 12 patients are rated
on 6 symptoms by two judges. Components of variance are derived from the
ANOVA.
A data frame with 12 observations on the following 12 variables. J item
by judge:
`J11`
a numeric vector
`J12`
a numeric vector
`J21`
a numeric vector
`J22`
a numeric vector
`J31`
a numeric vector
`J32`
a numeric vector
`J41`
a numeric vector
`J42`
a numeric vector
`J51`
a numeric vector
`J52`
a numeric vector
`J61`
a numeric vector
`J62`
a numeric vector
Gleser, G., Cronbach, L., and Rajaratnam, N. (1965). Generalizability of
scores influenced by multiple sources of variance. Psychometrika,
30(4):395-418. (Table 3, rearranged to show increasing patient severity
  and increasing item severity.)
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `gleser.csv`.
Returns:
Tuple of np.ndarray `x_train` with 12 rows and 12 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'gleser.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/psych/Gleser.csv'
maybe_download_and_extract(path, url,
save_file_name='gleser.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | PypiClean |
/GridWalker-1.0.1.zip/GridWalker-1.0.1/README.md | # GridWalker
An iterable multi-dimensional grid used for exhaustive search and dimensional
problem solving.
## Description
Defines a Grid object which allows for efficient iteration across an arbitrary
number of dimensions. The grid objects allow for iteration, multi-dimensional
select, and multi-dimensional slicing.
Accessing a grid follows the `__getitem__` convention of `[index]`. To retrieve
a multi-dimensional selection either `[index1, index2]` or `[(index1, index2)]`
may be used to specify the ordered dimensions of the grid to subselect. Any of
the index requests can be replaced by a slice object such that
`[index1, start2:end2:step2]` is a legal request.
Slices provide SubGrid objects which act as grids, but map their referenced data
back to the original grid object. This allows for repeated slicing of a grid with
near constant memory overhead at the cost of layered slice requests for each
change on the original data.
There are several provided Grids which are setup for efficiency for the given
data type. So far those include IntGrid, FloatGrid, and ObjectGrid -- the latter
of which is a general solution without efficient storage. These grid types
define the data being stored, rather than the indexing scheme. All grids use an
integer based indexing, though there are plans to create a float range grid which
does the float to index mapping behind the interface.
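A minimal usage sketch follows; the constructor signature here is an assumption
(it is not documented in this README), while the indexing, slicing, and
iteration follow the conventions described above:

    from gridwalker import IntGrid
    # Assumed constructor: a 3-dimensional integer grid with 10 values per
    # dimension -- the real signature may differ.
    grid = IntGrid(10, 10, 10)
    grid[1, 2, 3] = 42          # multi-dimensional assignment
    value = grid[(1, 2, 3)]     # tuple form of the same request
    sub = grid[0, 2:8:2]        # slicing yields a SubGrid view
    for element in sub:         # SubGrids iterate like grids
        print(element)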
Note that creating grids with many dimensions can take up an extremely large amount
of memory, even when using an efficient scheme. It also leads to very long
iteration times, as the number of elements to visit grows exponentially.
Take a 5 dimensional grid with 10 values for each dimension. This makes a 10^5
element grid -- which is 100k elements -- and would take ~400KB of storage space.
The same grid with 100 values for each dimension would have 10 billion elements
and take more than 37GB of memory to store.
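For reference, the arithmetic behind those figures (assuming 4 bytes per stored
value, consistent with the numbers above) works out as:

    values_per_dim = 10
    num_dims = 5
    elements = values_per_dim ** num_dims   # 100,000 elements
    kilobytes = elements * 4 / 1000         # ~400KB
    values_per_dim = 100
    elements = values_per_dim ** num_dims   # 10,000,000,000 elements
    gibibytes = elements * 4 / 2 ** 30      # ~37.3 GiB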
## Dependencies
* pydatawrap
* numpy
## Setup
### Installation
From source:
    python setup.py install
From pip:
pip install gridwalker
## Features
* Multi-Dimensional grid definitions with arbitrary number of dimensions
* Iteration and assignment through efficient means across any dimensional assignment
## Navigating the Repo
### gridwalker
The implementation files for the repository.
### tests
All unit tests for the repo.
## Language Preferences
* Google Style Guide
* Object Oriented (with a few exceptions)
## TODO
* Create float index grids for floating precision grid iteration
## Author
Author(s): Matthew Seal
© Copyright 2013, [OpenGov](http://opengov.com)
| PypiClean |
/Flask_Modals_markupsafe-0.5.1-py3-none-any.whl/flask_modals/static/js/main.js | (function () {
const formEls = document.querySelectorAll('form')
formEls.forEach(el => {
const modalBodyEl = el.closest('.modal-body')
if (modalBodyEl) {
el.addEventListener('submit', e => {
e.preventDefault()
fetchData(el, modalBodyEl, e.submitter)
})
}
})
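  // Submits the form via fetch() with a custom Accept header. If the server
  // returns a modal-stream fragment, it is swapped into the modal body and the
  // submit handler is re-bound; otherwise the code falls back to a native form
  // submit, preserving the submitter button's name/value as a hidden input.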
function fetchData(el, modalBodyEl, submitter) {
let url
const body = new FormData(el)
let submitterName, submitterValue
if (submitter) {
submitterName = submitter.getAttribute('name') || 'form-submit'
if (submitterName === 'submit') submitterName = 'form-submit'
submitterValue = submitter.getAttribute('value') || submitter.textContent
body.append(submitterName, submitterValue)
}
body.append('_ajax', true)
NProgress.start()
fetch(el.action, {
method: el.method,
body: body,
headers: {
Accept: 'text/modal-stream.html'
}
})
.then(res => {
if (res.ok) {
NProgress.done()
url = res.url
return res.text()
} else {
throw new Error(`Error fetching data. Status ${res.status}`)
}
})
.then(data => {
if (data) {
const doc = new DOMParser().parseFromString(data, "text/html")
const templateEl = doc.querySelector('template')
const newModalBodyEl = doc.importNode(templateEl.content, true)
.firstElementChild
modalBodyEl.innerHTML = newModalBodyEl.innerHTML
const el = modalBodyEl.querySelector('form')
el.addEventListener('submit', e => {
e.preventDefault()
fetchData(el, modalBodyEl, e.submitter)
})
} else {
const btn = el.querySelector('#submit, [name="submit"]')
if (btn) {
btn.removeAttribute('id')
btn.removeAttribute('name')
}
if (submitter) {
const inp = document.createElement('input')
inp.type = 'hidden'
inp.name = submitterName
inp.value = submitterValue
el.appendChild(inp)
}
el.submit()
}
})
.catch(err => {
NProgress.done()
console.log(err)
})
}
})() | PypiClean |
/KL_Audit_supportV3.5-1.0-py3-none-any.whl/AuditModule/util/LoggerUtil.py | from collections import OrderedDict
from logging import Formatter, FileHandler, StreamHandler, getLogger, INFO
from json import loads, dumps
def logger(name, handler, recordfields=None, level=INFO):
"""
returns logger object with logging level set and handlers added
:param name: logger name
:param handler: handler name
:param recordfields: fields for log record
:param level: logging level
:return:
"""
log = getLogger(name)
textformatter = JSONFormatter(recordfields)
handler.setFormatter(textformatter)
log.addHandler(handler)
log.setLevel(level)
return log
def filelogger(logname, recordfields=None, filename='json.log', level=INFO):
"""A convenience function to return a JSON file logger for simple situations.
Args:
logname : The name of the logger - to allow for multiple logs, and levels of logs in an application
recordfields : The metadata fields to add to the JSON record created by the logger
filename : The name of the file to be used in the logger
level : The logger level
Returns:
A JSON file logger.
"""
handler = FileHandler(filename, 'w')
return logger(logname, handler, recordfields, level)
def streamlogger(logname, recordfields=None, outputstream=None, level=INFO):
"""A convenience function to return a JSON stream logger for simple situations.
Args:
logname : The name of the logger - to allow for multiple logs, and levels of logs in an application
recordfields : The metadata fields to add to the JSON record created by the logger
outputstream : The outputstream to be used by the logger. sys.stderr is used when outputstream is None.
level : The logger level
Returns:
A JSON stream logger.
"""
handler = StreamHandler(outputstream)
return logger(logname, handler, recordfields, level)
def read_json_log(logfile, filterfunction=(lambda x: True), customjson=None):
"""Iterate through a log file of JSON records and return a list of JSON records that meet the filterfunction.
Args:
logfile : A file like object consisting of JSON records.
filterfunction : A function that returns True if the JSON record should be included in the output and False
otherwise.
customjson : A decoder function to enable the loading of custom json objects
Returns:
A list of Python objects built from JSON records that passed the filterfunction.
"""
json_records = []
for x in logfile:
rec = loads(x[:-1], object_hook=customjson)
if filterfunction(rec):
json_records.append(rec)
return json_records
class JSONFormatter(Formatter):
"""The JSONFormatter class outputs Python log records in JSON format.
    JSONFormatter assumes that log record metadata fields are specified at the formatter level as opposed to the
    record level. The specification of metadata fields at the formatter level allows for multiple handlers to display
differing levels of detail. For example, console log output might specify less detail to allow for quick problem
triage while file log output generated from the same data may contain more detail for in-depth investigations.
Attributes:
recordfields : A list of strings containing the names of metadata fields (see Python log record
documentation
for details) to add to the JSON output. Metadata fields will be added to the JSON record in
the order specified in the recordfields list.
customjson : A JSONEncoder subclass to enable writing of custom JSON objects.
"""
def __init__(self, recordfields=None, datefmt=None, customjson=None):
"""__init__ overrides the default constructor to accept a formatter specific list of metadata fields
Args:
recordfields : A list of strings referring to metadata fields on the record object. It can be empty.
The list of fields will be added to the JSON record created by the formatter.
"""
Formatter.__init__(self, None, datefmt)
self.recordfields = recordfields
self.customjson = customjson
def uses_time(self):
""" Overridden from the ancestor to look for the asctime attribute in the recordfields attribute.
The override is needed because of the change in design assumptions from the documentation for the logging module
The implementation in this object could be brittle if a new release changes the name or adds another time
attribute.
Returns:
boolean : True if asctime is in self.recordfields, False otherwise.
"""
return 'asctime' in self.recordfields
def _formattime(self, record):
if self.uses_time():
record.asctime = self.formatTime(record, self.datefmt)
def _getjsondata(self, record):
""" combines any supplied recordfields with the log record msg field into an object to convert to JSON
Args:
record : log record to output to JSON log
Returns:
An object to convert to JSON - either an ordered dict if recordfields are supplied or the record.msg
attribute
"""
if len(self.recordfields) > 0:
fields = []
for x in self.recordfields:
fields.append((x, getattr(record, x)))
fields.append(('msg', record.msg))
# An OrderedDict is used to ensure that the converted data appears in the same order for every record
return OrderedDict(fields)
else:
return record.msg
def format(self, record):
"""overridden from the ancestor class to take a log record and output a JSON formatted string.
Args:
record : log record to output to JSON log
Returns:
A JSON formatted string
"""
self._formattime(record)
jsondata = self._getjsondata(record)
formattedjson = dumps(jsondata, cls=self.customjson)
return formattedjson | PypiClean |
/HashDL-4.0.0.tar.gz/HashDL-4.0.0/README.md |
# Table of Contents
1. [Overview](#org7b3bbec)
2. [Install](#org2797716)
1. [Requirement](#org70644cf)
2. [Install from PyPI](#org467a72f)
3. [Install from Source](#org30c0b52)
3. [Features](#org50e9627)
4. [Implementation](#org77ddc65)
<a id="org7b3bbec"></a>
# Overview
This repository is a non-official third-party re-implementation of SLIDE<sup><a id="fnr.1" class="footref" href="#fn.1">1</a></sup>.
We provide
- Python package
- Hash based Deep Learning
- Parallel computing based on C++17 parallel STL
We don't provide
- Explicit CPU optimized code like AVX (We just rely on compiler optimization)
- Compiled binary (You need to compile by yourself)
<a id="org2797716"></a>
# Install
There are two options, "Install from PyPI" and "Install from Source".
For ordinary user, "Install from PyPI" is recommended.
For both case, sufficient C++ compiler is neccessary.
<a id="org70644cf"></a>
## Requirement
- Recent C++ compiler with parallel STL algorithm support
- [GCC](https://gcc.gnu.org/) 9.1 or newer together with [Intel TBB](https://github.com/oneapi-src/oneTBB)
- [Python](https://www.python.org/) 3
Requirements can be installed on Docker image [gcc:10](https://hub.docker.com/_/gcc).
# On local machine
docker run -it gcc:10 bash
# On gcc:10 image
apt update && apt install -y python3-pip libtbb-dev
<a id="org467a72f"></a>
## Install from PyPI
pip install HashDL
<a id="org30c0b52"></a>
## Install from Source
git clone https://gitlab.com/ymd_h/hashdl.git HashDL
cd HashDL
pip install .
<a id="org50e9627"></a>
# Features
- Neural Network
- hash-based sparse dense layer
- Activation
- ReLU
- linear (no activation)
- sigmoid
- Optimizer
- SGD
- Adam<sup><a id="fnr.2" class="footref" href="#fn.2">2</a></sup>
- Weight Initializer
- constant
- Gauss distribution
- Hash for similarity
- WTA
- DWTA<sup><a id="fnr.3" class="footref" href="#fn.3">3</a></sup>
- Scheduler for hash update
- constant
- exponential decay
In the current architecture, CNN is impossible.
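The snippet below is a purely hypothetical usage sketch: none of the class or
argument names are taken from the actual HashDL API, they only illustrate how
the pieces listed above (layers, activation, optimizer, hash, scheduler) are
meant to fit together. Consult the package itself for the real interface.

    import HashDL
    # Hypothetical names -- for illustration only, not the verified API.
    opt = HashDL.Adam()              # optimizer from the list above
    hasher = HashDL.DWTA()           # hash used for similarity
    net = HashDL.Network(
        input_size=784,              # e.g. flattened 28x28 images
        units=(128, 10),             # hash-based sparse dense layers
        optimizer=opt,
        hash=hasher,
    )
    net.fit(X_train, y_train)        # user-supplied training data
    predictions = net.predict(X_test)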
<a id="org77ddc65"></a>
# Implementation
The [official reference implementation](https://github.com/keroro824/HashingDeepLearning) focused on performance and
accepted some "dirtyness" like hard-coded magic number for algotihm
selection and unmanaged memory allocation.
We accept some (but hopefully small) overhead and improve
maintainability in terms of software;
- Polymorphism with inheritance and virtual function
- RAII and smart pointer for memory management
This architecture allows us to construct and manage C++ classes from
Python without recompiling.
We also rely on the recent C++ standard and compiler optimizations;
- Parallel STL from C++17
- Because of RVO (or at least move semantics), returning `std::vector`
  is not as costly as it used to be.
# Footnotes
<sup><a id="fn.1" href="#fnr.1">1</a></sup> [B. Chen *et al*., "SLIDE : In Defense of Smart Algorithms over Hardware Acceleration for Large-Scale Deep Learning Systems", MLSys 2020](https://mlsys.org/Conferences/2020/Schedule?showEvent=1410) ([arXiv](https://arxiv.org/abs/1903.03129), [code](https://github.com/keroro824/HashingDeepLearning))
<sup><a id="fn.2" href="#fnr.2">2</a></sup> [D. P. Kingma and J. Ba, "Adam: A Method for Stochastic Optimization", ICLR (2015)](https://iclr.cc/archive/www/doku.php%3Fid=iclr2015:main.html) ([arXiv](https://arxiv.org/abs/1412.6980))
<sup><a id="fn.3" href="#fnr.3">3</a></sup> [B. Chen *et al*., "Densified Winner Take All (WTA) Hashing for Sparse Datasets", Uncertainty in artificial intelligence (2018)](http://auai.org/uai2018/proceedings/papers/321.pdf)
| PypiClean |
/Authlib-1.2.1.tar.gz/Authlib-1.2.1/authlib/integrations/sqla_oauth2/functions.py | import time
def create_query_client_func(session, client_model):
"""Create an ``query_client`` function that can be used in authorization
server.
:param session: SQLAlchemy session
:param client_model: Client model class
"""
def query_client(client_id):
q = session.query(client_model)
return q.filter_by(client_id=client_id).first()
return query_client
def create_save_token_func(session, token_model):
"""Create an ``save_token`` function that can be used in authorization
server.
:param session: SQLAlchemy session
:param token_model: Token model class
"""
def save_token(token, request):
if request.user:
user_id = request.user.get_user_id()
else:
user_id = None
client = request.client
item = token_model(
client_id=client.client_id,
user_id=user_id,
**token
)
session.add(item)
session.commit()
return save_token
def create_query_token_func(session, token_model):
"""Create an ``query_token`` function for revocation, introspection
token endpoints.
:param session: SQLAlchemy session
:param token_model: Token model class
"""
def query_token(token, token_type_hint):
q = session.query(token_model)
if token_type_hint == 'access_token':
return q.filter_by(access_token=token).first()
elif token_type_hint == 'refresh_token':
return q.filter_by(refresh_token=token).first()
# without token_type_hint
item = q.filter_by(access_token=token).first()
if item:
return item
return q.filter_by(refresh_token=token).first()
return query_token
def create_revocation_endpoint(session, token_model):
"""Create a revocation endpoint class with SQLAlchemy session
and token model.
:param session: SQLAlchemy session
:param token_model: Token model class
"""
from authlib.oauth2.rfc7009 import RevocationEndpoint
query_token = create_query_token_func(session, token_model)
class _RevocationEndpoint(RevocationEndpoint):
def query_token(self, token, token_type_hint):
return query_token(token, token_type_hint)
def revoke_token(self, token, request):
now = int(time.time())
hint = request.form.get('token_type_hint')
token.access_token_revoked_at = now
if hint != 'access_token':
token.refresh_token_revoked_at = now
session.add(token)
session.commit()
return _RevocationEndpoint
def create_bearer_token_validator(session, token_model):
"""Create an bearer token validator class with SQLAlchemy session
and token model.
:param session: SQLAlchemy session
:param token_model: Token model class
"""
from authlib.oauth2.rfc6750 import BearerTokenValidator
class _BearerTokenValidator(BearerTokenValidator):
def authenticate_token(self, token_string):
q = session.query(token_model)
return q.filter_by(access_token=token_string).first()
return _BearerTokenValidator | PypiClean |
/Colr-0.9.1.tar.gz/Colr-0.9.1/colr/control_codes.py | import sys
from enum import Enum
__all__ = [
'cursor',
'erase',
'move',
'pos',
'position',
'scroll',
]
escape_sequence = '\033['
class EraseMethod(Enum):
END = 0
START = 1
ALL = ALL_MOVE = 2
ALL_ERASE = 3
ALL_MOVE_ERASE = 4
def __str__(self):
return str(self.value)
class CursorCodes(object):
""" Escape codes that deal with the cursor itself. """
@staticmethod
def hide():
""" Hide the cursor.
Esc[?25l
"""
return EscapeCode('?25l')
@staticmethod
def show():
""" Show the cursor.
Esc[?25h
"""
return EscapeCode('?25h')
class EraseCodes(object):
""" Escape codes that erase. """
@staticmethod
def display(method=EraseMethod.ALL_MOVE):
""" Clear the screen or part of the screen, and possibly moves the cursor
to the "home" position (1, 1). See `method` argument below.
Esc[<method>J
Arguments:
method: One of these possible values:
EraseMethod.END or 0:
Clear from cursor to the end of the screen.
EraseMethod.START or 1:
Clear from cursor to the start of the screen.
EraseMethod.ALL_MOVE or 2:
Clear all, and move home.
EraseMethod.ALL_ERASE or 3:
Clear all, and erase scrollback buffer.
EraseMethod.ALL_MOVE_ERASE or 4:
Like doing 2 and 3 in succession.
This is a feature of Colr. It is not standard.
Default: EraseMethod.ALL_MOVE (2)
"""
accepted_methods = ('0', '1', '2', '3', '4')
methodstr = str(method)
if methodstr not in accepted_methods:
raise ValueError('Invalid method, expected {}. Got: {!r}'.format(
', '.join(accepted_methods),
method,
))
if methodstr == '4':
methods = (2, 3)
else:
methods = (method, )
return EscapeCode(
''.join(str(EscapeCode('{}J'.format(m))) for m in methods)
)
@staticmethod
def line(method=EraseMethod.ALL):
""" Erase a line, or part of a line. See `method` argument below.
Cursor position does not change.
Esc[<method>K
Arguments:
method : One of these possible values:
EraseMethod.END or 0:
Clear from cursor to the end of the line.
EraseMethod.START or 1:
Clear from cursor to the start of the line.
EraseMethod.ALL or 2:
Clear the entire line.
Default: EraseMethod.ALL (2)
"""
methods = ('0', '1', '2')
if str(method) not in methods:
raise ValueError('Invalid method, expected {}. Got: {!r}'.format(
', '.join(methods),
method,
))
return EscapeCode('{}K'.format(method))
class EscapeCode(object):
""" Responsible for creating a full escape code sequence, with helper
methods for the resulting string.
"""
def __init__(self, code, nowrap=False):
""" Initialize an escape code. """
if not code:
raise ValueError(
'Empty/falsey code is not allowed. Got: {!r}'.format(code)
)
codestr = str(code)
if nowrap or codestr.startswith(escape_sequence):
# Already an escape sequence.
self.code = codestr
else:
# Shorter form used.
self.code = ''.join((escape_sequence, codestr))
def __repr__(self):
return repr(self.code)
def __str__(self):
return str(self.code)
def repeat(self, count=1):
""" Return an EscapeCode containing this escape code repeated `count`
times, joined by ';'.
If `count` is less than 1, '' is returned.
"""
# Not using for-loop, because the id for each item doesn't matter.
# This multiplication method is faster than [s for _ in range(count)].
# Tested with timeitcompare on my machine -Cj:
# https://github.com/welbornprod/timeitcompare
return self.__class__(';'.join([str(self)] * count))
def write(self, file=sys.stdout):
file.write(str(self))
file.flush()
class MoveCodes(object):
""" Escape codes that move the cursor. """
@staticmethod
def back(columns=1):
""" Move the cursor back a number of columns.
Esc[<columns>D:
Moves the cursor back by the specified number of columns without
changing lines. If the cursor is already in the leftmost column,
ANSI.SYS ignores this sequence.
"""
return EscapeCode('{}D'.format(columns))
@staticmethod
def carriage_return():
""" Move the cursor to the beginning of the line, using \\r.
This should act just like `move_column(1)`.
"""
return EscapeCode('\r', nowrap=True)
@staticmethod
def column(column=1):
""" Move the cursor to a specific column, default 1.
Esc[<column>G
"""
return EscapeCode('{}G'.format(column))
@staticmethod
def down(lines=1):
""" Move the cursor down a number of lines.
Esc[<lines>B:
Moves the cursor down by the specified number of lines without
changing columns. If the cursor is already on the bottom line,
ANSI.SYS ignores this sequence.
"""
return EscapeCode('{}B'.format(lines))
@staticmethod
def forward(columns=1):
""" Move the cursor forward a number of columns.
Esc[<columns>C:
Moves the cursor forward by the specified number of columns
without changing lines. If the cursor is already in the rightmost
column, ANSI.SYS ignores this sequence.
"""
return EscapeCode('{}C'.format(columns))
@staticmethod
def next(lines=1):
""" Move the cursor to the beginning of the line, a number of lines down.
Default: 1
Esc[<lines>E
"""
return EscapeCode('{}E'.format(lines))
@staticmethod
def pos(line=1, column=1):
""" Move the cursor to a new position. Values are 1-based, and default
to 1.
Esc[<line>;<column>H
or
Esc[<line>;<column>f
"""
return EscapeCode('{line};{col}H'.format(line=line, col=column))
@staticmethod
def prev(lines=1):
""" Move the cursor to the beginning of the line, a number of lines up.
Default: 1
Esc[<lines>F
"""
return EscapeCode('{}F'.format(lines))
@staticmethod
def up(lines=1):
""" Move the cursor up a number of lines.
Esc[ValueA:
Moves the cursor up by the specified number of lines without
changing columns. If the cursor is already on the top line,
ANSI.SYS ignores this sequence.
"""
return EscapeCode('{}A'.format(lines))
# Alias for move.carriage_return, since 'return' is a keyword.
MoveCodes.ret = MoveCodes.carriage_return # type: ignore
class PositionCodes(object):
""" Escape codes that deal with the current cursor position. """
@staticmethod
def restore():
""" Restore cursor position saved with `save()`.
Esc[u:
Returns the cursor to the position stored by the
'save cursor position' sequence (`restore()`).
"""
return EscapeCode('u')
@staticmethod
def save():
""" Save current cursor position. Can be restored with `restore()`.
Esc[s:
Saves the current cursor position. You can move the cursor to the
saved cursor position by using the 'restore cursor position'
sequence (`restore()`).
"""
return EscapeCode('s')
# Alias for move.pos, since both deal with moving/positions.
PositionCodes.set = MoveCodes.pos # type: ignore
class ScrollCodes(object):
""" Escape codes for scrolling the window. """
@staticmethod
def down(lines=1):
""" Scroll the whole page down a number of lines, new lines are added
to the top.
Esc[<lines>T
"""
return EscapeCode('{}T'.format(lines))
@staticmethod
def up(lines=1):
""" Scroll the whole page up a number of lines, new lines are added
to the bottom.
Esc[<lines>S
"""
return EscapeCode('{}S'.format(lines))
cursor = CursorCodes()
erase = EraseCodes()
move = MoveCodes()
position = pos = PositionCodes()
scroll = ScrollCodes() | PypiClean |
/NovalIDE-1.1.8-py3-none-any.whl/noval/filewatcher.py | from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import os
from noval.util.singleton import *
import noval.util.utils as utils
@Singleton
class FileAlarmWatcher():
path_watchers = {}
def AddFileDoc(self,doc):
file_path = os.path.dirname(doc.GetFilename())
if file_path not in self.path_watchers:
path_watcher = PathWatcher(file_path)
self.path_watchers[file_path] = path_watcher
else:
path_watcher = self.path_watchers[file_path]
path_watcher.AddFile(doc)
def RemoveFileDoc(self,doc):
file_name_path = doc.GetFilename()
file_path = os.path.dirname(file_name_path)
assert file_path in self.path_watchers
path_watcher = self.path_watchers[file_path]
path_watcher.RemoveFile(doc)
if 0 == path_watcher.GetFileCount():
path_watcher.Stop()
self.path_watchers.pop(file_path)
def RemoveFile(self,file_name_path):
file_path = os.path.dirname(file_name_path)
assert file_path in self.path_watchers
path_watcher = self.path_watchers[file_path]
path_watcher.RemoveFilePath(file_name_path)
if 0 == path_watcher.GetFileCount():
path_watcher.Stop()
self.path_watchers.pop(file_path)
def StopWatchFile(self,doc):
file_name_path = doc.GetFilename()
file_path = os.path.dirname(file_name_path)
if file_path not in self.path_watchers:
return
path_watcher = self.path_watchers[file_path]
path_watcher.Stop()
def StartWatchFile(self,doc):
file_name_path = doc.GetFilename()
file_path = os.path.dirname(file_name_path)
if file_path not in self.path_watchers:
self.AddFileDoc(doc)
else:
path_watcher = self.path_watchers[file_path]
path_watcher.AddFile(doc)
path_watcher.Start()
def IsFileWatched(self,filePath):
dir_path = os.path.dirname(filePath)
if dir_path in self.path_watchers:
path_watcher = self.path_watchers[dir_path]
return path_watcher.IsFileWatched(filePath)
return False
def IsDocFileWatched(self,doc):
return self.IsFileWatched(doc.GetFilename())
class PathWatcher(object):
def __init__(self,path):
self.file_docs = {}
self._path = path
self.event_handler = FileEventHandler(self)
self._is_watched = False
self.Start()
def Stop(self):
try:
            # TODO: work around a bug in the watchdog package; needs a proper fix
self.observer.stop()
self.observer.join(5)
except Exception as e:
utils.get_logger().error('stop file watcher error %s',e)
self._is_watched = False
def Start(self):
if self._is_watched:
self.Stop()
self.observer = Observer()
self.observer.schedule(self.event_handler, path=self._path, recursive=False)
self.observer.start()
self._is_watched = True
def AddFile(self,doc):
file_name_path = doc.GetFilename()
##lower_file_path = file_path.lower()
if file_name_path not in self.file_docs:
self.file_docs[file_name_path] = doc
def RemoveFile(self,doc):
file_name_path = doc.GetFilename()
assert file_name_path in self.file_docs
self.file_docs.pop(file_name_path)
def RemoveFilePath(self,file_name_path):
assert file_name_path in self.file_docs
self.file_docs.pop(file_name_path)
def GetFileCount(self):
return len(self.file_docs)
@property
def Path(self):
return self._path
def FileAlarm(self,file_path,event_alarm_type):
if file_path in self.file_docs:
file_doc = self.file_docs[file_path]
file_doc.GetFirstView().Alarm(event_alarm_type)
def IsFileWatched(self,filePath):
if filePath in self.file_docs:
return True
return False
class FileEventHandler(FileSystemEventHandler):
FILE_DELETED_EVENT = 1
FILE_MOVED_EVENT = 2
FILE_MODIFY_EVENT = 3
def __init__(self,path_watcher):
self._path_watcher = path_watcher
def on_modified(self, event):
self._path_watcher.FileAlarm(event.src_path,self.FILE_MODIFY_EVENT)
def on_moved(self,event):
if os.path.exists(event.src_path):
return
self._path_watcher.FileAlarm(event.src_path,self.FILE_MOVED_EVENT)
def on_deleted(self,event):
self._path_watcher.FileAlarm(event.src_path,self.FILE_DELETED_EVENT) | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/paper-slider/README.md | [](https://travis-ci.org/PolymerElements/paper-slider)
## <paper-slider>
Material design: [Sliders](https://www.google.com/design/spec/components/sliders.html)
`paper-slider` allows user to select a value from a range of values by
moving the slider thumb. The interactive nature of the slider makes it a
great choice for settings that reflect intensity levels, such as volume,
brightness, or color saturation.
<!---
```
<custom-element-demo>
<template>
<script src="../webcomponentsjs/webcomponents-lite.js"></script>
<link rel="import" href="paper-slider.html">
<next-code-block></next-code-block>
</template>
</custom-element-demo>
```
-->
```html
<paper-slider value="50"></paper-slider>
```
| PypiClean |
/CherryMusic-0.41.3.tar.gz/CherryMusic-0.41.3/res/bootstrap3/js/affix.js | +function ($) { "use strict";
// AFFIX CLASS DEFINITION
// ======================
var Affix = function (element, options) {
this.options = $.extend({}, Affix.DEFAULTS, options)
this.$window = $(window)
.on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))
.on('click.bs.affix.data-api', $.proxy(this.checkPositionWithEventLoop, this))
this.$element = $(element)
this.affixed =
this.unpin = null
this.checkPosition()
}
Affix.RESET = 'affix affix-top affix-bottom'
Affix.DEFAULTS = {
offset: 0
}
Affix.prototype.checkPositionWithEventLoop = function () {
setTimeout($.proxy(this.checkPosition, this), 1)
}
Affix.prototype.checkPosition = function () {
if (!this.$element.is(':visible')) return
var scrollHeight = $(document).height()
var scrollTop = this.$window.scrollTop()
var position = this.$element.offset()
var offset = this.options.offset
var offsetTop = offset.top
var offsetBottom = offset.bottom
if (typeof offset != 'object') offsetBottom = offsetTop = offset
if (typeof offsetTop == 'function') offsetTop = offset.top()
if (typeof offsetBottom == 'function') offsetBottom = offset.bottom()
var affix = this.unpin != null && (scrollTop + this.unpin <= position.top) ? false :
offsetBottom != null && (position.top + this.$element.height() >= scrollHeight - offsetBottom) ? 'bottom' :
offsetTop != null && (scrollTop <= offsetTop) ? 'top' : false
if (this.affixed === affix) return
if (this.unpin) this.$element.css('top', '')
this.affixed = affix
this.unpin = affix == 'bottom' ? position.top - scrollTop : null
this.$element.removeClass(Affix.RESET).addClass('affix' + (affix ? '-' + affix : ''))
if (affix == 'bottom') {
this.$element.offset({ top: document.body.offsetHeight - offsetBottom - this.$element.height() })
}
}
// AFFIX PLUGIN DEFINITION
// =======================
var old = $.fn.affix
$.fn.affix = function (option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.affix')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.affix', (data = new Affix(this, options)))
if (typeof option == 'string') data[option]()
})
}
$.fn.affix.Constructor = Affix
// AFFIX NO CONFLICT
// =================
$.fn.affix.noConflict = function () {
$.fn.affix = old
return this
}
// AFFIX DATA-API
// ==============
$(window).on('load', function () {
$('[data-spy="affix"]').each(function () {
var $spy = $(this)
var data = $spy.data()
data.offset = data.offset || {}
if (data.offsetBottom) data.offset.bottom = data.offsetBottom
if (data.offsetTop) data.offset.top = data.offsetTop
$spy.affix(data)
})
})
}(window.jQuery); | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_hy.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"\u056f\u0565\u057d\u0585\u0580\u056b\u0581 \u0561\u057c\u0561\u057b",
"\u056f\u0565\u057d\u0585\u0580\u056b\u0581 \u0570\u0565\u057f\u0578"
],
"DAY": [
"\u056f\u056b\u0580\u0561\u056f\u056b",
"\u0565\u0580\u056f\u0578\u0582\u0577\u0561\u0562\u0569\u056b",
"\u0565\u0580\u0565\u0584\u0577\u0561\u0562\u0569\u056b",
"\u0579\u0578\u0580\u0565\u0584\u0577\u0561\u0562\u0569\u056b",
"\u0570\u056b\u0576\u0563\u0577\u0561\u0562\u0569\u056b",
"\u0578\u0582\u0580\u0562\u0561\u0569",
"\u0577\u0561\u0562\u0561\u0569"
],
"MONTH": [
"\u0570\u0578\u0582\u0576\u057e\u0561\u0580\u056b",
"\u0583\u0565\u057f\u0580\u057e\u0561\u0580\u056b",
"\u0574\u0561\u0580\u057f\u056b",
"\u0561\u057a\u0580\u056b\u056c\u056b",
"\u0574\u0561\u0575\u056b\u057d\u056b",
"\u0570\u0578\u0582\u0576\u056b\u057d\u056b",
"\u0570\u0578\u0582\u056c\u056b\u057d\u056b",
"\u0585\u0563\u0578\u057d\u057f\u0578\u057d\u056b",
"\u057d\u0565\u057a\u057f\u0565\u0574\u0562\u0565\u0580\u056b",
"\u0570\u0578\u056f\u057f\u0565\u0574\u0562\u0565\u0580\u056b",
"\u0576\u0578\u0575\u0565\u0574\u0562\u0565\u0580\u056b",
"\u0564\u0565\u056f\u057f\u0565\u0574\u0562\u0565\u0580\u056b"
],
"SHORTDAY": [
"\u056f\u056b\u0580",
"\u0565\u0580\u056f",
"\u0565\u0580\u0584",
"\u0579\u0580\u0584",
"\u0570\u0576\u0563",
"\u0578\u0582\u0580",
"\u0577\u0562\u0569"
],
"SHORTMONTH": [
"\u0570\u0576\u057e",
"\u0583\u057f\u057e",
"\u0574\u0580\u057f",
"\u0561\u057a\u0580",
"\u0574\u0575\u057d",
"\u0570\u0576\u057d",
"\u0570\u056c\u057d",
"\u0585\u0563\u057d",
"\u057d\u0565\u057a",
"\u0570\u0578\u056f",
"\u0576\u0578\u0575",
"\u0564\u0565\u056f"
],
"fullDate": "y\u0569. MMMM d, EEEE",
"longDate": "dd MMMM, y\u0569.",
"medium": "dd MMM, y\u0569. H:mm:ss",
"mediumDate": "dd MMM, y\u0569.",
"mediumTime": "H:mm:ss",
"short": "dd.MM.yy H:mm",
"shortDate": "dd.MM.yy",
"shortTime": "H:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "Dram",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 0,
"lgSize": 0,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 0,
"lgSize": 0,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "hy",
"pluralCat": function(n, opt_precision) { var i = n | 0; if (i == 0 || i == 1) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/Cydra-0.2.tar.gz/Cydra-0.2/cydra/permission/__init__.py |
from functools import partial
from cydra.component import Interface, Component, implements, ExtensionPoint, FallbackAttributeProxy
import logging
logger = logging.getLogger(__name__)
virtual_owner_permissions = {'admin': True, 'owner': True}
class PermissionProviderAttributeProxy(object):
"""Attribute Proxy object for permission providers"""
def __init__(self):
pass
def __call__(self, interface, components, name):
return partial(getattr(self, name), components)
def get_permissions(self, components, project, user, object):
perms = {}
for provider in components:
if hasattr(provider, 'get_permissions'):
perms.update(provider.get_permissions(project, user, object))
return perms
def get_group_permissions(self, components, project, group, object):
perms = {}
for provider in components:
if hasattr(provider, 'get_group_permissions'):
perms.update(provider.get_group_permissions(project, group, object))
return perms
def get_permission(self, components, project, user, object, permission):
value = None
for provider in components:
if hasattr(provider, 'get_permission'):
value = provider.get_permission(project, user, object, permission)
if value is not None:
return value
def get_group_permission(self, components, project, group, object, permission):
value = None
for provider in components:
if hasattr(provider, 'get_group_permission'):
value = provider.get_group_permission(project, group, object, permission)
if value is not None:
return value
def set_permission(self, components, project, user, object, permission, value=None):
for provider in components:
if hasattr(provider, 'set_permission') and provider.set_permission(project, user, object, permission, value):
return True
def set_group_permission(self, components, project, group, object, permission, value=None):
for provider in components:
if hasattr(provider, 'set_group_permission') and provider.set_group_permission(project, group, object, permission, value):
return True
def get_projects_user_has_permissions_on(self, components, user):
projects = set()
for provider in components:
if hasattr(provider, 'get_projects_user_has_permissions_on'):
projects.update(provider.get_projects_user_has_permissions_on(user))
return projects
class IPermissionProvider(Interface):
"""Used to lookup permissions for a user
Permissions are given for a userid,object tuple. A permission provider
can either return True (user has the specified permission on the specified object),
False (user does not have the permission) or None (provider has no authority)
"""
_iface_attribute_proxy = PermissionProviderAttributeProxy()
def get_permissions(self, project, user, object):
"""Retrieve all permissions a user has on a certain object
:param project: project instance. If None is supplied, global permissions are checked
:param user: User object. user of '*' means any user/guest access. None will enumerate all users
:param object: object (dotted, hierarchical string) or None to enumerate all objects
:return: dict of permission: value entries or a dict of object: {permission: value} entries if object is None"""
pass
def get_group_permissions(self, project, group, object):
"""Retrieve all permissions a group has on a certain object
:param project: project instance. If None is supplied, global permissions are checked
:param group: Group object
:param object: object (dotted, hierarchical string) or None to enumerate all objects
:return: dict of permission: value entries or a dict of object: {permission: value} entries if object is None"""
pass
def get_permission(self, project, user, object, permission):
"""Test if the user has this permission
:param project: project instance. If None is supplied, global permissions are checked
:param user: userid. user of '*' means any user/guest access
:param object: object (dotted, hierarchical string)
:param permission: permission (eg: read, write, view, edit, ...)
:return: True if the user has this permission, False if not and None if undefined in this provider"""
pass
def get_group_permission(self, project, group, object, permission):
"""Test if the group has this permission
:param project: project instance. If None is supplied, global permissions are checked
:param group: Group object
:param object: object (dotted, hierarchical string)
:param permission: permission (eg: read, write, view, edit, ...)
:return: True if the user has this permission, False if not and None if undefined in this provider"""
pass
def set_permission(self, project, user, object, permission, value=None):
"""Set or unset the permission
:param project: project instance. If None is supplied, global permissions are set
:param user: userid. user of '*' means any user/guest access
:param object: object (dotted, hierarchical string)
:param permission: permission (eg: read, write, view, edit, ...)
:return: True if successfully set, None if this provider is not authoritative for this tuple"""
pass
def set_group_permission(self, project, group, object, permission, value=None):
"""Set or unset the permission
:param project: project instance. If None is supplied, global permissions are set
:param group: Group object
:param object: object (dotted, hierarchical string)
:param permission: permission (eg: read, write, view, edit, ...)
:return: True if successfully set, None if this provider is not authoritative for this tuple"""
pass
def get_projects_user_has_permissions_on(self, userid):
"""Get all projects a user has permissions on
:param userid: User to test for
:return: List of projects
"""
pass
class Subject(object):
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.id)
def __unicode__(self):
return u'<%s: %s>' % (self.__class__.__name__, self.id)
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
return self.id == other.id
class Group(Subject):
"""Represents a group"""
groupid = None
name = None
def __init__(self, component_manager, groupid, **kwargs):
self.compmgr = component_manager
self.groupid = groupid
for key, value in kwargs.items():
if not hasattr(self, key) or getattr(self, key) is None: #do not overwrite internals
setattr(self, key, value)
@property
def id(self):
return self.groupid
class User(Subject):
"""Represents a user
Note that a user with userid '*' is considered an anonymous, unauthenticatable
guest user. The username and full_name should be set to 'Guest' in this case."""
userid = None
username = None
full_name = None
groups = []
def __init__(self, component_manager, userid, **kwargs):
self.compmgr = component_manager
self.userid = userid
for key, value in kwargs.items():
if not hasattr(self, key) or getattr(self, key) is None or getattr(self, key) == []: #do not overwrite internals
setattr(self, key, value)
@property
def is_guest(self):
return self.userid == '*'
@property
def id(self):
return self.userid
class IUserTranslator(Interface):
"""Translates various aspects of users"""
_iface_attribute_proxy = FallbackAttributeProxy()
def username_to_user(self, username):
"""Given a username (what a user can use to log in) find the user
:returns: a User object on success, None on failure"""
pass
def userid_to_user(self, userid):
"""Given a userid find the user
A translator should construct a guest user if the userid is '*'."""
pass
    def groupid_to_group(self, groupid):
"""Given a groupid find the group
:returns: a Group object on success, None on failure"""
pass
class IUserAuthenticator(Interface):
"""Authenticate users"""
_iface_attribute_proxy = FallbackAttributeProxy()
def user_password(self, user, password):
"""Authenticate users by user and password"""
pass
def object_walker(obj):
parts = obj.split('.')
numparts = len(parts)
for i, part in enumerate(parts):
yield '.'.join(parts[0:numparts - i])
if obj != '*':
yield '*'
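# For reference, object_walker yields the object itself followed by each of its
# ancestors in the dotted hierarchy and finally the '*' wildcard, e.g. (the
# object name is made up for illustration):
#
#   list(object_walker('repository.git.push'))
#   # -> ['repository.git.push', 'repository.git', 'repository', '*']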
class StaticGlobalPermissionProvider(Component):
"""Global permissions defined in config"""
implements(IPermissionProvider)
def get_permissions(self, project, user, object):
if project is not None:
return {}
if user is not None and not user.is_guest and object == 'projects':
return {'create': True}
def get_group_permissions(self, project, group, object):
return {}
def get_permission(self, project, user, object, permission):
if project is not None:
return None
if user is not None and not user.is_guest and object == 'projects' and permission == 'create':
return True
def get_group_permission(self, project, group, object, permission):
return None
def set_permission(self, project, user, object, permission, value=None):
return None
def set_group_permission(self, project, group, object, permission, value=None):
return None
def get_projects_user_has_permissions_on(self, userid):
return []
class InternalPermissionProvider(Component):
"""Stores permissions in the project's dict
Example::
'permissions': {
'*': {'object': ['read']},
'user': {'*': ['admin']}
}
"""
implements(IPermissionProvider)
MODE_GROUP, MODE_USER = range(2)
PERMISSION_ROOT = {MODE_GROUP: 'group_permissions', MODE_USER: 'permissions'}
def get_permissions(self, project, user, obj):
return self._get_permissions(self.MODE_USER, project, user, obj)
def get_group_permissions(self, project, group, obj):
return self._get_permissions(self.MODE_GROUP, project, group, obj)
def _get_permissions(self, mode, project, subject, obj):
if project is None:
return {} # no project, no permissions
# Resolve root of permissions and translator
# depending on what we try to find
permroot = self.PERMISSION_ROOT[mode]
if mode == self.MODE_USER:
translator = self.compmgr.get_user
elif mode == self.MODE_GROUP:
translator = self.compmgr.get_group
else:
raise ValueError('Unknown mode')
res = {}
perms = project.data.get(permroot, {})
# if both subject and obj are None, return all (subject, obj, perm)
# copy whole structure to prevent side effects
if subject is None and obj is None:
for s, objs in perms.items():
s = translator(s)
res[s] = {}
                for o, perm in objs.items():
res[s][o] = perm.copy()
# Inject global owner permissions if necessary
if mode == self.MODE_USER:
res.setdefault(project.owner, {}).setdefault('*', {}).update(virtual_owner_permissions)
return res
# construct a list of objects in the hierarchy
if obj is not None:
objparts = list(object_walker(obj))
objparts.reverse()
# if subject is none, find all subjects and return all (subject, perm)
# we know here that obj is not none as we handled subject none and obj none
# case above
if subject is None:
for s, p in perms.items():
s = translator(s)
res[s] = {}
for o in objparts:
if o in p:
res[s].update(p[o].copy())
# delete empty entries
if res[s] == {}:
del res[s]
# Inject global owner permissions if necessary
if mode == self.MODE_USER:
res.setdefault(project.owner, {}).update(virtual_owner_permissions)
return res
# subject is given.
# in case of user mode, we also check the guest account
subjects = [subject.id]
if mode == self.MODE_USER:
subjects.append('*')
for p in [perms[x] for x in subjects if x in perms]:
if obj is not None:
for o in objparts:
if o in p:
res.update(p[o].copy())
else:
for o in p:
res[o] = p[o].copy()
# this is the owner, Inject global owner perms
if mode == self.MODE_USER and project.owner == subject:
if obj is None:
res.setdefault('*', {}).update(virtual_owner_permissions)
else:
res.update(virtual_owner_permissions)
# also inject all group permissions
if mode == self.MODE_USER:
for group in [x for x in subject.groups if x is not None]: # safeguard against failing translators
res.update(self.get_group_permissions(project, group, obj))
return res
def get_permission(self, project, user, obj, permission):
return self._get_permission(self.MODE_USER, project, user, obj, permission)
def get_group_permission(self, project, group, obj, permission):
return self._get_permission(self.MODE_GROUP, project, group, obj, permission)
def _get_permission(self, mode, project, subject, obj, permission):
if project is None:
return None
if subject is None:
return None
if obj is None:
return None
# Resolve root of permissions and translator
# depending on what we try to find
permroot = self.PERMISSION_ROOT[mode]
if mode == self.MODE_USER:
translator = self.compmgr.get_user
elif mode == self.MODE_GROUP:
translator = self.compmgr.get_group
else:
raise ValueError('Unknown mode')
# the owner can do everything
if mode == self.MODE_USER and project.owner == subject:
return True
perms = project.data.get(permroot, {})
# What we want to find here is a specific permission on a specific
# object. First get the most precise. If we have a conflict, return the most positive one
ret = None
# If we are in user mode, check groups first
if mode == self.MODE_USER:
for group in subject.groups:
ret = self._merge_perm_values(ret, self.get_group_permission(project, group, obj, permission))
# root level -> find subject in perms
if subject.id in perms:
perms = perms[subject.id]
elif mode == self.MODE_USER and '*' in perms: # if we are in user mode, fall back to guest
perms = perms['*']
else:
return ret
# subject level. Now walk the tree. deeper value overwrites lower
subjret = None
for o in object_walker(obj):
if o in perms:
# object level
perm = perms[o].get(permission, None)
if perm is None:
perm = perms[o].get('admin', None)
if perm is not None:
subjret = perm
# now merge subjret with the previous value
return self._merge_perm_values(ret, subjret)
def _merge_perm_values(self, a, b):
if a == False or b == False:
return False
elif a == True or b == True:
return True
else:
return None
def set_permission(self, project, user, obj, permission, value=None):
return self._set_permission(self.MODE_USER, project, user, obj, permission, value)
def set_group_permission(self, project, group, obj, permission, value=None):
return self._set_permission(self.MODE_GROUP, project, group, obj, permission, value)
def _set_permission(self, mode, project, subject, obj, permission, value=None):
if project is None:
return None
if subject is None:
return None
if obj is None:
return None
# Resolve root of permissions depending on what we try to find
permroot = self.PERMISSION_ROOT[mode]
if value is None:
# check if the permission is set, otherwise do nothing
if permission in project.data.get(permroot, {}).get(subject.id, {}).get(obj, {}):
# remove permission
del project.data[permroot][subject.id][obj][permission]
if project.data[permroot][subject.id][obj] == {}:
del project.data[permroot][subject.id][obj]
if project.data[permroot][subject.id] == {}:
del project.data[permroot][subject.id]
else:
project.data.setdefault(permroot, {}).setdefault(subject.id, {}).setdefault(obj, {})[permission] = value
project.save()
return True
def get_projects_user_has_permissions_on(self, user):
res = set([project for project in self.compmgr.get_projects_where_key_exists(['permissions', user.userid]) if any(project.data.get('permissions', {}).get(user.userid, {}).values())])
for group in user.groups:
res.update(set([project for project in self.compmgr.get_projects_where_key_exists(['group_permissions', group.id]) if any(project.data.get('group_permissions', {}).get(group.id, {}).values())]))
res.update(self.compmgr.get_projects_owned_by(user))
return res
class NopTranslator(Component):
"""Dummy user translator"""
def username_to_user(self, username):
return User(self.compmgr, username)
def userid_to_user(self, userid):
return User(self.compmgr, userid) | PypiClean |
/Flask-Hashing-1.1.tar.gz/Flask-Hashing-1.1/flask_hashing.py | import hashlib
try:
from hashlib import algorithms as algs
except ImportError:
from hashlib import algorithms_available as algs
from sys import version_info
VER = version_info[0]
class Hashing(object):
'''An extension that provides easy hashing and comparing of hashes to a
Flask application. This extension uses the standard library ``hashlib``
to allow access to any available hash functions on the system via OpenSSL,
depending on your version of Python in use.
The ``hashlib`` module guarantees access to ``md5``, ``sha1``, ``sha224``,
``sha256``, ``sha384``, and ``sha512``.
To begin using this extension you must first wrap the application.::
from flask import Flask
from flask.ext.hashing import Hashing
app = Flask(__name__)
hashing = Hashing(app)
    If you prefer to use the factory pattern you can also use :class:`Hashing` as follows::
from flask import Flask
from flask.ext.hashing import Hashing
hashing = Hashing()
# do some stuff
app = create_app()
hashing.init_app(app)
    If you would like to customize your instance of :class:`Hashing`, you may specify values
for HASHING_METHOD and HASHING_ROUNDS in the Flask application configuration.
HASHING_METHOD defaults to ``sha256`` and HASHING_ROUNDS defaults to 1. If you
are using anything less than Python 2.7.9 you will only have the guaranteed
functions provided by ``hashlib``. Python 2.7.9 or higher allows access to OpenSSL
hash functions. The name you supply to HASHING_METHOD must be valid to ``hashlib``.
To get a list of valid names, supply a random string to HASHING_METHOD and check
    the output when initializing your application (it raises an exception), or check
``hashlib.algorithms`` for Python 2.7.8 or less, or ``hashlib.algorithms_available``
if using Python 2.7.9+.
'''
algorithm = 'sha256'
rounds = 1
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
'''Initializes the Flask application with this extension. It grabs
the necessary configuration values from ``app.config``, those being
HASHING_METHOD and HASHING_ROUNDS. HASHING_METHOD defaults to ``sha256``
but can be any one of ``hashlib.algorithms``. HASHING_ROUNDS specifies
the number of times to hash the input with the specified algorithm.
This defaults to 1.
:param app: Flask application object
'''
self.algorithm = app.config.get('HASHING_METHOD', 'sha256')
if self.algorithm not in algs:
raise ValueError('{} not one of {}'.format(self.algorithm, algs))
self.rounds = app.config.get('HASHING_ROUNDS', 1)
if not isinstance(self.rounds, int):
raise TypeError('HASHING_ROUNDS must be type int')
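    # Configuration sketch: the two keys below are exactly the ones read by
    # init_app above; the chosen values are arbitrary examples.
    #
    #   app.config['HASHING_METHOD'] = 'sha512'
    #   app.config['HASHING_ROUNDS'] = 10
    #   hashing = Hashing(app)
    #   digest = hashing.hash_value('mysecretdata', salt='abcd')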
def hash_value(self, value, salt=''):
'''Hashes the specified value combined with the specified salt.
The hash is done HASHING_ROUNDS times as specified by the application
configuration.
        An example usage of :meth:`hash_value` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
# save to a db or check against known hash
:param value: The value we want hashed
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: The resulting hash as a string
:rtype: str
'''
def hashit(value, salt):
h = hashlib.new(self.algorithm)
tgt = salt+value
h.update(tgt)
return h.hexdigest()
def fix_unicode(value):
if VER < 3 and isinstance(value, unicode):
value = str(value)
elif VER >= 3 and isinstance(value, str):
value = str.encode(value)
return value
salt = fix_unicode(salt)
for i in range(self.rounds):
value = fix_unicode(value)
value = hashit(value, salt)
return value
def check_value(self, value_hash, value, salt=''):
'''Checks the specified hash value against the hash of the provided
salt and value.
        An example usage of :meth:`check_value` would be::
val_hash = hashing.hash_value('mysecretdata', salt='abcd')
if hashing.check_value(val_hash, 'mysecretdata', salt='abcd'):
# do something special
:param value_hash: The hash value to check against
:param value: The value we want hashed to compare
:param salt: The salt to use when generating the hash of ``value``. Default is ''.
:return: True if equal, False otherwise
:rtype: bool
'''
h = self.hash_value(value, salt=salt)
return h == value_hash | PypiClean |
/ElectronCounting-1.0.0.tar.gz/ElectronCounting-1.0.0/CountingNN/archive/box_ops.py | import math
import torch
import torchvision
class BoxCoder:
"""
Transforming between raw format (xmin, ymin, xmax, ymax) and regression format
"""
def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
self.weights = weights
self.bbox_xform_clip = bbox_xform_clip
def encode(self, reference_box, proposal):
"""
Encode a set of proposals with respect to some
reference boxes (gt)
Arguments:
reference_boxes (Tensor[N, 4]): reference boxes
proposals (Tensor[N, 4]): boxes to be encoded
"""
width = proposal[:, 2] - proposal[:, 0]
height = proposal[:, 3] - proposal[:, 1]
ctr_x = proposal[:, 0] + 0.5 * width
ctr_y = proposal[:, 1] + 0.5 * height
gt_width = reference_box[:, 2] - reference_box[:, 0]
gt_height = reference_box[:, 3] - reference_box[:, 1]
gt_ctr_x = reference_box[:, 0] + 0.5 * gt_width
gt_ctr_y = reference_box[:, 1] + 0.5 * gt_height
dx = self.weights[0] * (gt_ctr_x - ctr_x) / width
dy = self.weights[1] * (gt_ctr_y - ctr_y) / height
dw = self.weights[2] * torch.log(gt_width / width)
dh = self.weights[3] * torch.log(gt_height / height)
delta = torch.stack((dx, dy, dw, dh), dim=1)
return delta
def decode(self, delta, box):
"""
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
Arguments:
delta (Tensor[N, 4]): encoded boxes.
boxes (Tensor[N, 4]): reference boxes.
"""
dx = delta[:, 0] / self.weights[0]
dy = delta[:, 1] / self.weights[1]
dw = delta[:, 2] / self.weights[2]
dh = delta[:, 3] / self.weights[3]
dw = torch.clamp(dw, max=self.bbox_xform_clip)
dh = torch.clamp(dh, max=self.bbox_xform_clip)
width = box[:, 2] - box[:, 0]
height = box[:, 3] - box[:, 1]
ctr_x = box[:, 0] + 0.5 * width
ctr_y = box[:, 1] + 0.5 * height
pred_ctr_x = dx * width + ctr_x
pred_ctr_y = dy * height + ctr_y
pred_w = torch.exp(dw) * width
pred_h = torch.exp(dh) * height
xmin = pred_ctr_x - 0.5 * pred_w
ymin = pred_ctr_y - 0.5 * pred_h
xmax = pred_ctr_x + 0.5 * pred_w
ymax = pred_ctr_y + 0.5 * pred_h
target = torch.stack((xmin, ymin, xmax, ymax), dim=1)
return target
@torch.jit.script
def box_iou(box_a, box_b, forcecpu=True):
"""
use torch.jit to save GPU memory
applied chunking
Arguments:
        box_a (Tensor[N, 4])
        box_b (Tensor[N, 4])
forcecpu: bool, calculate iou on cpu and return iou matrix on cpu
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in box_a and box_b
"""
# print("Will compute boxes: ", box_a.size(dim=0),box_b.size(dim=0))
ori_device = box_a.device
if forcecpu:
box_a = box_a.cpu()
box_b = box_b.cpu()
#box_a = box_a.type(torch.int16)
#box_b = box_a.type(torch.int16)
lt = torch.max(box_a[:, None, :2], box_b[:, :2])
rb = torch.min(box_a[:, None, 2:], box_b[:, 2:])
# wh = (rb - lt).clamp(min=0, max=math.inf)
# inter = wh[:, :, 0] * wh[:, :, 1]
# area_a = torch.prod(box_a[:, 2:] - box_a[:, :2], 1)
area_b = torch.prod(box_b[:, 2:] - box_b[:, :2], 1)
#
# return inter / (area_a[:, None] + area_b - inter)
N = int(len(box_a))
M = int(len(box_b))
iou = torch.zeros([N, M]).to(box_a.device)
for i in range(0, N, 20):
area_a = torch.prod(box_a[i:min(i+20, N), 2:] - box_a[i:min(i+20, N), :2], 1)
wh = (rb[i:min(i+20, N), :] - lt[i:min(i+20, N), :]).clamp(min=0) # [<=20,M,2] # right bottom and left top
inter = wh[:, :, 0] * wh[:, :, 1] # [<=20,M]
# handle empty boxes
iou[i:min(i+20, N), :] = torch.where(
inter > 0,
inter / (area_a[:, None] + area_b - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
# if forcecpu:
# iou = iou.to(ori_device)
return iou
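# Worked example (a sketch): two overlapping 2x2 boxes share a 1x1 intersection,
# so IoU = 1 / (4 + 4 - 1):
#
#   box_a = torch.tensor([[0., 0., 2., 2.]])
#   box_b = torch.tensor([[1., 1., 3., 3.]])
#   box_iou(box_a, box_b)   # -> tensor([[0.1429]]) on CPU (forcecpu defaults to True)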
def process_box(box, score, image_shape, min_size):
"""
Clip boxes in the image size and remove boxes which are too small.
"""
box[:, [0, 2]] = box[:, [0, 2]].clamp(0, image_shape[1])
box[:, [1, 3]] = box[:, [1, 3]].clamp(0, image_shape[0])
w, h = box[:, 2] - box[:, 0], box[:, 3] - box[:, 1]
keep = torch.where((w >= min_size) & (h >= min_size))[0]
box, score = box[keep], score[keep]
return box, score
def nms(box, score, threshold):
"""
Arguments:
box (Tensor[N, 4])
score (Tensor[N]): scores of the boxes.
threshold (float): iou threshold.
Returns:
keep (Tensor): indices of boxes filtered by NMS.
"""
return torchvision.ops.nms(box, score, threshold) | PypiClean |
/Get-API-Blaze-1.0.tar.gz/Get-API-Blaze-1.0/Get_API_blaze/results_gather.py | import requests
import pendulum
url_double_recent = 'https://blaze.com/api/roulette_games/recent'
url_double_current = 'https://blaze.com/api/roulette_games/current'
url_double_history = 'https://blaze.com/api/roulette_games/history'
url_crash_recent = 'https://blaze.com/api/crash_games/recent'
url_crash_current = 'https://blaze.com/api/crash_games/current'
url_crash_history = 'https://blaze.com/api/crash_games/history'
def Last_Results_Double(
*args,
cor=False,
numero=False,
horario=False,
id=False,
):
br_tz = pendulum.now().timezone_name
results = {
'cores': [],
'numeros': [],
'ids': [],
'horarios': [],
}
requisicao20 = requests.get(url_double_history).json()['records']
for i,it in reversed(list(enumerate(requisicao20))):
if 'id' in requisicao20[i]:
results['ids'].append(requisicao20[i]['id'])
if 'color' in requisicao20[i]:
results['cores'].append(requisicao20[i]['color'])
if 'roll' in requisicao20[i]:
results['numeros'].append(requisicao20[i]['roll'])
if 'created_at' in requisicao20[i]:
convert = pendulum.parse(requisicao20[i]['created_at']).in_timezone(br_tz)
results['horarios'].append(convert)
if cor:
if args:
if len(args) == 2:
return results['cores'][args[0]:args[1]]
elif len(args) < 2:
return results['cores'][args[0]]
else:
return results['cores']
elif numero:
if args:
if len(args) == 2:
return results['numeros'][args[0]:args[1]]
elif len(args) < 2:
return results['numeros'][args[0]]
else:
return results['numeros']
elif horario:
if args:
if len(args) == 2:
return results['horarios'][args[0]:args[1]]
elif len(args) < 2:
return results['horarios'][args[0]]
else:
return results['horarios']
elif id:
if args:
if len(args) == 2:
return results['ids'][args[0]:args[1]]
elif len(args) < 2:
return results['ids'][args[0]]
else:
return results['ids']
else:
return results
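# Usage sketch (return shapes follow the branches above; actual values depend on
# the live Blaze API):
#
#   Last_Results_Double(cor=True)        # list of colours from the history endpoint
#   Last_Results_Double(0, 5, cor=True)  # slice of that list, i.e. results['cores'][0:5]
#   Last_Results_Double()                # full dict with 'cores', 'numeros', 'ids', 'horarios'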
def Status_double():
return requests.get(url_double_current).json()["status"]
def Last_Results_crash(
*args,
crash_point=False,
horario=False,
id=False,
):
br_tz = pendulum.now().timezone_name
results = {
'crash_point': [],
'ids': [],
'horarios': [],
}
requisicao20 = requests.get(url_crash_history).json()['records']
for i,it in reversed(list(enumerate(requisicao20))):
if 'id' in requisicao20[i]:
results['ids'].append(requisicao20[i]['id'])
if 'crash_point' in requisicao20[i]:
results['crash_point'].append(float(requisicao20[i]['crash_point']))
if 'created_at' in requisicao20[i]:
convert = pendulum.parse(requisicao20[i]['created_at']).in_timezone(br_tz)
results['horarios'].append(convert)
if crash_point:
if len(args) > 0:
if len(args) == 2:
return results['crash_point'][args[0]:args[1]]
elif len(args) < 2:
return results['crash_point'][args[0]]
else:
return results['crash_point']
elif horario:
if len(args) > 0:
if len(args) == 2:
return results['horarios'][args[0]:args[1]]
elif len(args) < 2:
return results['horarios'][args[0]]
else:
return results['horarios']
elif id:
if len(args) > 0:
if len(args) == 2:
return results['ids'][args[0]:args[1]]
elif len(args) < 2:
return results['ids'][args[0]]
else:
return results['ids']
else:
return results
def Status_crash():
return requests.get(url_crash_current).json()["status"] | PypiClean |
/BoxKit-2023.6.7.tar.gz/BoxKit-2023.6.7/boxkit/resources/read/_sample.py |
import h5pickle as h5py
def read_test_sample(
filename, server, nthreads, batch, monitor, backend
): # pylint: disable=too-many-locals disable=too-many-arguments disable=unused-argument
"""
Read dataset from BoxKit test sample
Parameters
----------
filename : string containing file name
server : server dictionary
Returns
-------
data_attributes : dictionary containing data attributes
block_attributes : dictionary containg block attributes
"""
if server:
raise NotImplementedError("[boxkit.read.test_sample] Cannot read from server")
# Read the hdf5 file
inputfile = h5py.File(filename, "r", skip_cache=True)
# Extract data
nblocks = inputfile["numbox"][0] * inputfile["numbox"][1] * inputfile["numbox"][2]
nxb = inputfile["sizebox"][0]
nyb = inputfile["sizebox"][1]
nzb = inputfile["sizebox"][2]
xmin = inputfile["boundbox/min"][:, 0]
ymin = inputfile["boundbox/min"][:, 1]
zmin = inputfile["boundbox/min"][:, 2]
xmax = inputfile["boundbox/max"][:, 0]
ymax = inputfile["boundbox/max"][:, 1]
zmax = inputfile["boundbox/max"][:, 2]
dx = inputfile["deltas"][0] # pylint: disable=invalid-name
dy = inputfile["deltas"][1] # pylint: disable=invalid-name
dz = inputfile["deltas"][2] # pylint: disable=invalid-name
variables = {}
variables.update(inputfile["quantities"])
# Create data attributes
data_attributes = {
"nblocks": int(nblocks),
"nxb": int(nxb),
"nyb": int(nyb),
"nzb": int(nzb),
"inputfile": inputfile,
"variables": variables,
}
# Create block attributes
block_attributes = [
{
"dx": dx,
"dy": dy,
"dz": dz,
"xmin": xmin[lblock],
"ymin": ymin[lblock],
"zmin": zmin[lblock],
"xmax": xmax[lblock],
"ymax": ymax[lblock],
"zmax": zmax[lblock],
"tag": lblock,
}
for lblock in range(nblocks)
]
return data_attributes, block_attributes | PypiClean |
/django-chuck-0.2.3.tar.gz/django-chuck/build/lib/django_chuck/commands/setup_project.py | import os
import sys
import shutil
from django_chuck.commands.base import BaseCommand
from django_chuck.commands import checkout_source, create_virtualenv, install_virtualenv, sync_database, migrate_database
class Command(BaseCommand):
help = "Checkout and setup an existing project"
module_cache = {}
def __init__(self):
self.opts = [
("checkout_url", {
"help": "repository url",
})
]
self.opts.append((
"-cd", {
"help": "destination directory",
"dest": "checkout_destdir",
"default": "",
"nargs": "?"
}),
)
self.opts.append((
"-b", {
"help": "branch to checkout / clone",
"dest": "branch",
"default": "",
"nargs": "?"
}),
)
self.no_default_checks = True
def handle(self,args, cfg):
super(Command, self).handle(args, cfg)
checkout_cmd = checkout_source.Command()
checkout_cmd.handle(args, cfg)
cfg["checkout_destdir"] = checkout_cmd.checkout_destdir
if not os.path.exists(self.checkout_destdir):
print "Checkout failed! :("
sys.exit(0)
chuck_setup_file = os.path.join(cfg["checkout_destdir"], "chuck_setup.py")
chuck_setup_required_params = [
"project_prefix",
"project_name",
"django_settings",
]
# Import chuck_setup file
if os.access(chuck_setup_file, os.R_OK):
chuck_setup_path = os.path.dirname(chuck_setup_file)
sys.path.insert(0, chuck_setup_path)
import chuck_setup
for param in chuck_setup_required_params:
if not hasattr(chuck_setup, param):
print "Parameter " + param + " is missing in chuck_setup.py!"
sys.exit(1)
self.cfg["project_prefix"] = chuck_setup.project_prefix
self.cfg["project_name"] = chuck_setup.project_name
self.cfg["django_settings"] = chuck_setup.django_settings
if hasattr(chuck_setup, "modules"):
self.cfg["modules"] = chuck_setup.modules
self.inject_variables_and_functions(chuck_setup)
# No chuck_setup file was found
else:
print "\n>>> Cannot find chuck_setup file " + chuck_setup_file
answer = raw_input("Do you want to continue anyway? (Y/n): ")
if answer.lower() == "n":
if os.path.exists(cfg["checkout_destdir"]):
shutil.rmtree(cfg["checkout_destdir"])
sys.exit(1)
else:
if not cfg.get("project_prefix"):
cfg["project_prefix"] = raw_input("Site: ")
if not cfg.get("project_name"):
cfg["project_name"] = raw_input("Project: ")
if not cfg.get("django_settings"):
default_settings = cfg["project_name"] + ".settings.sites.default.dev.developer_example"
cfg["django_settings"] = raw_input("Django settings(" + default_settings + "): ")
if not cfg["django_settings"]:
cfg["django_settings"] = default_settings
chuck_setup = None
# Check if project already exists
if os.path.exists(self.site_dir) or os.path.exists(self.virtualenv_dir):
print "Project already exists!"
answer = raw_input("Remove it? (y/N): ")
if answer.lower() == "y" or answer.lower() == "j":
if os.path.exists(self.virtualenv_dir):
shutil.rmtree(self.virtualenv_dir)
if os.path.exists(self.site_dir):
shutil.rmtree(self.site_dir)
else:
print "Please remove or rename the project and virtualenv before rerun."
print "\nVirtualenv: " + self.virtualenv_dir
print "Project: " + self.site_dir
sys.exit(1)
# Otherwise move source checkout
shutil.move(self.checkout_destdir, self.site_dir)
if chuck_setup and getattr(chuck_setup, "post_git_clone"):
chuck_setup.post_git_clone()
# Build Virtualenv
if chuck_setup and getattr(chuck_setup, "pre_build_virtualenv"):
chuck_setup.pre_build_virtualenv()
create_virtualenv.Command().handle(args, cfg)
install_virtualenv.Command().handle(args, cfg)
if chuck_setup and getattr(chuck_setup, "post_build_virtualenv"):
chuck_setup.post_build_virtualenv()
if self.cfg.get("modules"):
self.print_header("EXECUTE POST-SETUP METHODS IF AVAILABLE")
# Execute post-setup methods if available
module_cache = self.get_module_cache()
modules_to_check = self.cfg["modules"].split(',')
modules_to_check = self.clean_module_list(modules_to_check, module_cache)
for module_name in modules_to_check:
module = module_cache.get(module_name)
if module.cfg:
self.inject_variables_and_functions(module.cfg)
if module.post_setup:
module.post_setup()
# Create database
os.chdir(self.site_dir)
if not os.path.exists(os.path.join(self.site_dir, "db")):
os.makedirs(os.path.join(self.site_dir, "db"))
if chuck_setup and getattr(chuck_setup, "pre_sync_db"):
chuck_setup.pre_sync_db()
if chuck_setup and getattr(chuck_setup, "extra_syncdb_options") and chuck_setup.extra_syncdb_options:
cfg["extra_syncdb_options"] = chuck_setup.extra_syncdb_options
if chuck_setup and getattr(chuck_setup, "extra_migrate_options") and chuck_setup.extra_migrate_options:
cfg["extra_migrate_options"] = chuck_setup.extra_migrate_options
sync_database.Command().handle(args, cfg)
if chuck_setup and getattr(chuck_setup, "post_sync_db"):
chuck_setup.post_sync_db()
if chuck_setup and getattr(chuck_setup, "pre_migrate_db"):
chuck_setup.pre_migrate_db()
migrate_database.Command().handle(args, cfg)
if chuck_setup and getattr(chuck_setup, "post_migrate_db"):
chuck_setup.post_migrate_db()
self.print_header("SUMMARY")
print "\nCloned project " + self.site_dir + " from " + self.checkout_url
if self.use_virtualenvwrapper:
print "\nworkon " + self.site_name
else:
print "\nsource " + os.path.join(self.virtualenv_dir, "bin", "activate")
print "cd " + self.site_dir
print "django-admin.py runserver" | PypiClean |
/Dowell%20Mail-1.0.2.tar.gz/Dowell Mail-1.0.2/docs/_build/html/_static/javascripts/modernizr.js | !function(e,t){for(var n in t)e[n]=t[n]}(window,function(n){var r={};function o(e){if(r[e])return r[e].exports;var t=r[e]={i:e,l:!1,exports:{}};return n[e].call(t.exports,t,t.exports,o),t.l=!0,t.exports}return o.m=n,o.c=r,o.d=function(e,t,n){o.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},o.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},o.t=function(t,e){if(1&e&&(t=o(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var n=Object.create(null);if(o.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var r in t)o.d(n,r,function(e){return t[e]}.bind(null,r));return n},o.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return o.d(t,"a",t),t},o.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},o.p="",o(o.s=11)}({11:function(e,t,n){"use strict";n.r(t);n(12)},12:function(e,t){var n;(function(i,d,p){function y(e,t){return typeof e===t}function s(e){var t=S.className,n=c._config.classPrefix||"";if(b&&(t=t.baseVal),c._config.enableJSClass){var r=new RegExp("(^|\\s)"+n+"no-js(\\s|$)");t=t.replace(r,"$1"+n+"js$2")}c._config.enableClasses&&(0<e.length&&(t+=" "+n+e.join(" "+n)),b?S.className.baseVal=t:S.className=t)}function a(e,t){if("object"==typeof e)for(var n in e)v(e,n)&&a(n,e[n]);else{var r=(e=e.toLowerCase()).split("."),o=c[r[0]];if(2===r.length&&(o=o[r[1]]),void 0!==o)return c;t="function"==typeof t?t():t,1===r.length?c[r[0]]=t:(!c[r[0]]||c[r[0]]instanceof Boolean||(c[r[0]]=new Boolean(c[r[0]])),c[r[0]][r[1]]=t),s([(t&&!1!==t?"":"no-")+r.join("-")]),c._trigger(e,t)}return c}function m(){return"function"!=typeof d.createElement?d.createElement(arguments[0]):b?d.createElementNS.call(d,"http://www.w3.org/2000/svg",arguments[0]):d.createElement.apply(d,arguments)}function o(e,t,n,r){var o,i,s,a,l,u="modernizr",f=m("div"),c=((l=d.body)||((l=m(b?"svg":"body")).fake=!0),l);if(parseInt(n,10))for(;n--;)(s=m("div")).id=r?r[n]:u+(n+1),f.appendChild(s);return(o=m("style")).type="text/css",o.id="s"+u,(c.fake?c:f).appendChild(o),c.appendChild(f),o.styleSheet?o.styleSheet.cssText=e:o.appendChild(d.createTextNode(e)),f.id=u,c.fake&&(c.style.background="",c.style.overflow="hidden",a=S.style.overflow,S.style.overflow="hidden",S.appendChild(c)),i=t(f,e),c.fake?(c.parentNode.removeChild(c),S.style.overflow=a,S.offsetHeight):f.parentNode.removeChild(f),!!i}function l(e){return e.replace(/([A-Z])/g,function(e,t){return"-"+t.toLowerCase()}).replace(/^ms-/,"-ms-")}function h(e,t){var n=e.length;if("CSS"in i&&"supports"in i.CSS){for(;n--;)if(i.CSS.supports(l(e[n]),t))return!0;return!1}if("CSSSupportsRule"in i){for(var r=[];n--;)r.push("("+l(e[n])+":"+t+")");return o("@supports ("+(r=r.join(" or "))+") { #modernizr { position: absolute; } }",function(e){return"absolute"===function(e,t,n){var r;if("getComputedStyle"in i){r=getComputedStyle.call(i,e,t);var o=i.console;null!==r?n&&(r=r.getPropertyValue(n)):o&&o[o.error?"error":"log"].call(o,"getComputedStyle returning null, its possible modernizr test results are inaccurate")}else r=!t&&e.currentStyle&&e.currentStyle[n];return r}(e,null,"position")})}return p}function u(e,t){return function(){return e.apply(t,arguments)}}function r(e,t,n,r,o){var i=e.charAt(0).toUpperCase()+e.slice(1),s=(e+" "+w.join(i+" ")+i).split(" ");return 
y(t,"string")||y(t,"undefined")?function(e,t,n,r){function o(){s&&(delete T.style,delete T.modElem)}if(r=!y(r,"undefined")&&r,!y(n,"undefined")){var i=h(e,n);if(!y(i,"undefined"))return i}for(var s,a,l,u,f,c=["modernizr","tspan","samp"];!T.style&&c.length;)s=!0,T.modElem=m(c.shift()),T.style=T.modElem.style;for(l=e.length,a=0;a<l;a++)if(u=e[a],f=T.style[u],!!~(""+u).indexOf("-")&&(u=u.replace(/([a-z])-([a-z])/g,function(e,t,n){return t+n.toUpperCase()}).replace(/^-/,"")),T.style[u]!==p){if(r||y(n,"undefined"))return o(),"pfx"!==t||u;try{T.style[u]=n}catch(e){}if(T.style[u]!==f)return o(),"pfx"!==t||u}return o(),!1}(s,t,r,o):function(e,t,n){var r;for(var o in e)if(e[o]in t)return!1===n?e[o]:y(r=t[e[o]],"function")?u(r,n||t):r;return!1}(s=(e+" "+P.join(i+" ")+i).split(" "),t,n)}function e(e,t,n){return r(e,p,p,t,n)}var f=[],t={_version:"3.7.1",_config:{classPrefix:"",enableClasses:!0,enableJSClass:!0,usePrefixes:!0},_q:[],on:function(e,t){var n=this;setTimeout(function(){t(n[e])},0)},addTest:function(e,t,n){f.push({name:e,fn:t,options:n})},addAsyncTest:function(e){f.push({name:null,fn:e})}},c=function(){};c.prototype=t,c=new c;var v,n,g=[],S=d.documentElement,b="svg"===S.nodeName.toLowerCase();v=y(n={}.hasOwnProperty,"undefined")||y(n.call,"undefined")?function(e,t){return t in e&&y(e.constructor.prototype[t],"undefined")}:function(e,t){return n.call(e,t)},t._l={},t.on=function(e,t){this._l[e]||(this._l[e]=[]),this._l[e].push(t),c.hasOwnProperty(e)&&setTimeout(function(){c._trigger(e,c[e])},0)},t._trigger=function(e,t){if(this._l[e]){var n=this._l[e];setTimeout(function(){var e;for(e=0;e<n.length;e++)(0,n[e])(t)},0),delete this._l[e]}},c._q.push(function(){t.addTest=a}),c.addTest("json","JSON"in i&&"parse"in JSON&&"stringify"in JSON),c.addTest("svg",!!d.createElementNS&&!!d.createElementNS("http://www.w3.org/2000/svg","svg").createSVGRect);var C=t.testStyles=o;c.addTest("checked",function(){return C("#modernizr {position:absolute} #modernizr input {margin-left:10px} #modernizr :checked {margin-left:20px;display:block}",function(e){var t=m("input");return t.setAttribute("type","checkbox"),t.setAttribute("checked","checked"),e.appendChild(t),20===t.offsetLeft})}),c.addTest("target",function(){var e=i.document;if(!("querySelectorAll"in e))return!1;try{return e.querySelectorAll(":target"),!0}catch(e){return!1}}),c.addTest("dataset",function(){var e=m("div");return e.setAttribute("data-a-b","c"),!(!e.dataset||"c"!==e.dataset.aB)}),c.addTest("details",function(){var t,n=m("details");return"open"in n&&(C("#modernizr details{display:block}",function(e){e.appendChild(n),n.innerHTML="<summary>a</summary>b",t=n.offsetHeight,n.open=!0,t=t!==n.offsetHeight}),t)}),c.addTest("fetch","fetch"in i);var _="Moz O ms Webkit",w=t._config.usePrefixes?_.split(" "):[];t._cssomPrefixes=w;var x={elem:m("modernizr")};c._q.push(function(){delete x.elem});var T={style:x.elem.style};c._q.unshift(function(){delete T.style});var P=t._config.usePrefixes?_.toLowerCase().split(" "):[];t._domPrefixes=P,t.testAllProps=r,t.testAllProps=e;var j="CSS"in i&&"supports"in i.CSS,O="supportsCSS"in i;c.addTest("supports",j||O),c.addTest("csstransforms3d",function(){return!!e("perspective","1px",!0)}),function(){var e,t,n,r,o,i;for(var s in 
f)if(f.hasOwnProperty(s)){if(e=[],(t=f[s]).name&&(e.push(t.name.toLowerCase()),t.options&&t.options.aliases&&t.options.aliases.length))for(n=0;n<t.options.aliases.length;n++)e.push(t.options.aliases[n].toLowerCase());for(r=y(t.fn,"function")?t.fn():t.fn,o=0;o<e.length;o++)1===(i=e[o].split(".")).length?c[i[0]]=r:(!c[i[0]]||c[i[0]]instanceof Boolean||(c[i[0]]=new Boolean(c[i[0]])),c[i[0]][i[1]]=r),g.push((r?"":"no-")+i.join("-"))}}(),s(g),delete t.addTest,delete t.addAsyncTest;for(var z=0;z<c._q.length;z++)c._q[z]();i.Modernizr=c})(n=window,document),e.exports=n.Modernizr}})); | PypiClean |
/MuPhyN-0.1.1.post4-py3-none-any.whl/muphyn/packages/core/application/plci_core_io.py | from typing import List, Union
from muphyn.packages.core.application.plci_core_signal import Signal
class Input:
def __init__(self, name: str, signal: Signal) -> None:
self._name: str = name
self._signal: Signal = signal
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, newName: str):
self.setName(newName)
@property
    def signal(self) -> Signal:
        return self._signal
    @signal.setter
    def signal(self, newSignal: Signal):
        self.setSignal(newSignal)
    def setName(self, newName: str):
        if self._name != newName:
            self._name = newName
    def setSignal(self, newSignal: Signal):
        if self._signal != newSignal:
            self._signal = newSignal
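# Minimal usage sketch (the Signal instance is assumed to come from
# plci_core_signal; only the Input/Output API defined in this module, with
# Output declared below, is exercised):
#
#   sig = Signal(...)                 # built elsewhere in muphyn
#   inp = Input("voltage", sig)
#   out = Output("voltage_out")       # gets its own empty signal list
#   out.appendSignal(sig)
#   assert inp.signal is sig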
class Output:
    def __init__(self, name: str, signals: Optional[List[Signal]] = None) -> None:
        self._name: str = name
        # Avoid a shared mutable default: each Output gets its own signal list.
        self._signals: List[Signal] = [] if signals is None else signals
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, newName: str):
self.setName(newName)
@property
    def signals(self) -> List[Signal]:
return self._signals
def appendSignal(self, newSignal: Signal):
if newSignal not in self._signals:
self._signals.append(newSignal)
def appendSignals(self, newSignals: List[Signal]):
for newSignal in newSignals:
self.appendSignal(newSignal)
def clearSignals(self):
self._signals: List[Signal] = []
def insertSignal(self, newSignal: Signal, index: int):
if newSignal not in self._signals:
self._signals.insert(index, newSignal)
def insertSignals(self, newSignals: List[Signal], index: int):
for signalIndex, newSignal in enumerate(newSignals):
self.insertSignal(index + signalIndex, newSignal)
def removeSignal(self, signal: Union[Signal, int]):
if type(signal) == int and signal < len(self._signals):
signal = self._signals[signal]
if signal in self._signals:
self._signals.remove(signal)
def removeSignals(self, newSignals: List[Union[Signal, int]]):
for newSignal in newSignals:
self.removeSignal(newSignal)
def setName(self, newName: str):
if self._name != newName:
self._name = newName | PypiClean |
/DockerMake-0.9.0b3.tar.gz/DockerMake-0.9.0b3/dockermake/imagedefs.py |
from __future__ import print_function
from builtins import object
import os
from collections import OrderedDict
import yaml
import uuid
import dockermake.step
from . import builds
from . import staging
from . import errors
from . import utils
RECOGNIZED_KEYS = set(
(
"requires build_directory build copy_from FROM description _sourcefile"
" FROM_DOCKERFILE ignore ignorefile squash secret_files"
).split()
)
SPECIAL_FIELDS = set("_ALL_ _SOURCES_".split())
class ImageDefs(object):
""" Stores and processes the image definitions
"""
def __init__(self, makefile_path):
self._sources = set()
self.makefile_path = makefile_path
print("Working directory: %s" % os.path.abspath(os.curdir))
print("Copy cache directory: %s" % staging.TMPDIR)
try:
ymldefs, alltargets = self.parse_yaml(self.makefile_path)
except errors.UserException:
raise
except Exception as exc:
raise errors.ParsingFailure(
"Failed to read file %s:\n" % self.makefile_path + str(exc)
)
self.ymldefs = ymldefs
self.all_targets = alltargets
self._external_dockerfiles = {}
def parse_yaml(self, filename):
# locate and verify the DockerMake.yml file
fname = os.path.expanduser(filename)
print("READING %s" % os.path.expanduser(fname))
if fname in self._sources:
raise errors.CircularSourcesError(
"Circular _SOURCES_ in %s" % self.makefile_path
)
self._sources.add(fname)
with open(fname, "r") as yaml_file:
yamldefs = yaml.safe_load(yaml_file)
self._check_yaml_and_paths(filename, yamldefs)
# Recursively read all steps in included files from the _SOURCES_ field and
# store them in sourcedefs
sourcedefs = {}
for s in yamldefs.pop("_SOURCES_", []):
src, _ = self.parse_yaml(s) # ignore source's _ALL_ targets
sourcedefs.update(src)
# Now add the steps defined in this file
sourcedefs.update(yamldefs)
alltargets = sourcedefs.pop("_ALL_", [])
return sourcedefs, alltargets
@staticmethod
def _check_yaml_and_paths(ymlfilepath, yamldefs):
""" Checks YAML for errors and resolves all paths
"""
relpath = os.path.relpath(ymlfilepath)
if "/" not in relpath:
relpath = "./%s" % relpath
pathroot = os.path.abspath(os.path.dirname(ymlfilepath))
for imagename, defn in yamldefs.items():
if imagename == "_SOURCES_":
yamldefs["_SOURCES_"] = [
os.path.relpath(_get_abspath(pathroot, p))
for p in yamldefs["_SOURCES_"]
]
continue
elif imagename in SPECIAL_FIELDS:
continue
for key in ("build_directory", "FROM_DOCKERFILE", "ignorefile"):
if key in defn:
defn[key] = _get_abspath(pathroot, defn[key])
if "copy_from" in defn:
if not isinstance(defn["copy_from"], dict):
raise errors.ParsingFailure(
(
'Syntax error in file "%s": \n'
+ 'The "copy_from" field in image definition "%s" is not \n'
"a key:value list."
)
% (ymlfilepath, imagename)
)
for otherimg, value in defn.get("copy_from", {}).items():
if not isinstance(value, dict):
raise errors.ParsingFailure(
(
"Syntax error in field:\n"
' %s . copy_from . %s\nin file "%s". \n'
'All entries must be of the form "sourcepath: destpath"'
)
% (imagename, otherimg, ymlfilepath)
)
# save the file path for logging
defn["_sourcefile"] = relpath
if "ignore" in defn and "ignorefile" in defn:
raise errors.MultipleIgnoreError(
'Image "%s" has both "ignore" AND "ignorefile" fields.' % imagename
+ " At most ONE of these should be defined"
)
if "secret_files" in defn and not defn.get("squash", True):
raise errors.ParsingFailure(
"Step '%s' defines secret_files, so 'squash' cannot be set to 'false'"
% imagename
)
if defn.get("secret_files", None) and defn.get("copy_from", False):
raise errors.ParsingFailure(
"`secret_files` currently is not implmemented to handle `copy_from`"
" (step %s)" % imagename
)
for key in defn:
if key not in RECOGNIZED_KEYS:
raise errors.UnrecognizedKeyError(
'Field "%s" in image "%s" in file "%s" not recognized'
% (key, imagename, relpath)
)
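    # For orientation, a DockerMake.yml fragment that passes the checks above
    # could look like this (image names and paths are made up):
    #
    #   devbase:
    #     FROM: python:3.9-slim
    #     build: |
    #       RUN pip install requests
    #   app:
    #     requires:
    #       - devbase
    #     build_directory: ./app
    #     copy_from:
    #       devbase:
    #         /opt/artifacts: /artifacts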
def generate_build(
self,
image,
targetname,
rebuilds=None,
cache_repo="",
cache_tag="",
buildargs=None,
**kwargs,
):
"""
Separate the build into a series of one or more intermediate steps.
Each specified build directory gets its own step
Args:
            image (str): name of the image as defined in the DockerMake.yml file
targetname (str): name to tag the final built image with
rebuilds (List[str]): list of image layers to rebuild (i.e., without docker's cache)
cache_repo (str): repository to get images for caches in builds
cache_tag (str): tags to use from repository for caches in builds
            buildargs (dict): build-time dockerfile arguments
**kwargs (dict): extra keyword arguments for the BuildTarget object
"""
build_uuid = str(uuid.uuid4())
from_image = self.get_external_base_image(image)
if cache_repo or cache_tag:
cache_from = utils.generate_name(image, cache_repo, cache_tag)
else:
cache_from = None
if from_image is None:
raise errors.NoBaseError("No base image found in %s's dependencies" % image)
if isinstance(from_image, ExternalDockerfile):
build_first = from_image
base_image = from_image.tag
else:
base_image = from_image
build_first = None
build_steps = []
istep = 0
sourceimages = set()
if rebuilds is None:
rebuilds = []
else:
rebuilds = set(rebuilds)
for base_name in self.sort_dependencies(image):
istep += 1
buildname = self._generate_stepname(istep, image, build_uuid)
secret_files = self.ymldefs[base_name].get("secret_files", None)
squash = self.ymldefs[base_name].get("squash", bool(secret_files))
build_steps.append(
dockermake.step.BuildStep(
base_name,
base_image,
self.ymldefs[base_name],
buildname,
bust_cache=base_name in rebuilds,
build_first=build_first,
cache_from=cache_from,
buildargs=buildargs,
squash=squash,
secret_files=secret_files,
)
)
base_image = buildname
build_first = None
for sourceimage, files in (
self.ymldefs[base_name].get("copy_from", {}).items()
):
sourceimages.add(sourceimage)
for sourcepath, destpath in files.items():
istep += 1
buildname = self._generate_stepname(istep, image, build_uuid)
build_steps.append(
dockermake.step.FileCopyStep(
sourceimage,
sourcepath,
destpath,
base_name,
base_image,
self.ymldefs[base_name],
buildname,
bust_cache=base_name in rebuilds,
build_first=build_first,
cache_from=cache_from,
)
)
base_image = buildname
sourcebuilds = [
self.generate_build(
img, img, cache_repo=cache_repo, cache_tag=cache_tag, **kwargs
)
for img in sourceimages
]
return builds.BuildTarget(
imagename=image,
targetname=targetname,
steps=build_steps,
sourcebuilds=sourcebuilds,
from_image=from_image,
**kwargs,
)
def _generate_stepname(self, istep, image, build_uuid):
return f"{istep}.{image}.dmk:{build_uuid}"
def sort_dependencies(self, image, dependencies=None):
"""
Topologically sort the docker commands by their requirements
Note:
Circular "requires" dependencies are assumed to have already been checked in
            get_external_base_image; they are not checked here
Args:
image (str): process this docker image's dependencies
dependencies (OrderedDict): running cache of sorted dependencies (ordered dict)
Returns:
List[str]: list of dependencies a topologically-sorted build order
"""
if dependencies is None:
dependencies = (
OrderedDict()
) # using this as an ordered set - not storing any values
if image in dependencies:
return
requires = self.ymldefs[image].get("requires", [])
for dep in requires:
self.sort_dependencies(dep, dependencies)
dependencies[image] = None
return dependencies.keys()
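    # Order sketch (hypothetical definitions): if "app" requires "runtime" and
    # "runtime" requires "base", then
    #
    #   list(self.sort_dependencies("app"))   # -> ["base", "runtime", "app"]
    #
    # i.e. every image appears after all of its requirements.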
def get_external_base_image(self, image, stack=None):
""" Makes sure that this image has exactly one unique external base image
"""
if stack is None:
stack = list()
mydef = self.ymldefs[image]
if image in stack:
stack.append(image)
raise errors.CircularDependencyError(
"Circular dependency found:\n" + "->".join(stack)
)
stack.append(image)
# Deal with FROM and FROM_DOCKERFILE fields
if "FROM" in mydef and "FROM_DOCKERFILE" in mydef:
raise errors.MultipleBaseError(
'ERROR: Image "%s" has both a "FROM" and a "FROM_DOCKERFILE" field.'
% image
+ " It should have at most ONE of these fields."
)
if "FROM" in mydef:
externalbase = mydef["FROM"]
elif "FROM_DOCKERFILE" in mydef:
path = mydef["FROM_DOCKERFILE"]
if path not in self._external_dockerfiles:
self._external_dockerfiles[path] = ExternalDockerfile(path)
externalbase = self._external_dockerfiles[path]
else:
externalbase = None
requires = mydef.get("requires", [])
if not isinstance(requires, list):
raise errors.InvalidRequiresList(
'Requirements for image "%s" are not a list' % image
)
for base in requires:
try:
otherexternal = self.get_external_base_image(base, stack)
except ValueError:
continue
if externalbase is None:
externalbase = otherexternal
elif otherexternal is None:
continue
elif externalbase != otherexternal:
raise errors.ConflictingBaseError(
'Multiple external dependencies: definition "%s" depends on:\n'
% image
+ " %s (FROM: %s), and\n" % (image, externalbase)
+ " %s (FROM: %s)." % (base, otherexternal)
)
assert stack.pop() == image
return externalbase
class ExternalDockerfile(object):
def __init__(self, path):
self.path = path
self.built = False
self.tag = uuid.uuid4()
def __str__(self):
return "Dockerfile at %s" % self.path
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
else:
return self.path == other.path
def _get_abspath(pathroot, relpath):
path = os.path.expanduser(pathroot)
buildpath = os.path.expanduser(relpath)
if not os.path.isabs(buildpath):
buildpath = os.path.join(os.path.abspath(path), buildpath)
return buildpath | PypiClean |
/DjangoKit-0.13.tar.gz/DjangoKit-0.13/djangokit/management/log.py | import traceback
from functools import wraps
from logging import getLogger
from django.utils.encoding import force_str
from django.utils.log import AdminEmailHandler as BaseAdminEmailHandler
class AdminErrorHandler(BaseAdminEmailHandler):
"""Обработчик логгера запускаемых команд, отправляющий почту админам."""
def emit(self, record):
subject = message = None
if hasattr(record, 'command') and hasattr(record, 'exception'):
command = record.command
name = command.__class__.__module__.split('.')[-1]
subject = 'Something went wrong in %s' % name
exception = record.exception
message = ''.join(traceback.format_exception(
etype=type(exception),
value=exception,
tb=exception.__traceback__,
))
if hasattr(record, 'command_args'):
message += '\n\nARGS:\n %s' % '\n '.join(
[force_str(i) for i in record.command_args]
)
if hasattr(record, 'command_kwargs'):
message += '\n\nKWARGS:\n %s' % '\n '.join(
['%s=%s' % (force_str(k), force_str(v)) for k, v in
record.command_kwargs.items()]
)
else:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
subject = self.format_subject(subject)
message = 'Undefined message, please check server logs.'
self.send_mail(subject, message, fail_silently=True)
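# Wiring sketch for AdminErrorHandler above (illustrative, not part of the
# original module; the dotted path assumes this module is importable as
# djangokit.management.log, per the package layout):
#
#     LOGGING = {
#         'version': 1,
#         'handlers': {
#             'management_mail_admins': {
#                 'level': 'ERROR',
#                 'class': 'djangokit.management.log.AdminErrorHandler',
#             },
#         },
#         'loggers': {
#             'management': {
#                 'handlers': ['management_mail_admins'],
#                 'level': 'ERROR',
#             },
#         },
#     }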
def handler_logging(method=None, level='error', logger_or_name='management'):
"""
    Decorator for a command's handler method that reports execution errors
    to the logger.
"""
    # Check at definition time that the level names an existing logger method.
assert level in ('debug', 'info', 'warning', 'error', 'critical')
if isinstance(logger_or_name, str):
logger = getLogger(logger_or_name)
else:
logger = logger_or_name
log_func = getattr(logger, level)
def decorator(method_func):
@wraps(method_func)
def _wrapped_method(self, *args, **kwargs):
try:
return method_func(self, *args, **kwargs)
except Exception as e:
log_func(
force_str(e),
extra={
'command': self,
'command_args': args,
'command_kwargs': kwargs,
'exception': e,
}
)
raise e
return _wrapped_method
if method:
return decorator(method)
return decorator | PypiClean |
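# Usage sketch for handler_logging above (illustrative, not part of the original
# module): decorate a management command's handle() so unhandled exceptions are
# sent to the 'management' logger (and on to AdminErrorHandler, if configured)
# before being re-raised.
#
#     class Command(BaseCommand):
#         @handler_logging(level='error')
#         def handle(self, *args, **options):
#             ...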
/GNN/data/dataloaders.py | from torch.utils.data import DataLoader
from MoleculeACE.benchmark.utils import collate_molgraphs, RANDOM_SEED
import torch
import random
import numpy as np
from MoleculeACE.benchmark.utils.const import CONFIG_PATH_GENERAL
from MoleculeACE.benchmark.utils import get_config
general_settings = get_config(CONFIG_PATH_GENERAL)
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(0)
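# Note on reproducibility: seed_worker reseeds NumPy and ``random`` inside each
# DataLoader worker process from torch's per-worker seed, and ``g`` is the fixed
# torch.Generator passed as ``generator=`` to the DataLoaders below, so the
# shuffling order of the training set is repeatable across runs.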
def get_train_val_test_dataloaders(train_set, val_set, test_set, batch_size,
num_workers=general_settings['num_workers']):
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True,
collate_fn=collate_molgraphs, num_workers=num_workers,
worker_init_fn=seed_worker, generator=g)
val_loader = DataLoader(dataset=val_set, batch_size=batch_size, shuffle=False,
collate_fn=collate_molgraphs, num_workers=num_workers,
worker_init_fn=seed_worker, generator=g)
test_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False,
collate_fn=collate_molgraphs, num_workers=num_workers,
worker_init_fn=seed_worker, generator=g)
return train_loader, val_loader, test_loader
def get_train_val_dataloaders(train_set, val_set, batch_size, num_workers=general_settings['num_workers']):
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True,
collate_fn=collate_molgraphs, num_workers=num_workers,
worker_init_fn=seed_worker, generator=g)
val_loader = DataLoader(dataset=val_set, batch_size=batch_size, shuffle=False,
collate_fn=collate_molgraphs, num_workers=num_workers,
worker_init_fn=seed_worker, generator=g)
return train_loader, val_loader
def split_dataset_in_loaders(train_set, validation_split, batch_size, num_workers=general_settings['num_workers']):
from dgllife.utils import ScaffoldSplitter
# Split the train and validation set by molecular scaffolds
train_set, val_set, _ = ScaffoldSplitter.train_val_test_split(train_set, mols=None, sanitize=True,
frac_train=1 - validation_split,
frac_val=validation_split,
frac_test=0, log_every_n=1000,
scaffold_func='decompose')
# Get the train and validation dataloaders
train_loader, val_loader = get_train_val_dataloaders(train_set, val_set,
batch_size=batch_size,
num_workers=num_workers)
return train_loader, val_loader | PypiClean |
/Fabric-with-working-dependencies-1.0.1.tar.gz/Fabric-with-working-dependencies-1.0.1/fabric/operations.py | from __future__ import with_statement
import os
import os.path
import re
import stat
import subprocess
import sys
import time
from glob import glob
from traceback import format_exc
from contextlib import closing
from fabric.context_managers import settings, char_buffered
from fabric.io import output_loop, input_loop
from fabric.network import needs_host
from fabric.state import (env, connections, output, win32, default_channel,
io_sleep)
from fabric.utils import abort, indent, warn, puts
from fabric.thread_handling import ThreadHandler
from fabric.sftp import SFTP
# For terminal size logic below
if not win32:
import fcntl
import termios
import struct
def _pty_size():
"""
Obtain (rows, cols) tuple for sizing a pty on the remote end.
Defaults to 80x24 (which is also the Paramiko default) but will detect
local (stdout-based) terminal window size on non-Windows platforms.
"""
rows, cols = 24, 80
if not win32 and sys.stdin.isatty():
# We want two short unsigned integers (rows, cols)
fmt = 'HH'
# Create an empty (zeroed) buffer for ioctl to map onto. Yay for C!
buffer = struct.pack(fmt, 0, 0)
# Call TIOCGWINSZ to get window size of stdout, returns our filled buffer
try:
result = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ,
buffer)
# Unpack buffer back into Python data types
rows, cols = struct.unpack(fmt, result)
# Deal with e.g. sys.stdout being monkeypatched, such as in testing.
# Or termios not having a TIOCGWINSZ.
except AttributeError:
pass
return rows, cols
def _handle_failure(message, exception=None):
"""
Call `abort` or `warn` with the given message.
The value of ``env.warn_only`` determines which method is called.
If ``exception`` is given, it is inspected to get a string message, which
is printed alongside the user-generated ``message``.
"""
func = env.warn_only and warn or abort
# If debug printing is on, append a traceback to the message
if output.debug:
message += "\n\n" + format_exc()
# Otherwise, if we were given an exception, append its contents.
elif exception is not None:
# Figure out how to get a string out of the exception; EnvironmentError
# subclasses, for example, "are" integers and .strerror is the string.
# Others "are" strings themselves. May have to expand this further for
# other error types.
if hasattr(exception, 'strerror') and exception.strerror is not None:
underlying = exception.strerror
else:
underlying = exception
message += "\n\nUnderlying exception message:\n" + indent(underlying)
return func(message)
def _shell_escape(string):
"""
Escape double quotes, backticks and dollar signs in given ``string``.
For example::
>>> _shell_escape('abc$')
'abc\\\\$'
>>> _shell_escape('"')
'\\\\"'
"""
for char in ('"', '$', '`'):
        string = string.replace(char, '\\%s' % char)
return string
class _AttributeString(str):
"""
Simple string subclass to allow arbitrary attribute access.
"""
@property
def stdout(self):
return str(self)
class _AttributeList(list):
"""
Like _AttributeString, but for lists.
"""
pass
# Can't wait till Python versions supporting 'def func(*args, foo=bar)' become
# widespread :(
def require(*keys, **kwargs):
"""
Check for given keys in the shared environment dict and abort if not found.
Positional arguments should be strings signifying what env vars should be
checked for. If any of the given arguments do not exist, Fabric will abort
execution and print the names of the missing keys.
The optional keyword argument ``used_for`` may be a string, which will be
printed in the error output to inform users why this requirement is in
place. ``used_for`` is printed as part of a string similar to::
"Th(is|ese) variable(s) (are|is) used for %s"
so format it appropriately.
The optional keyword argument ``provided_by`` may be a list of functions or
function names which the user should be able to execute in order to set the
key or keys; it will be included in the error output if requirements are
not met.
Note: it is assumed that the keyword arguments apply to all given keys as a
group. If you feel the need to specify more than one ``used_for``, for
example, you should break your logic into multiple calls to ``require()``.
"""
# If all keys exist, we're good, so keep going.
missing_keys = filter(lambda x: x not in env, keys)
if not missing_keys:
return
# Pluralization
if len(missing_keys) > 1:
variable = "variables were"
used = "These variables are"
else:
variable = "variable was"
used = "This variable is"
# Regardless of kwargs, print what was missing. (Be graceful if used outside
# of a command.)
if 'command' in env:
prefix = "The command '%s' failed because the " % env.command
else:
prefix = "The "
msg = "%sfollowing required environment %s not defined:\n%s" % (
prefix, variable, indent(missing_keys)
)
# Print used_for if given
if 'used_for' in kwargs:
msg += "\n\n%s used for %s" % (used, kwargs['used_for'])
# And print provided_by if given
if 'provided_by' in kwargs:
funcs = kwargs['provided_by']
# Pluralize this too
if len(funcs) > 1:
command = "one of the following commands"
else:
command = "the following command"
to_s = lambda obj: getattr(obj, '__name__', str(obj))
provided_by = [to_s(obj) for obj in funcs]
msg += "\n\nTry running %s prior to this one, to fix the problem:\n%s"\
% (command, indent(provided_by))
abort(msg)
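# Usage sketch for require() above (illustrative names, not from this module):
# inside a fabfile one might write
#
#     require('hosts', 'code_dir',
#             used_for='deploying to the remote docroot',
#             provided_by=[staging, production])
#
# where ``staging`` and ``production`` are tasks that populate those env keys.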
def prompt(text, key=None, default='', validate=None):
"""
Prompt user with ``text`` and return the input (like ``raw_input``).
A single space character will be appended for convenience, but nothing
else. Thus, you may want to end your prompt text with a question mark or a
colon, e.g. ``prompt("What hostname?")``.
If ``key`` is given, the user's input will be stored as ``env.<key>`` in
addition to being returned by `prompt`. If the key already existed in
``env``, its value will be overwritten and a warning printed to the user.
If ``default`` is given, it is displayed in square brackets and used if the
user enters nothing (i.e. presses Enter without entering any text).
``default`` defaults to the empty string. If non-empty, a space will be
appended, so that a call such as ``prompt("What hostname?",
default="foo")`` would result in a prompt of ``What hostname? [foo]`` (with
a trailing space after the ``[foo]``.)
The optional keyword argument ``validate`` may be a callable or a string:
* If a callable, it is called with the user's input, and should return the
value to be stored on success. On failure, it should raise an exception
with an exception message, which will be printed to the user.
* If a string, the value passed to ``validate`` is used as a regular
expression. It is thus recommended to use raw strings in this case. Note
that the regular expression, if it is not fully matching (bounded by
``^`` and ``$``) it will be made so. In other words, the input must fully
match the regex.
Either way, `prompt` will re-prompt until validation passes (or the user
hits ``Ctrl-C``).
Examples::
# Simplest form:
environment = prompt('Please specify target environment: ')
# With default, and storing as env.dish:
prompt('Specify favorite dish: ', 'dish', default='spam & eggs')
# With validation, i.e. requiring integer input:
prompt('Please specify process nice level: ', key='nice', validate=int)
# With validation against a regular expression:
release = prompt('Please supply a release name',
validate=r'^\w+-\d+(\.\d+)?$')
"""
# Store previous env value for later display, if necessary
if key:
previous_value = env.get(key)
# Set up default display
default_str = ""
if default != '':
default_str = " [%s] " % str(default).strip()
else:
default_str = " "
# Construct full prompt string
prompt_str = text.strip() + default_str
# Loop until we pass validation
value = None
while value is None:
# Get input
value = raw_input(prompt_str) or default
# Handle validation
if validate:
# Callable
if callable(validate):
# Callable validate() must raise an exception if validation
# fails.
try:
value = validate(value)
except Exception, e:
# Reset value so we stay in the loop
value = None
print("Validation failed for the following reason:")
print(indent(e.message) + "\n")
# String / regex must match and will be empty if validation fails.
else:
# Need to transform regex into full-matching one if it's not.
if not validate.startswith('^'):
validate = r'^' + validate
if not validate.endswith('$'):
validate += r'$'
result = re.findall(validate, value)
if not result:
print("Regular expression validation failed: '%s' does not match '%s'\n" % (value, validate))
# Reset value so we stay in the loop
value = None
# At this point, value must be valid, so update env if necessary
if key:
env[key] = value
# Print warning if we overwrote some other value
if key and previous_value is not None and previous_value != value:
warn("overwrote previous env variable '%s'; used to be '%s', is now '%s'." % (
key, previous_value, value
))
# And return the value, too, just in case someone finds that useful.
return value
@needs_host
def put(local_path=None, remote_path=None, use_sudo=False,
mirror_local_mode=False, mode=None):
"""
Upload one or more files to a remote host.
`~fabric.operations.put` returns an iterable containing the absolute file
paths of all remote files uploaded. This iterable also exhibits a
``.failed`` attribute containing any local file paths which failed to
upload (and may thus be used as a boolean test.) You may also check
``.succeeded`` which is equivalent to ``not .failed``.
``local_path`` may be a relative or absolute local file or directory path,
and may contain shell-style wildcards, as understood by the Python ``glob``
module. Tilde expansion (as implemented by ``os.path.expanduser``) is also
performed.
``local_path`` may alternately be a file-like object, such as the result of
``open('path')`` or a ``StringIO`` instance.
.. note::
In this case, `~fabric.operations.put` will attempt to read the entire
contents of the file-like object by rewinding it using ``seek`` (and
will use ``tell`` afterwards to preserve the previous file position).
.. note::
Use of a file-like object in `~fabric.operations.put`'s ``local_path``
argument will cause a temporary file to be utilized due to limitations
in our SSH layer's API.
``remote_path`` may also be a relative or absolute location, but applied to
the remote host. Relative paths are relative to the remote user's home
directory, but tilde expansion (e.g. ``~/.ssh/``) will also be performed if
necessary.
An empty string, in either path argument, will be replaced by the
appropriate end's current working directory.
While the SFTP protocol (which `put` uses) has no direct ability to upload
files to locations not owned by the connecting user, you may specify
``use_sudo=True`` to work around this. When set, this setting causes `put`
to upload the local files to a temporary location on the remote end, and
then use `sudo` to move them to ``remote_path``.
In some use cases, it is desirable to force a newly uploaded file to match
the mode of its local counterpart (such as when uploading executable
scripts). To do this, specify ``mirror_local_mode=True``.
Alternately, you may use the ``mode`` kwarg to specify an exact mode, in
the same vein as ``os.chmod`` or the Unix ``chmod`` command.
`~fabric.operations.put` will honor `~fabric.context_managers.cd`, so
relative values in ``remote_path`` will be prepended by the current remote
working directory, if applicable. Thus, for example, the below snippet
would attempt to upload to ``/tmp/files/test.txt`` instead of
``~/files/test.txt``::
with cd('/tmp'):
put('/path/to/local/test.txt', 'files')
Use of `~fabric.context_managers.lcd` will affect ``local_path`` in the
same manner.
Examples::
put('bin/project.zip', '/tmp/project.zip')
put('*.py', 'cgi-bin/')
put('index.html', 'index.html', mode=0755)
.. versionchanged:: 1.0
Now honors the remote working directory as manipulated by
`~fabric.context_managers.cd`, and the local working directory as
manipulated by `~fabric.context_managers.lcd`.
.. versionchanged:: 1.0
Now allows file-like objects in the ``local_path`` argument.
.. versionchanged:: 1.0
Directories may be specified in the ``local_path`` argument and will
trigger recursive uploads.
.. versionchanged:: 1.0
Return value is now an iterable of uploaded remote file paths which
also exhibits the ``.failed`` and ``.succeeded`` attributes.
"""
# Handle empty local path
local_path = local_path or os.getcwd()
# Test whether local_path is a path or a file-like object
local_is_path = not (hasattr(local_path, 'read') \
and callable(local_path.read))
ftp = SFTP(env.host_string)
with closing(ftp) as ftp:
# Expand tildes (assumption: default remote cwd is user $HOME)
home = ftp.normalize('.')
# Empty remote path implies cwd
remote_path = remote_path or home
# Honor cd() (assumes Unix style file paths on remote end)
if not os.path.isabs(remote_path) and env.get('cwd'):
remote_path = env.cwd.rstrip('/') + '/' + remote_path
if local_is_path:
# Expand local paths
local_path = os.path.expanduser(local_path)
# Honor lcd() where it makes sense
if not os.path.isabs(local_path) and env.lcwd:
local_path = os.path.join(env.lcwd, local_path)
# Glob local path
names = glob(local_path)
else:
names = [local_path]
# Make sure local arg exists
if local_is_path and not names:
err = "'%s' is not a valid local path or glob." % local_path
raise ValueError(err)
        # Sanity check and weird cases
if ftp.exists(remote_path):
if local_is_path and len(names) != 1 and not ftp.isdir(remote_path):
raise ValueError("'%s' is not a directory" % remote_path)
# Iterate over all given local files
remote_paths = []
failed_local_paths = []
for lpath in names:
try:
if local_is_path and os.path.isdir(lpath):
p = ftp.put_dir(lpath, remote_path, use_sudo,
mirror_local_mode, mode)
remote_paths.extend(p)
else:
p = ftp.put(lpath, remote_path, use_sudo, mirror_local_mode,
mode, local_is_path)
remote_paths.append(p)
except Exception, e:
msg = "put() encountered an exception while uploading '%s'"
failure = lpath if local_is_path else "<StringIO>"
failed_local_paths.append(failure)
_handle_failure(message=msg % lpath, exception=e)
ret = _AttributeList(remote_paths)
ret.failed = failed_local_paths
ret.succeeded = not ret.failed
return ret
@needs_host
def get(remote_path, local_path=None):
"""
Download one or more files from a remote host.
`~fabric.operations.get` returns an iterable containing the absolute paths
to all local files downloaded, which will be empty if ``local_path`` was a
StringIO object (see below for more on using StringIO). This object will
also exhibit a ``.failed`` attribute containing any remote file paths which
failed to download, and a ``.succeeded`` attribute equivalent to ``not
.failed``.
``remote_path`` is the remote file or directory path to download, which may
contain shell glob syntax, e.g. ``"/var/log/apache2/*.log"``, and will have
tildes replaced by the remote home directory. Relative paths will be
considered relative to the remote user's home directory, or the current
remote working directory as manipulated by `~fabric.context_managers.cd`.
If the remote path points to a directory, that directory will be downloaded
recursively.
``local_path`` is the local file path where the downloaded file or files
will be stored. If relative, it will honor the local current working
directory as manipulated by `~fabric.context_managers.lcd`. It may be
interpolated, using standard Python dict-based interpolation, with the
following variables:
* ``host``: The value of ``env.host_string``, eg ``myhostname`` or
``user@myhostname-222`` (the colon between hostname and port is turned
into a dash to maximize filesystem compatibility)
* ``dirname``: The directory part of the remote file path, e.g. the
``src/projectname`` in ``src/projectname/utils.py``.
* ``basename``: The filename part of the remote file path, e.g. the
``utils.py`` in ``src/projectname/utils.py``
* ``path``: The full remote path, e.g. ``src/projectname/utils.py``.
.. note::
When ``remote_path`` is an absolute directory path, only the inner
directories will be recreated locally and passed into the above
variables. So for example, ``get('/var/log', '%(path)s')`` would start
writing out files like ``apache2/access.log``,
``postgresql/8.4/postgresql.log``, etc, in the local working directory.
It would **not** write out e.g. ``var/log/apache2/access.log``.
Additionally, when downloading a single file, ``%(dirname)s`` and
``%(path)s`` do not make as much sense and will be empty and equivalent
to ``%(basename)s``, respectively. Thus a call like
``get('/var/log/apache2/access.log', '%(path)s')`` will save a local
file named ``access.log``, not ``var/log/apache2/access.log``.
This behavior is intended to be consistent with the command-line
``scp`` program.
If left blank, ``local_path`` defaults to ``"%(host)s/%(path)s"`` in order
to be safe for multi-host invocations.
.. warning::
If your ``local_path`` argument does not contain ``%(host)s`` and your
`~fabric.operations.get` call runs against multiple hosts, your local
files will be overwritten on each successive run!
If ``local_path`` does not make use of the above variables (i.e. if it is a
simple, explicit file path) it will act similar to ``scp`` or ``cp``,
overwriting pre-existing files if necessary, downloading into a directory
if given (e.g. ``get('/path/to/remote_file.txt', 'local_directory')`` will
create ``local_directory/remote_file.txt``) and so forth.
``local_path`` may alternately be a file-like object, such as the result of
``open('path', 'w')`` or a ``StringIO`` instance.
.. note::
Attempting to `get` a directory into a file-like object is not valid
and will result in an error.
.. note::
This function will use ``seek`` and ``tell`` to overwrite the entire
contents of the file-like object, in order to be consistent with the
behavior of `~fabric.operations.put` (which also considers the entire
file). However, unlike `~fabric.operations.put`, the file pointer will
not be restored to its previous location, as that doesn't make as much
sense here and/or may not even be possible.
.. note::
Due to how our SSH layer works, a temporary file will still be written
to your hard disk even if you specify a file-like object such as a
StringIO for the ``local_path`` argument. Cleanup is performed,
however -- we just note this for users expecting straight-to-memory
transfers. (We hope to patch our SSH layer in the future to enable true
straight-to-memory downloads.)
.. versionchanged:: 1.0
Now honors the remote working directory as manipulated by
`~fabric.context_managers.cd`, and the local working directory as
manipulated by `~fabric.context_managers.lcd`.
.. versionchanged:: 1.0
Now allows file-like objects in the ``local_path`` argument.
.. versionchanged:: 1.0
``local_path`` may now contain interpolated path- and host-related
variables.
.. versionchanged:: 1.0
Directories may be specified in the ``remote_path`` argument and will
trigger recursive downloads.
.. versionchanged:: 1.0
Return value is now an iterable of downloaded local file paths, which
also exhibits the ``.failed`` and ``.succeeded`` attributes.
"""
# Handle empty local path / default kwarg value
local_path = local_path or "%(host)s/%(path)s"
# Test whether local_path is a path or a file-like object
local_is_path = not (hasattr(local_path, 'write') \
and callable(local_path.write))
# Honor lcd() where it makes sense
if local_is_path and not os.path.isabs(local_path) and env.lcwd:
local_path = os.path.join(env.lcwd, local_path)
ftp = SFTP(env.host_string)
with closing(ftp) as ftp:
home = ftp.normalize('.')
# Expand home directory markers (tildes, etc)
if remote_path.startswith('~'):
remote_path = remote_path.replace('~', home, 1)
if local_is_path:
local_path = os.path.expanduser(local_path)
# Honor cd() (assumes Unix style file paths on remote end)
if not os.path.isabs(remote_path):
# Honor cwd if it's set (usually by with cd():)
if env.get('cwd'):
remote_path = env.cwd.rstrip('/') + '/' + remote_path
# Otherwise, be relative to remote home directory (SFTP server's
# '.')
else:
remote_path = os.path.join(home, remote_path)
# Track final local destination files so we can return a list
local_files = []
failed_remote_files = []
try:
# Glob remote path
names = ftp.glob(remote_path)
# Handle invalid local-file-object situations
if not local_is_path:
if len(names) > 1 or ftp.isdir(names[0]):
_handle_failure("[%s] %s is a glob or directory, but local_path is a file object!" % (env.host_string, remote_path))
for remote_path in names:
if ftp.isdir(remote_path):
result = ftp.get_dir(remote_path, local_path)
local_files.extend(result)
else:
# Result here can be file contents (if not local_is_path)
# or final resultant file path (if local_is_path)
result = ftp.get(remote_path, local_path, local_is_path,
os.path.basename(remote_path))
if not local_is_path:
# Overwrite entire contents of local_path
local_path.seek(0)
local_path.write(result)
else:
local_files.append(result)
except Exception, e:
failed_remote_files.append(remote_path)
msg = "get() encountered an exception while downloading '%s'"
_handle_failure(message=msg % remote_path, exception=e)
ret = _AttributeList(local_files if local_is_path else [])
ret.failed = failed_remote_files
ret.succeeded = not ret.failed
return ret
def _sudo_prefix(user):
"""
Return ``env.sudo_prefix`` with ``user`` inserted if necessary.
"""
# Insert env.sudo_prompt into env.sudo_prefix
prefix = env.sudo_prefix % env.sudo_prompt
if user is not None:
if str(user).isdigit():
user = "#%s" % user
return "%s -u \"%s\" " % (prefix, user)
return prefix
def _shell_wrap(command, shell=True, sudo_prefix=None):
"""
Conditionally wrap given command in env.shell (while honoring sudo.)
"""
# Honor env.shell, while allowing the 'shell' kwarg to override it (at
# least in terms of turning it off.)
if shell and not env.use_shell:
shell = False
# Sudo plus space, or empty string
if sudo_prefix is None:
sudo_prefix = ""
else:
sudo_prefix += " "
# If we're shell wrapping, prefix shell and space, escape the command and
# then quote it. Otherwise, empty string.
if shell:
shell = env.shell + " "
command = '"%s"' % _shell_escape(command)
else:
shell = ""
# Resulting string should now have correct formatting
return sudo_prefix + shell + command
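# Illustrative result for _shell_wrap above (assuming the default env.shell of
# '/bin/bash -l -c' and env.use_shell=True): _shell_wrap('echo "$HOME"') returns
# '/bin/bash -l -c "echo \"\$HOME\""' -- the command is escaped by _shell_escape
# and then double-quoted before being handed to the shell.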
def _prefix_commands(command, which):
"""
Prefixes ``command`` with all prefixes found in ``env.command_prefixes``.
``env.command_prefixes`` is a list of strings which is modified by the
`~fabric.context_managers.prefix` context manager.
This function also handles a special-case prefix, ``cwd``, used by
`~fabric.context_managers.cd`. The ``which`` kwarg should be a string,
``"local"`` or ``"remote"``, which will determine whether ``cwd`` or
``lcwd`` is used.
"""
# Local prefix list (to hold env.command_prefixes + any special cases)
prefixes = list(env.command_prefixes)
# Handle current working directory, which gets its own special case due to
# being a path string that gets grown/shrunk, instead of just a single
# string or lack thereof.
# Also place it at the front of the list, in case user is expecting another
# prefixed command to be "in" the current working directory.
cwd = env.cwd if which == 'remote' else env.lcwd
if cwd:
prefixes.insert(0, 'cd %s' % cwd)
glue = " && "
prefix = (glue.join(prefixes) + glue) if prefixes else ""
return prefix + command
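# Illustrative result: with ``cd('/var/www')`` and ``prefix('workon myenv')``
# active, _prefix_commands('ls', 'remote') returns
# 'cd /var/www && workon myenv && ls' (the cwd prefix is always placed first).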
def _prefix_env_vars(command):
"""
Prefixes ``command`` with any shell environment vars, e.g. ``PATH=foo ``.
Currently, this only applies the PATH updating implemented in
`~fabric.context_managers.path`.
"""
# path(): local shell env var update, appending/prepending/replacing $PATH
path = env.path
if path:
if env.path_behavior == 'append':
path = 'PATH=$PATH:\"%s\" ' % path
elif env.path_behavior == 'prepend':
path = 'PATH=\"%s\":$PATH ' % path
elif env.path_behavior == 'replace':
path = 'PATH=\"%s\" ' % path
else:
path = ''
return path + command
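# Illustrative result (assuming the default 'append' path behavior): with
# ``path('/opt/bin')`` active, _prefix_env_vars('ls') returns
# 'PATH=$PATH:"/opt/bin" ls'.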
def _execute(channel, command, pty=True, combine_stderr=True,
invoke_shell=False):
"""
Execute ``command`` over ``channel``.
``pty`` controls whether a pseudo-terminal is created.
``combine_stderr`` controls whether we call ``channel.set_combine_stderr``.
``invoke_shell`` controls whether we use ``exec_command`` or
``invoke_shell`` (plus a handful of other things, such as always forcing a
pty.)
Returns a three-tuple of (``stdout``, ``stderr``, ``status``), where
``stdout``/``stderr`` are captured output strings and ``status`` is the
program's return code, if applicable.
"""
with char_buffered(sys.stdin):
# Combine stdout and stderr to get around oddball mixing issues
if combine_stderr or env.combine_stderr:
channel.set_combine_stderr(True)
# Assume pty use, and allow overriding of this either via kwarg or env
# var. (invoke_shell always wants a pty no matter what.)
using_pty = True
if not invoke_shell and (not pty or not env.always_use_pty):
using_pty = False
# Request pty with size params (default to 80x24, obtain real
# parameters if on POSIX platform)
if using_pty:
rows, cols = _pty_size()
channel.get_pty(width=cols, height=rows)
# Kick off remote command
if invoke_shell:
channel.invoke_shell()
if command:
channel.sendall(command + "\n")
else:
channel.exec_command(command)
# Init stdout, stderr capturing. Must use lists instead of strings as
# strings are immutable and we're using these as pass-by-reference
stdout, stderr = [], []
if invoke_shell:
stdout = stderr = None
workers = (
ThreadHandler('out', output_loop, channel, "recv", stdout),
ThreadHandler('err', output_loop, channel, "recv_stderr", stderr),
ThreadHandler('in', input_loop, channel, using_pty)
)
while True:
if channel.exit_status_ready():
break
else:
for worker in workers:
e = worker.exception
if e:
raise e[0], e[1], e[2]
time.sleep(io_sleep)
# Obtain exit code of remote program now that we're done.
status = channel.recv_exit_status()
# Wait for threads to exit so we aren't left with stale threads
for worker in workers:
worker.thread.join()
# Close channel
channel.close()
# Update stdout/stderr with captured values if applicable
if not invoke_shell:
stdout = ''.join(stdout).strip()
stderr = ''.join(stderr).strip()
# Tie off "loose" output by printing a newline. Helps to ensure any
# following print()s aren't on the same line as a trailing line prefix
# or similar. However, don't add an extra newline if we've already
        # ended up with one, as that adds an entire blank line instead.
if output.running \
and (output.stdout and stdout and not stdout.endswith("\n")) \
or (output.stderr and stderr and not stderr.endswith("\n")):
print("")
return stdout, stderr, status
@needs_host
def open_shell(command=None):
"""
Invoke a fully interactive shell on the remote end.
If ``command`` is given, it will be sent down the pipe before handing
control over to the invoking user.
This function is most useful for when you need to interact with a heavily
shell-based command or series of commands, such as when debugging or when
fully interactive recovery is required upon remote program failure.
It should be considered an easy way to work an interactive shell session
into the middle of a Fabric script and is *not* a drop-in replacement for
`~fabric.operations.run`, which is also capable of interacting with the
remote end (albeit only while its given command is executing) and has much
stronger programmatic abilities such as error handling and stdout/stderr
capture.
Specifically, `~fabric.operations.open_shell` provides a better interactive
experience than `~fabric.operations.run`, but use of a full remote shell
prevents Fabric from determining whether programs run within the shell have
failed, and pollutes the stdout/stderr stream with shell output such as
login banners, prompts and echoed stdin.
Thus, this function does not have a return value and will not trigger
Fabric's failure handling if any remote programs result in errors.
.. versionadded:: 1.0
"""
_execute(default_channel(), command, True, True, True)
def _run_command(command, shell=True, pty=True, combine_stderr=True,
sudo=False, user=None):
"""
Underpinnings of `run` and `sudo`. See their docstrings for more info.
"""
# Set up new var so original argument can be displayed verbatim later.
given_command = command
# Handle context manager modifications, and shell wrapping
wrapped_command = _shell_wrap(
_prefix_commands(_prefix_env_vars(command), 'remote'),
shell,
_sudo_prefix(user) if sudo else None
)
# Execute info line
which = 'sudo' if sudo else 'run'
if output.debug:
print("[%s] %s: %s" % (env.host_string, which, wrapped_command))
elif output.running:
print("[%s] %s: %s" % (env.host_string, which, given_command))
# Actual execution, stdin/stdout/stderr handling, and termination
stdout, stderr, status = _execute(default_channel(), wrapped_command, pty,
combine_stderr)
# Assemble output string
out = _AttributeString(stdout)
err = _AttributeString(stderr)
# Error handling
out.failed = False
if status != 0:
out.failed = True
msg = "%s() encountered an error (return code %s) while executing '%s'" % (which, status, command)
_handle_failure(message=msg)
# Attach return code to output string so users who have set things to
# warn only, can inspect the error code.
out.return_code = status
# Convenience mirror of .failed
out.succeeded = not out.failed
# Attach stderr for anyone interested in that.
out.stderr = err
return out
@needs_host
def run(command, shell=True, pty=True, combine_stderr=True):
"""
Run a shell command on a remote host.
If ``shell`` is True (the default), `run` will execute the given command
string via a shell interpreter, the value of which may be controlled by
setting ``env.shell`` (defaulting to something similar to ``/bin/bash -l -c
"<command>"``.) Any double-quote (``"``) or dollar-sign (``$``) characters
in ``command`` will be automatically escaped when ``shell`` is True.
`run` will return the result of the remote program's stdout as a single
(likely multiline) string. This string will exhibit ``failed`` and
``succeeded`` boolean attributes specifying whether the command failed or
succeeded, and will also include the return code as the ``return_code``
attribute.
Any text entered in your local terminal will be forwarded to the remote
program as it runs, thus allowing you to interact with password or other
prompts naturally. For more on how this works, see
:doc:`/usage/interactivity`.
You may pass ``pty=False`` to forego creation of a pseudo-terminal on the
remote end in case the presence of one causes problems for the command in
question. However, this will force Fabric itself to echo any and all input
you type while the command is running, including sensitive passwords. (With
``pty=True``, the remote pseudo-terminal will echo for you, and will
intelligently handle password-style prompts.) See :ref:`pseudottys` for
details.
Similarly, if you need to programmatically examine the stderr stream of the
remote program (exhibited as the ``stderr`` attribute on this function's
return value), you may set ``combine_stderr=False``. Doing so has a high
chance of causing garbled output to appear on your terminal (though the
resulting strings returned by `~fabric.operations.run` will be properly
separated). For more info, please read :ref:`combine_streams`.
Examples::
run("ls /var/www/")
run("ls /home/myuser", shell=False)
output = run('ls /var/www/site1')
.. versionadded:: 1.0
The ``succeeded`` and ``stderr`` return value attributes, the
``combine_stderr`` kwarg, and interactive behavior.
.. versionchanged:: 1.0
The default value of ``pty`` is now ``True``.
"""
return _run_command(command, shell, pty, combine_stderr)
@needs_host
def sudo(command, shell=True, pty=True, combine_stderr=True, user=None):
"""
Run a shell command on a remote host, with superuser privileges.
`sudo` is identical in every way to `run`, except that it will always wrap
the given ``command`` in a call to the ``sudo`` program to provide
superuser privileges.
`sudo` accepts an additional ``user`` argument, which is passed to ``sudo``
and allows you to run as some user other than root. On most systems, the
``sudo`` program can take a string username or an integer userid (uid);
``user`` may likewise be a string or an int.
Examples::
sudo("~/install_script.py")
sudo("mkdir /var/www/new_docroot", user="www-data")
sudo("ls /home/jdoe", user=1001)
result = sudo("ls /tmp/")
.. versionchanged:: 1.0
See the changed and added notes for `~fabric.operations.run`.
"""
return _run_command(command, shell, pty, combine_stderr, sudo=True,
user=user)
def local(command, capture=False):
"""
Run a command on the local system.
`local` is simply a convenience wrapper around the use of the builtin
Python ``subprocess`` module with ``shell=True`` activated. If you need to
do anything special, consider using the ``subprocess`` module directly.
`local` is not currently capable of simultaneously printing and
capturing output, as `~fabric.operations.run`/`~fabric.operations.sudo`
do. The ``capture`` kwarg allows you to switch between printing and
capturing as necessary, and defaults to ``False``.
When ``capture=False``, the local subprocess' stdout and stderr streams are
hooked up directly to your terminal, though you may use the global
:doc:`output controls </usage/output_controls>` ``output.stdout`` and
``output.stderr`` to hide one or both if desired. In this mode,
`~fabric.operations.local` returns None.
When ``capture=True``, this function will return the contents of the
command's stdout as a string-like object; as with `~fabric.operations.run`
and `~fabric.operations.sudo`, this return value exhibits the
``return_code``, ``stderr``, ``failed`` and ``succeeded`` attributes. See
`run` for details.
`~fabric.operations.local` will honor the `~fabric.context_managers.lcd`
context manager, allowing you to control its current working directory
independently of the remote end (which honors
`~fabric.context_managers.cd`).
.. versionchanged:: 1.0
Added the ``succeeded`` and ``stderr`` attributes.
.. versionchanged:: 1.0
Now honors the `~fabric.context_managers.lcd` context manager.
.. versionchanged:: 1.0
Changed the default value of ``capture`` from ``True`` to ``False``.
"""
given_command = command
# Apply cd(), path() etc
wrapped_command = _prefix_commands(_prefix_env_vars(command), 'local')
if output.debug:
print("[localhost] local: %s" % (wrapped_command))
elif output.running:
print("[localhost] local: " + given_command)
# Tie in to global output controls as best we can; our capture argument
# takes precedence over the output settings.
dev_null = None
if capture:
out_stream = subprocess.PIPE
err_stream = subprocess.PIPE
else:
dev_null = open(os.devnull, 'w+')
# Non-captured, hidden streams are discarded.
out_stream = None if output.stdout else dev_null
err_stream = None if output.stderr else dev_null
try:
cmd_arg = [wrapped_command] if win32 else wrapped_command
p = subprocess.Popen(cmd_arg, shell=True, stdout=out_stream,
stderr=err_stream)
(stdout, stderr) = p.communicate()
finally:
if dev_null is not None:
dev_null.close()
# Handle error condition (deal with stdout being None, too)
out = _AttributeString(stdout.strip() if stdout else "")
err = _AttributeString(stderr.strip() if stderr else "")
out.failed = False
out.return_code = p.returncode
out.stderr = err
if p.returncode != 0:
out.failed = True
msg = "local() encountered an error (return code %s) while executing '%s'" % (p.returncode, command)
_handle_failure(message=msg)
out.succeeded = not out.failed
# If we were capturing, this will be a string; otherwise it will be None.
return out
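# Usage sketch for local() above (illustrative): capture output and branch on
# failure.
#
#     with settings(warn_only=True):
#         result = local('git rev-parse HEAD', capture=True)
#     if result.failed:
#         abort('not a git checkout: %s' % result.stderr)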
@needs_host
def reboot(wait):
"""
Reboot the remote system, disconnect, and wait for ``wait`` seconds.
After calling this operation, further execution of `run` or `sudo` will
result in a normal reconnection to the server, including any password
prompts.
.. versionadded:: 0.9.2
"""
sudo('reboot')
client = connections[env.host_string]
client.close()
if env.host_string in connections:
del connections[env.host_string]
if output.running:
puts("Waiting for reboot: ", flush=True, end='')
per_tick = 5
for second in range(int(wait / per_tick)):
puts(".", show_prefix=False, flush=True, end='')
time.sleep(per_tick)
puts("done.\n", show_prefix=False, flush=True) | PypiClean |
/2Keys-0.5.1.tar.gz/2Keys-0.5.1/twokeys/add_keyboard/async_handler.py | from .add_keyboard import add_keyboard
import sys
import os
import signal
import aiofiles
from ..util import Logger
import yaml
from .sync_keyboard_path import update_server_keyboard_path
logger = Logger("add")
PID = os.getpid()
# IMPORTANT: Don't use non-async functions in this. That includes the logger.
# EXCEPTIONS ARE NOT CAUGHT
def gen_async_handler(keyboards, keyboard_name):
async def handler(keyboard):
print("[DEBUG] STOPPING WATCH")
# Stop each keyboard object one by one, then write config
for keyboard_stop in keyboards:
print("[DEBUG] ROOT: STOPPING " + keyboard_stop.keyboard)
await keyboard_stop.stop_watch()
# Write config
logger.info("Writing keyboard " + keyboard + " as " + keyboard_name)
logger.debug("Opening config...")
# 1: Open current file for updating
async with aiofiles.open(os.getcwd() + "/config.yml", mode="r") as config_file:
logger.debug("ASYNC FILE OPS") # DEBUG: signal start of async file ops, so as to help detect where program breaks
config_contents = await config_file.read() # Read config
logger.debug("Contents:\n" + config_contents)
# Parse it into python obj
config = yaml.load(config_contents, Loader=yaml.FullLoader)
logger.debug("Parsed contents: " + str(config))
config["keyboards"][keyboard_name]["path"] = keyboard # Update keyboard with path in /dev/input
logger.debug("Writing config...")
# r+ appends, so we have to create a new stream so we cam write
async with aiofiles.open("config.yml", mode="w") as config_write:
await config_write.write("# Config for 2Keys\n# ONLY FOR USE BY THE PROGRAM\n# To change the config, update it on the client and run \"2Keys config-update\" here\n" +
yaml.dump(config, default_flow_style=False)) # Write it
await config_write.close() # Close so other programs can use
logger.info("Config writen.")
logger.info("Updating path on server....")
await update_server_keyboard_path(keyboard_name, keyboard)
os.kill(PID, signal.SIGTERM) # Exit() does't work, so we have to self kill the script
exit() # So only one ^C is needed to end the program
return
return handler | PypiClean |
/Bytestag-0.2b1.tar.gz/Bytestag-0.2b1/src/py3/bytestag/storage.py | # This file is part of Bytestag.
# Copyright © 2012 Christopher Foo <[email protected]>.
# Licensed under GNU GPLv3. See COPYING.txt for details.
from bytestag.dht.models import FileInfo
from bytestag.events import Task
from bytestag.keys import KeyBytes
from bytestag.tables import KVPTable, KVPRecord, KVPID
import collections
import contextlib
import hashlib
import itertools
import logging
import math
import os
import sqlite3
import threading
__docformat__ = 'restructuredtext en'
_logger = logging.getLogger(__name__)
def part_to_byte_number(part_number, part_size):
'''Converts a file segment number to the byte offset
:rtype: :obj:`int`
'''
return part_number * part_size
def byte_to_part_number(byte_number, part_size):
'''Converts a byte offset to a file segment number.
:rtype: :obj:`int`
'''
return byte_number // part_size
def total_parts(total_byte_size, part_size):
'''Returns the total number of segments of a file
:rtype: :obj:`int`
'''
return math.ceil(total_byte_size / part_size)
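# Worked example (illustrative numbers): with part_size = 2 ** 18 (256 KiB),
# a file of 2 ** 20 + 1 bytes spans total_parts(2 ** 20 + 1, 2 ** 18) == 5
# segments; byte 600000 falls in segment byte_to_part_number(600000, 2 ** 18)
# == 2, which starts at offset part_to_byte_number(2, 2 ** 18) == 524288.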
class MemoryKVPTable(KVPTable):
'''A quick and dirty implementation of :class:`.KVPTable`
.. note::
This class is generally used for unit tests.
'''
def __init__(self):
KVPTable.__init__(self)
self._table = collections.defaultdict(
lambda: collections.defaultdict(dict))
def _contains(self, kvpid):
return kvpid.index in self._table[kvpid.key]
def indices(self, key):
return list(self._table[key].keys())
def _getitem(self, kvpid):
return self._table[kvpid.key][kvpid.index]['value']
def _setitem(self, kvpid, value):
self._table[kvpid.key][kvpid.index]['value'] = value
def _delitem(self, kvpid):
del self._table[kvpid.key][kvpid.index]
def keys(self):
for key in self._table:
for index in self._table[key]:
yield KVPID(key, index)
def record(self, kvpid):
return MemoryKVPRecord(kvpid, self._table[kvpid.key][kvpid.index])
def is_acceptable(self, kvpid, size, timestamp):
        if kvpid not in self:
            return True
        if self.record(kvpid).timestamp != timestamp:
            return True
        return False
class MemoryKVPRecord(KVPRecord):
'''The record associated with :class:`MemoryKVPTable`'''
def __init__(self, kvpid, d):
self._kvpid = kvpid
self._d = d
@property
def key(self):
return self._kvpid.key
@property
def index(self):
return self._kvpid.index
@property
def size(self):
return len(self._d['value'])
@property
def value(self):
return self._d['value']
@property
def timestamp(self):
return self._d.get('timestamp')
@timestamp.setter
def timestamp(self, seconds):
self._d['timestamp'] = seconds
@property
def time_to_live(self):
return self._d.get('time_to_live')
@time_to_live.setter
def time_to_live(self, seconds):
self._d['time_to_live'] = seconds
@property
def is_original(self):
return self._d.get('is_original')
@is_original.setter
def is_original(self, b):
self._d['is_original'] = b
@property
def last_update(self):
return self._d.get('last_update')
@last_update.setter
def last_update(self, seconds):
self._d['last_update'] = seconds
class SQLite3Mixin(object):
'''A SQLite 3 mixin class to provide connection management'''
@contextlib.contextmanager
def connection(self):
'''Return a connection context manager'''
if not hasattr(self, '_num_connections'):
self._num_connections = 0
# if self._num_connections:
# _logger.warning('There are %d connections already',
# self._num_connections)
con = sqlite3.connect(self._path, isolation_level='DEFERRED',
detect_types=sqlite3.PARSE_DECLTYPES)
con.row_factory = sqlite3.Row
con.execute('PRAGMA synchronous=NORMAL')
con.execute('PRAGMA journal_mode=WAL')
con.execute('PRAGMA foreign_keys = ON')
self._num_connections += 1
_logger.debug('Begin transaction current=%d', self._num_connections)
try:
with con:
yield con
finally:
self._num_connections -= 1
_logger.debug('End transaction current=%d', self._num_connections)
@property
def database_size(self):
'''The size of the database.
:rtype: :obj:`int`
'''
with self.connection() as con:
cur = con.execute('PRAGMA page_count')
page_count = cur.fetchone()[0]
cur = con.execute('PRAGMA page_size')
page_size = cur.fetchone()[0]
return page_count * page_size
def iter_query(self, query, params=(), limit=1000):
        '''Return rows that are fetched in blocks and stored in memory.
This function is useful for iterating the entire database without
blocking other connections.
'''
offset = 0
deque = collections.deque()
while True:
deque.clear()
with self.connection() as con:
cur = con.execute(query.format(limit, offset), params)
for row in cur:
deque.append(row)
if not deque:
break
while True:
try:
yield deque.popleft()
except IndexError:
break
offset += limit
class DatabaseKVPTable(KVPTable, SQLite3Mixin):
'''A KVPTable stored as a SQLite database'''
def __init__(self, path, max_size=2 ** 36):
'''
        :param path: The filename of the database.
        :param max_size: The maximum size to which the database may grow.
'''
KVPTable.__init__(self)
self._max_size = max_size
self._path = path
self._create_tables()
@property
def max_size(self):
        '''The maximum size to which the table may grow.'''
return self._max_size
@max_size.setter
def max_size(self, s):
self._max_size = s
def _create_tables(self):
with self.connection() as con:
con.execute('CREATE TABLE IF NOT EXISTS kvps ('
'key_id BLOB NOT NULL, index_id BLOB NOT NULL,'
'timestamp INTEGER,'
'time_to_live INTEGER,'
'is_original INTEGER,'
'value BLOB,'
'last_update INTEGER DEFAULT 0,'
'PRIMARY KEY (key_id, index_id))')
def _getitem(self, kvpid):
with self.connection() as con:
cur = con.execute('SELECT value FROM kvps '
'WHERE key_id = ? AND index_id = ? '
'LIMIT 1', (kvpid.key, kvpid.index))
for row in cur:
return row['value']
def _contains(self, kvpid):
with self.connection() as con:
cur = con.execute('SELECT 1 FROM kvps '
'WHERE key_id = ? AND index_id = ? LIMIT 1',
(kvpid.key, kvpid.index))
return True if cur.fetchone() else False
def _setitem(self, kvpid, value):
with self.connection() as con:
params = (value, kvpid.key, kvpid.index)
try:
con.execute('INSERT INTO kvps '
'(value, key_id, index_id) VALUES (?, ?, ?)', params)
except sqlite3.IntegrityError:
con.execute('UPDATE kvps SET value = ? '
'WHERE key_id = ? AND index_id = ?', params)
def keys(self):
query = 'SELECT key_id, index_id FROM kvps LIMIT {} OFFSET {}'
for row in self.iter_query(query):
yield KVPID(KeyBytes(row['key_id']), KeyBytes(row['index_id']))
def indices(self, key):
for row in self.iter_query('SELECT index_id FROM kvps WHERE '
'key_id = ? LIMIT {} OFFSET {}', (key,)):
yield KeyBytes(row['index_id'])
def _delitem(self, kvpid):
with self.connection() as con:
con.execute('DELETE FROM kvps WHERE '
'key_id = ? AND index_id = ?', (kvpid.key, kvpid.index))
def is_acceptable(self, kvpid, size, timestamp):
if kvpid in self and self.record(kvpid).timestamp == timestamp:
return False
if self.database_size + size > self._max_size:
return False
return True
def record(self, kvpid):
return DatabaseKVPRecord(self, kvpid)
def clean(self):
'''Remove expired key-value pairs.'''
_logger.debug('Clean database')
with self.connection() as con:
con.execute('''DELETE FROM kvps WHERE '''
'''timestamp + time_to_live < strftime('%s', 'now')''')
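    # Minimal usage sketch (illustrative; assumes KeyBytes(...) builds a valid
    # key and that the KVPTable base class maps dict-style access onto the
    # _getitem/_setitem/_contains hooks implemented here):
    #
    #     table = DatabaseKVPTable('/tmp/kvp.db', max_size=2 ** 20)
    #     kvpid = KVPID(KeyBytes(...), KeyBytes(...))
    #     table[kvpid] = b'hello world'
    #     assert table[kvpid] == b'hello world'
    #     table.record(kvpid).timestamp = 1234567890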
class DatabaseKVPRecord(KVPRecord):
'''The record associated with :class:`DatabaseKVPTable`.'''
__slots__ = ('_table', '_kvpid')
def __init__(self, table, kvpid):
self._table = table
self._kvpid = kvpid
def _get_field(self, name):
with self._table.connection() as con:
cur = con.execute('SELECT {} FROM kvps '
'WHERE key_id = ? AND index_id = ?'.format(name),
(self._kvpid.key, self._kvpid.index))
for row in cur:
return row[0]
def _save_field(self, name, value):
with self._table.connection() as con:
con.execute('UPDATE kvps SET {} = ? '
'WHERE key_id = ? AND index_id = ?'.format(name),
(value, self._kvpid.key, self._kvpid.index))
@property
def key(self):
return self._kvpid.key
@property
def index(self):
return self._kvpid.index
@property
def value(self):
return self._table[self._kvpid]
@property
def size(self):
return len(self.value)
@property
def timestamp(self):
return self._get_field('timestamp')
@timestamp.setter
def timestamp(self, seconds):
self._save_field('timestamp', seconds)
@property
def time_to_live(self):
return self._get_field('time_to_live')
@time_to_live.setter
def time_to_live(self, seconds):
self._save_field('time_to_live', seconds)
@property
def is_original(self):
return self._get_field('is_original')
@is_original.setter
def is_original(self, b):
self._save_field('is_original', b)
@property
def last_update(self):
return self._get_field('last_update')
@last_update.setter
def last_update(self, seconds):
self._save_field('last_update', seconds)
class ReadOnlyTableError(Exception):
'''This error is raised when the table does support storing values.'''
pass
class CollectionInfoTypes(object):
'''Types of CollectionInfo file types'''
DUMMY, BYTESTAG, BITTORRENT = range(3)
BYTESTAG_COOKIE = b'{"!":"BytestagCollectionInfo"'
class SharedFilesKVPTable(KVPTable, SQLite3Mixin):
'''Provides a KVPTable interface to shared files split into pieces.'''
def __init__(self, path):
'''
:param path: The filename of the database.
'''
KVPTable.__init__(self)
self._path = path
self._shared_directories = []
self._create_tables()
def _create_tables(self):
with self.connection() as con:
con.execute('CREATE TABLE IF NOT EXISTS files ('
'id INTEGER PRIMARY KEY,'
'filename TEXT NOT NULL UNIQUE,'
'key BLOB NOT NULL,'
'`index` BLOB NOT NULL,'
'size INTEGER NOT NULL,'
'mtime INTEGER NOT NULL,'
'part_size INTEGER NOT NULL,'
'last_update INTEGER DEFAULT 0,'
'file_hash_info BLOB NOT NULL)'
)
con.execute('CREATE TABLE IF NOT EXISTS parts ('
'hash_id BLOB PRIMARY KEY,'
'file_id INTEGER NOT NULL,'
'file_offset INTEGER NOT NULL,'
'last_update INTEGER DEFAULT 0,'
'FOREIGN KEY (file_id) REFERENCES files (id)'
'ON DELETE CASCADE'
')')
con.execute('CREATE TABLE IF NOT EXISTS collections ('
'file_id INTEGER PRIMARY KEY,'
'type INTEGER NOT NULL,'
'FOREIGN KEY (file_id) REFERENCES files (id)'
'ON DELETE CASCADE'
')')
con.execute('CREATE INDEX IF NOT EXISTS key ON files (key)')
@property
def shared_directories(self):
        '''A list of directories to be shared.
        Modify the list at will, but be sure to call
        :func:`hash_directories` afterwards, as file monitoring is not yet supported.
'''
return self._shared_directories
def is_acceptable(self, kvpid, size, timestamp):
return False
def indices(self, key):
if self._contains_part(key):
yield key
for i in self._file_hash_index(key):
yield i
def _file_hash_index(self, key):
for row in self.iter_query('SELECT `index` FROM files '
'WHERE key = ?', (key,)):
yield KeyBytes(row['index'])
def _contains(self, kvpid):
if kvpid.key == kvpid.index:
return self._contains_part(kvpid.key)
return self._contains_file_hash_info(kvpid)
def _contains_part(self, key):
with self.connection() as con:
cur = con.execute('SELECT 1 FROM parts WHERE '
'hash_id = ? ', (key,))
row = cur.fetchone()
if row:
return True
def _contains_file_hash_info(self, kvpid):
with self.connection() as con:
cur = con.execute('SELECT 1 FROM files WHERE '
'key = ? AND `index` = ? ', (kvpid.key, kvpid.index))
row = cur.fetchone()
if row:
return True
def keys(self):
return itertools.chain(self._parts_keys(), self._files_keys())
def _parts_keys(self):
query = 'SELECT hash_id FROM parts LIMIT {} OFFSET {}'
for row in self.iter_query(query):
yield KVPID(KeyBytes(row[0]), KeyBytes(row[0]))
def _files_keys(self):
query = 'SELECT key, `index` FROM files LIMIT {} OFFSET {}'
for row in self.iter_query(query):
yield KVPID(KeyBytes(row[0]), KeyBytes(row[1]))
def _getitem(self, kvpid):
if kvpid.key == kvpid.index:
return self._get_part(kvpid.key)
else:
return self._get_file_hash_info(kvpid)
def _get_part(self, key):
with self.connection() as con:
cur = con.execute('SELECT files.filename,'
'parts.file_offset, files.part_size '
'FROM parts JOIN files '
'ON parts.file_id = files.id '
'WHERE hash_id = ?', (key,))
filename, offset, part_size = cur.fetchone()
with open(filename, 'rb') as f:
f.seek(offset)
return f.read(part_size)
def file_hash_info(self, kvpid):
return FileInfo.from_bytes(self._get_file_hash_info(kvpid))
def _get_file_hash_info(self, kvpid):
with self.connection() as con:
cur = con.execute('SELECT file_hash_info FROM files '
'WHERE key = ? AND `index` = ? LIMIT 1',
(kvpid.key, kvpid.index))
for row in cur:
return row['file_hash_info']
raise IndexError('Not found')
def _delitem(self, kvpid):
raise ReadOnlyTableError()
def _setitem(self, kvpid, value):
raise ReadOnlyTableError()
def record(self, kvpid):
if kvpid.key == kvpid.index:
return SharedFilesRecord(self, kvpid)
else:
return SharedFileHashRecord(self, kvpid)
def hash_directories(self):
'''Hash the directories and populate the table with file info.
:rtype: :class:`SharedFilesHashTask`
'''
task = SharedFilesHashTask(self)
thread = threading.Thread(target=task)
thread.daemon = True
thread.name = 'SharedFilesHashTask'
thread.start()
return task
@property
def num_files(self):
with self.connection() as con:
cur = con.execute('SELECT COUNT(1) FROM files')
return cur.fetchone()[0]
@property
def num_collections(self):
with self.connection() as con:
cur = con.execute('SELECT COUNT(1) FROM collections')
return cur.fetchone()[0]
@property
def total_disk_size(self):
with self.connection() as con:
cur = con.execute('SELECT SUM(size) FROM files')
return cur.fetchone()[0]
class SharedFilesRecord(KVPRecord):
'''The record associated with :class:`SharedFilesKVPTable`.
This record describes a single file on the filesystem.
:see: :class:`SharedFileHashRecord`
'''
__slots__ = ('_table', '_kvpid')
def __init__(self, table, kvpid):
self._table = table
self._kvpid = kvpid
def _get_field(self, name):
with self._table.connection() as con:
cur = con.execute('SELECT {} FROM parts '
'WHERE hash_id = ?'.format(name),
(self._kvpid.key,))
for row in cur:
return row[0]
def _save_field(self, name, value):
with self._table.connection() as con:
con.execute('UPDATE parts SET {} = ? '
'WHERE hash_id = ?'.format(name),
(value, self._kvpid.key))
@property
def key(self):
return self._kvpid.key
@property
def index(self):
return self._kvpid.index
@property
def value(self):
return self._table[self._kvpid]
@property
def size(self):
return len(self.value)
@property
def timestamp(self):
return self.last_update
@timestamp.setter
def timestamp(self, seconds):
raise ReadOnlyTableError()
@property
def time_to_live(self):
return None
@time_to_live.setter
def time_to_live(self, seconds):
raise ReadOnlyTableError()
@property
def is_original(self):
return True
@is_original.setter
def is_original(self, b):
raise ReadOnlyTableError()
@property
def last_update(self):
return self._get_field('last_update')
@last_update.setter
def last_update(self, seconds):
self._save_field('last_update', seconds)
class SharedFileHashRecord(KVPRecord):
'''The record associated with :class:`SharedFilesKVPTable`.
This record describes a single file on the filesystem.
:see: :class:`SharedFileRecord`
'''
__slots__ = ('_table', '_kvpid')
def __init__(self, table, kvpid):
self._table = table
self._kvpid = kvpid
def _get_field(self, name):
with self._table.connection() as con:
cur = con.execute('SELECT {} FROM files '
'WHERE key = ? and `index` = ?'.format(name),
(self._kvpid.key, self._kvpid.index))
for row in cur:
return row[0]
def _save_field(self, name, value):
with self._table.connection() as con:
con.execute('UPDATE files SET {} = ? '
'WHERE key = ? AND `index` = ?'.format(name),
(value, self._kvpid.key, self._kvpid.index))
@property
def key(self):
return self._kvpid.key
@property
def index(self):
return self._kvpid.index
@property
def value(self):
return self._table[self._kvpid]
@property
def size(self):
return len(self.value)
@property
def timestamp(self):
return None
@timestamp.setter
def timestamp(self, seconds):
raise ReadOnlyTableError()
@property
def time_to_live(self):
return None
@time_to_live.setter
def time_to_live(self, seconds):
raise ReadOnlyTableError()
@property
def is_original(self):
return True
@is_original.setter
def is_original(self, b):
raise ReadOnlyTableError()
@property
def last_update(self):
return self._get_field('last_update')
@last_update.setter
def last_update(self, seconds):
self._save_field('last_update', seconds)
@property
def file_hash_info(self):
return self._table.file_hash_info(self._kvpid)
class SharedFilesHashTask(Task):
'''A task that hashes and populates a shared files table.
:ivar progress: a tuple (`str`, `int`) describing the filename and bytes
read.
'''
def _walk_dir(self, path):
'''Walk a directory in a sorted order and yield path, size and mtime'''
        # TODO: may recurse endlessly if followlinks=True hits a symlink cycle
for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
dirnames.sort()
for filename in sorted(filenames):
file_path = os.path.join(dirpath, filename)
size = os.path.getsize(file_path)
mtime = int(os.path.getmtime(file_path))
yield file_path, size, mtime
def run(self, table, part_size=2 ** 18):
self._table = table
self._part_size = part_size
for directory in table.shared_directories:
if not self.is_running:
return
self._hash_directory(directory)
if not table.shared_directories:
_logger.info('No directories to hash')
self._clean_database()
self._table.value_changed_observer(None)
def _hash_directory(self, directory):
_logger.info('Hashing directory %s', directory)
for file_path, size, mtime in self._walk_dir(directory):
if not self.is_running:
return
if os.path.isfile(file_path):
self._hash_file(file_path, size, mtime)
def _hash_file(self, path, size, mtime):
self.progress = (path, 0)
with self._table.connection() as con:
cur = con.execute('SELECT id, size, mtime '
'FROM files WHERE '
'filename = ? LIMIT 1', (path,))
for row in cur:
id_, result_size, result_mtime = row
if result_size == size and result_mtime == mtime:
return
con.execute('PRAGMA foreign_keys = ON')
con.execute('DELETE FROM files WHERE id = ?', (id_,))
self._hash_parts(path, size, mtime)
def _hash_parts(self, path, size, mtime):
_logger.info('Hashing file %s', path)
whole_file_hasher = hashlib.sha1()
hashes = []
with open(path, 'rb') as f:
while True:
if not self.is_running:
return
data = f.read(self._part_size)
if not data:
break
self.progress = (path, f.tell())
whole_file_hasher.update(data)
part_hasher = hashlib.sha1(data)
hashes.append(part_hasher.digest())
file_hash = whole_file_hasher.digest()
file_hash_info = FileInfo(file_hash, hashes)
index = hashlib.sha1(file_hash_info.to_bytes()).digest()
with self._table.connection() as con:
cur = con.execute('INSERT INTO files '
'(key, `index`, size, mtime, part_size, filename,'
'file_hash_info) '
'VALUES (?, ? , ? , ? , ?, ?, ?)', (file_hash, index,
size, mtime, self._part_size, path,
file_hash_info.to_bytes()))
row_id = cur.lastrowid
for i in range(len(hashes)):
offset = i * self._part_size
hash_bytes = hashes[i]
self.progress = (path, offset)
try:
con.execute('INSERT INTO parts '
'(hash_id, file_id, file_offset) VALUES '
'(?, ?, ?)', (hash_bytes, row_id, offset))
except sqlite3.IntegrityError:
_logger.exception('Possible duplicate')
collection_type = self._get_collection_type(path)
if collection_type:
con.execute('INSERT INTO collections '
'(file_id, type) VALUES '
'(?, ?)', (row_id, collection_type))
def _get_collection_type(self, path):
cookie_len = len(CollectionInfoTypes.BYTESTAG_COOKIE)
with open(path, 'rb') as f:
data = f.read(cookie_len)
if data.startswith(CollectionInfoTypes.BYTESTAG_COOKIE):
return CollectionInfoTypes.BYTESTAG
if path.endswith('.torrent'):
f.seek(0)
if self._check_bittorrent_file_contents(f):
return CollectionInfoTypes.BITTORRENT
def _check_bittorrent_file_contents(self, f):
data = f.read(1024)
if b'info' in data and b'pieces' in data:
return True
def _clean_database(self):
_logger.info('Cleaning database')
delete_params = []
with self._table.connection() as con:
cur = con.execute('SELECT rowid, filename FROM files')
for row in cur:
rowid, filename = row
if not os.path.exists(filename) \
or not self._is_in_shared_directory(filename):
delete_params.append((rowid,))
with self._table.connection() as con:
con.execute('PRAGMA foreign_keys = ON')
cur = con.executemany('DELETE FROM files WHERE rowid = ?',
delete_params)
def _is_in_shared_directory(self, path):
for shared_dir in self._table._shared_directories:
common_prefix = os.path.commonprefix([shared_dir, path])
if common_prefix in self._table._shared_directories:
                return True
# /Gizela-1.0.18.tar.gz/Gizela-1.0.18/gizela/pyplot/FigureLayoutErrEll.py
from gizela.pyplot.FigureLayoutBase import FigureLayoutBase
from gizela.pyplot.PlotPoint import PlotPoint
import math
class FigureLayoutErrEll(FigureLayoutBase):
"""
layout with error ellipses
designed for GamaLocalData instance
"""
def __init__(self,
axesOri="en",
figScale=None,
errScale=1,
stdev=None,
configFileName=None):
"""
figScale: scale of data in axes
errScale: relative scale of error ellipses
stdev: StandardDeviation instance
configFileName ... name of configuration file
"""
super(FigureLayoutErrEll, self).__init__(axesOri=axesOri,
figScale=figScale,
configFileName=configFileName)
self.errScale = errScale
if stdev is None:
from gizela.data.StandardDeviation import StandardDeviation
self.stdev = StandardDeviation()
else:
self.stdev = stdev
# error ellipse scale circle
#if "errorEllipseScaleCircle" in self.config and figScale is not None:
# from matplotlib.patches import Circle
# if self.config["errorEllipseScaleCircle"]["visible"] == "on":
# # plot scale bar
# radius_norm = self.config["errorEllipseScaleCircle"]["radius"]/100
# offset_norm = self.config["errorEllipseScaleCircle"]["offset"]/100
# radius_real = radius_norm * self.figWidth / figScale
# print "Radius norm:", radius_norm
# print "Radius real:", radius_real
# #: radius in milimeters in real
# exp = round(math.log10(radius_real))
# print "exp", exp
# radius = round(radius_real, int(-exp))
# print "Radius:", radius
# radius_norm = radius * figScale / self.figWidth
# xy = [1 - offset_norm, 1 - offset_norm]
# facecolor="white"
# trnax = self.gca().transAxes
# for i in xrange(4):
# self.gca().add_patch(Circle(xy=xy,
# radius=radius_norm,
# transform=trnax,
# facecolor=facecolor))
def update_(self, gamaLocalData):
"update figure settings according to data"
self.set_axes(gamaLocalData.get_axes_ori())
self.stdev = gamaLocalData.stdev
if self.figScale is not None:
self.set_scale_ratio(self.figScale)
def set_axes_ori(self, axesOri):
self.set_axes(axesOri)
def plot_point_error_ellipse(self, point):
#import sys
#print >>sys.stderr, "errScale: %s" % self.errScale
#print >>sys.stderr, "stdev: %s" % self.stdev
#print >>sys.stderr, "conf_scale_2d(): %s" %\
# self.stdev.get_conf_scale_2d()
PlotPoint.plot_point_error_ellipse(self, point,
self.errScale*self.stdev.get_conf_scale_2d(),
style="errorEllipseStyle")
def plot_point_x_stdev(self, point, x):
PlotPoint.plot_y_stdev(self, x, point.x,
self.errScale*self.stdev.get_conf_scale_1d()*point.stdevx,
style="stdevStyle")
def plot_point_y_stdev(self, point, x):
PlotPoint.plot_y_stdev(self, x, point.y,
self.errScale*self.stdev.get_conf_scale_1d()*point.stdevy,
style="stdevStyle")
def plot_point_z_stdev(self, point, x):
PlotPoint.plot_y_stdev(self, x, point.z,
self.errScale*self.stdev.get_conf_scale_1d()*point.stdevz,
style="stdevStyle")
def plot_point_error_z(self, point):
PlotPoint.plot_y_stdev(self, point.x, point.y,
self.errScale*self.stdev.get_conf_scale_1d()*point.stdevz,
style="errorZStyle")
if __name__ == "__main__":
try:
file = open("../../example/xml-epoch/epoch.adj.xml")
except Exception, e:
print e
print "try to run make in ../../example/xml-epoch/ directory"
import sys
sys.exit()
from gizela.data.GamaLocalDataAdj import GamaLocalDataAdj
adj = GamaLocalDataAdj()
adj.parse_file(file)
print adj
print adj.pointListAdjCovMat.covmat.make_gama_xml()
print adj.stdev
from gizela.data.Network import Network
from gizela.util.CoordSystemLocal3D import CoordSystemLocal3D
c3d = CoordSystemLocal3D()
net = Network(c3d, adj, useApriori=True)
fig = FigureLayoutErrEll(figScale=0.0001, errScale=2e3)
net.plot_point(fig)
fig.show_()
# graph
# test orientation of axes
for ori in ("ne", "en", "se", "es", "sw", "ws", "nw", "wn"):
fig = FigureLayoutErrEll(errScale=2e3)
from matplotlib import pyplot as pl
pl.figtext(0.5, 0.5, ori, fontsize=20)
net.set_axes_ori(ori)
net.plot_point(fig)
#fig.set_scale_ratio(1.0/4000)
#print fig.get_scale_ratio_string_min()
#fig.save_as("errell.png")
        fig.show_()
# /Messenger_server_dmitry_vokh-1.0.0.tar.gz/Messenger_server_dmitry_vokh-1.0.0/server/main_window.py

from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QApplication, QLabel, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import QTimer
from server.stat_window import StatWindow
from server.config_window import ConfigWindow
from server.add_user import RegisterUser
from server.remove_user import DelUserDialog
class MainWindow(QMainWindow):
    '''Main window class of the server.'''
def __init__(self, database, server, config):
        # Parent constructor
super().__init__()
        # Server database
self.database = database
self.server_thread = server
self.config = config
        # Exit action
self.exitAction = QAction('Выход', self)
self.exitAction.setShortcut('Ctrl+Q')
self.exitAction.triggered.connect(qApp.quit)
        # Button to refresh the client list
self.refresh_button = QAction('Обновить список', self)
        # Server settings button
self.config_btn = QAction('Настройки сервера', self)
        # User registration button
self.register_btn = QAction('Регистрация пользователя', self)
        # User removal button
self.remove_btn = QAction('Удаление пользователя', self)
        # Button to show the message history
self.show_history_button = QAction('История клиентов', self)
        # Status bar
self.statusBar()
self.statusBar().showMessage('Server Working')
        # Toolbar
self.toolbar = self.addToolBar('MainBar')
self.toolbar.addAction(self.exitAction)
self.toolbar.addAction(self.refresh_button)
self.toolbar.addAction(self.show_history_button)
self.toolbar.addAction(self.config_btn)
self.toolbar.addAction(self.register_btn)
self.toolbar.addAction(self.remove_btn)
        # Main window geometry settings.
        # Since we do not handle dynamic sizing yet and there was little
        # time to study it, the window size is fixed.
self.setFixedSize(800, 600)
self.setWindowTitle('Messaging Server alpha release')
        # Label saying that the list of connected clients is below
self.label = QLabel('Список подключённых клиентов:', self)
self.label.setFixedSize(240, 15)
self.label.move(10, 25)
        # Table with the list of connected clients.
self.active_clients_table = QTableView(self)
self.active_clients_table.move(10, 45)
self.active_clients_table.setFixedSize(780, 400)
        # Timer that refreshes the client list once per second
self.timer = QTimer()
self.timer.timeout.connect(self.create_users_model)
self.timer.start(1000)
        # Connect the buttons to their handlers
self.refresh_button.triggered.connect(self.create_users_model)
self.show_history_button.triggered.connect(self.show_statistics)
self.config_btn.triggered.connect(self.server_config)
self.register_btn.triggered.connect(self.reg_user)
self.remove_btn.triggered.connect(self.rem_user)
        # Finally, show the window.
self.show()
def create_users_model(self):
        '''Method that fills the table of active users.'''
list_users = self.database.active_users_list()
list = QStandardItemModel()
list.setHorizontalHeaderLabels(
['Имя Клиента', 'IP Адрес', 'Порт', 'Время подключения'])
for row in list_users:
user, ip, port, time = row
user = QStandardItem(user)
user.setEditable(False)
ip = QStandardItem(ip)
ip.setEditable(False)
port = QStandardItem(str(port))
port.setEditable(False)
            # Strip the microseconds from the time string, since that level of
            # precision is not needed.
time = QStandardItem(str(time.replace(microsecond=0)))
time.setEditable(False)
list.appendRow([user, ip, port, time])
self.active_clients_table.setModel(list)
self.active_clients_table.resizeColumnsToContents()
self.active_clients_table.resizeRowsToContents()
def show_statistics(self):
        '''Method that creates the client statistics window.'''
global stat_window
stat_window = StatWindow(self.database)
stat_window.show()
def server_config(self):
        '''Method that creates the server settings window.'''
global config_window
        # Create the window and load the current parameters into it
config_window = ConfigWindow(self.config)
def reg_user(self):
        '''Method that creates the user registration window.'''
global reg_window
reg_window = RegisterUser(self.database, self.server_thread)
reg_window.show()
def rem_user(self):
        '''Method that creates the user removal window.'''
global rem_window
rem_window = DelUserDialog(self.database, self.server_thread)
        rem_window.show()
/MRFI-2.0.0.tar.gz/MRFI-2.0.0/docs/basic_faultinjection.md

# Fault injection on LeNet
## A coarse-grained configuration fault injection experiment
For example, the following code performs a quantized random integer bit-flip injection on LeNet.
```python title="Setup LeNet default fault injection"
import torch

from dataset.lenet_cifar import make_testloader, LeNet
from mrfi import MRFI, EasyConfig
from mrfi.experiment import Acc_experiment, Acc_golden, BER_Acc_experiment
testloader = make_testloader(1000, batch_size = 128) # test on 1000 cifar-10 images
# Create the fault injection model
fi_model = MRFI(LeNet(trained=True).eval(),
EasyConfig.load_preset('default_fi'))
# fi_model can be used as a regular PyTorch model
print(fi_model(torch.zeros(1,3,32,32)).shape)
```
```python title="Simple fault injection acccuracy experiment"
# Test accuracy under fault injection with selection rate = 1e-3,
# which is specified in "easyconfigs/default_fi"
print('FI Acc: ', Acc_experiment(fi_model, testloader))
# Test accuracy w/o fault injection
with fi_model.golden_run():
    print('golden run Acc: ', Acc_experiment(fi_model, testloader))
# Another way to get golden run accuracy
print('golden run Acc: ', Acc_golden(fi_model, testloader))
```
Find the relation between bit error rate (BER) and classification accuracy.
```python title="BER_Acc_experiment"
# Get the selector handler because BER_Acc_experiment needs to modify the selection rate during the experiment
selector_cfg = fi_model.get_activation_configs('selector')
BER, Acc = BER_Acc_experiment(fi_model, selector_cfg,
make_testloader(1000, batch_size = 128),
[1e-6, 1e-5, 1e-4, 1e-3])
```
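`BER_Acc_experiment` returns matching arrays of bit error rates and accuracies. As an optional follow-up, the sketch below plots them on a log-scale x-axis; it assumes matplotlib is installed, which is an extra dependency not required by MRFI itself.
```python title="Plotting the BER sweep (optional sketch)"
import matplotlib.pyplot as plt

plt.semilogx(BER, Acc, marker='o')  # BER values span several orders of magnitude
plt.xlabel('Bit error rate (BER)')
plt.ylabel('Classification accuracy')
plt.title('LeNet accuracy vs. injected bit error rate')
plt.show()
```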
# /EmbyServerAPI-2.1.0-py3-none-any.whl/embyapi/rest.py

from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
# if not set certificate file, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = '{}'
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, (str, bytes)):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
        return error_message
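# Usage sketch (not part of the generated client): RESTClientObject only needs an
# object exposing the configuration attributes read in __init__ above. The URL and
# the minimal stand-in class below are hypothetical.
#
#   class _Cfg(object):
#       verify_ssl = True
#       ssl_ca_cert = None
#       cert_file = None
#       key_file = None
#       assert_hostname = None
#       connection_pool_maxsize = 4
#       proxy = None
#
#   client = RESTClientObject(_Cfg())
#   resp = client.GET('https://emby.example.org/System/Info/Public')
#   print(resp.status, resp.data)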
# /DjangoKit-0.13.tar.gz/DjangoKit-0.13/djangokit/templatetags/navigation.py

from django import template
from django.urls import reverse
from django.utils.crypto import get_random_string
register = template.Library()
@register.simple_tag
def active(request, *urls, **kwargs):
for url in urls:
        # A plain root URL ('/') never counts as active.
        # Single-level namespaces like 'blabla:index' do not count either.
        # Compound namespaces like 'blabla:trololo:index' (two levels or more)
        # do count as active.
if (url != '/' and not url.endswith(':index')) or url.count(':') > 1:
if request.path.startswith(reverse(url, kwargs=kwargs)):
return "active"
return ""
@register.simple_tag
def active_equal(request, url, **kwargs):
if request.path == reverse(url, kwargs=kwargs):
return "active"
return ""
@register.simple_tag
def addparams(request, **kwargs):
q = request.GET.copy()
for k, v in kwargs.items():
v = str(v)
if v:
q[k] = v
else:
q.pop(k, None)
if q:
return '?%s' % q.urlencode()
return ''
@register.simple_tag
def toggleparams(request, **kwargs):
q = request.GET.copy()
for k, v in kwargs.items():
if k in q:
del q[k]
else:
q[k] = str(v)
if q:
return '?%s' % q.urlencode()
return ''
@register.simple_tag
def delparams(request, *params):
q = request.GET.copy()
for name in params:
q.pop(name, None)
if q:
return '?%s' % q.urlencode()
return ''
def page_range_dots(page, on_each_side=3, on_ends=2, dot='.'):
number = page.number
paginator = page.paginator
num_pages = paginator.num_pages
if num_pages > 9:
if number > (on_each_side + on_ends):
page_range = [
*range(1, on_each_side),
dot,
*range(number + 1 - on_each_side, number + 1),
]
else:
page_range = list(range(1, number + 1))
if number < (num_pages - on_each_side - on_ends + 1):
page_range.extend([
*range(number + 1, number + on_each_side),
dot,
*range(num_pages - on_ends + 1, num_pages + 1),
])
else:
page_range.extend(range(number + 1, num_pages + 1))
else:
page_range = paginator.page_range
return page_range
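# Worked example (hypothetical paginator, not part of this module): for a
# paginator with num_pages=20 and page.number=10, the defaults
# (on_each_side=3, on_ends=2, dot='.') yield
#     [1, 2, '.', 8, 9, 10, 11, 12, '.', 19, 20]
# i.e. the current page with its neighbours, the first and last pages, and the
# `dot` marker standing in for the collapsed middle sections.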
@register.filter
def make_page_range(page):
return page_range_dots(page)
@register.filter
def split(s, sep=','):
return s.split(sep)
@register.filter
def guid(s, length=12):
    return '%s-%s' % (s, get_random_string(length))
# /ConfigIt-1.1.tar.gz/ConfigIt-1.1/configit.py

import inspect
import os
import sys
__version__ = 1.1
version = __version__
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
class ConfigDict(dict):
'''
Dictionary supporting attribute read & write access.
:raise AttributeError: Attempted to read an invalid attribute
'''
def __getattr__(self, key):
try:
return self[key]
except KeyError:
# to conform with __getattr__ spec
raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
del self[key]
def use(conf_source):
'''
Include configurations in local configuration dictionary.
:param conf_source: The path string or
module to include in configuration.
'''
if isinstance(conf_source, basestring):
conf_dict = conf_from_file(conf_source)
elif inspect.ismodule(conf_source):
conf_dict = conf_from_module(conf_source)
_locals_dict = sys._getframe(1).f_locals
_locals_dict.update(conf_dict)
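# Illustrative sketch (not part of this module): a typical call site. The file
# name 'settings.conf' and the variable names inside it are hypothetical; the
# file just contains plain Python assignments such as `database_url = '...'`.
#
#   import configit
#   configit.use('settings.conf')   # injects the file's names into the caller's namespace
#   print(database_url)             # now available locally
#
# Note that `use` writes into the caller's frame locals; in CPython this is only
# reliably visible at module level, so calling it from module scope is the safer
# pattern.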
def conf_from_module(module):
'''
Creates a configuration dictionary from a module.
:param module: The module, either as a string or the module itself.
'''
if isinstance(module, str):
module = import_module(module)
module_dict = dict(inspect.getmembers(module))
conf_dict = conf_from_dict(module_dict)
return conf_dict
def conf_from_file(filepath):
'''
Creates a configuration dictionary from a file.
:param filepath: The path to the file.
'''
abspath = os.path.abspath(os.path.expanduser(filepath))
conf_dict = {}
try:
exec(compile(open(abspath).read(), abspath, 'exec'),
globals(), conf_dict)
except IOError as ioerror:
raise IOError('Error while trying to get configuration from file: '
'%s\n'
'%s' % (abspath, ioerror))
conf_dict = conf_from_dict(conf_dict)
conf_dict['__config_file__'] = abspath
return conf_dict
def conf_from_dict(conf_dict):
'''
Creates a configuration dictionary from a dictionary.
    :param conf_dict: The dictionary to convert.
'''
conf = ConfigDict()
for k, v in conf_dict.items():
if k.startswith('__'):
continue
if inspect.ismodule(v):
continue
if isinstance(v, dict):
v = conf_from_dict(v)
conf[k] = v
return conf
def import_module(conf):
'''
Imports the configuration as a module.
:param conf: The string to the configuration.
Automatically strips off ".py" file extensions.
'''
if '.' in conf:
parts = conf.split('.')
name = '.'.join(parts[:-1])
fromlist = parts[-1:]
try:
module = __import__(name, fromlist=fromlist)
conf_mod = getattr(module, parts[-1])
except AttributeError:
raise ImportError('No module named %s' % conf)
else:
conf_mod = __import__(conf)
    return conf_mod
# /BittyTax-0.5.1.tar.gz/BittyTax-0.5.1/src/bittytax/conv/parsers/hitbtc.py
import copy
import sys
from decimal import Decimal
from colorama import Fore
from ...config import config
from ..dataparser import DataParser
from ..exceptions import DataRowError, UnexpectedTypeError
from ..out_record import TransactionOutRecord
WALLET = "HitBTC"
def parse_hitbtc_trades_v2(data_rows, parser, **_kwargs):
for row_index, data_row in enumerate(data_rows):
if config.debug:
sys.stderr.write(
"%sconv: row[%s] %s\n"
% (Fore.YELLOW, parser.in_header_row_num + data_row.line_num, data_row)
)
if data_row.parsed:
continue
try:
_parse_hitbtc_trades_row(data_rows, parser, data_row, row_index)
except DataRowError as e:
data_row.failure = e
def _parse_hitbtc_trades_row(data_rows, parser, data_row, row_index):
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict["Date (UTC)"])
data_row.parsed = True
# Negative fees are rebates, add as gift-received
if Decimal(row_dict["Fee"]) < 0:
dup_data_row = copy.copy(data_row)
dup_data_row.row = []
dup_data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_GIFT_RECEIVED,
data_row.timestamp,
buy_quantity=abs(Decimal(row_dict["Fee"])),
buy_asset=row_dict["Instrument"].split("/")[1],
wallet=WALLET,
)
data_rows.insert(row_index + 1, dup_data_row)
fee_quantity = 0
else:
fee_quantity = row_dict["Fee"]
fee_asset = row_dict["Instrument"].split("/")[1]
if row_dict["Side"] == "buy":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_TRADE,
data_row.timestamp,
buy_quantity=row_dict["Quantity"],
buy_asset=row_dict["Instrument"].split("/")[0],
sell_quantity=Decimal(row_dict["Quantity"]) * Decimal(row_dict["Price"]),
sell_asset=row_dict["Instrument"].split("/")[1],
fee_quantity=fee_quantity,
fee_asset=fee_asset,
wallet=WALLET,
)
elif row_dict["Side"] == "sell":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_TRADE,
data_row.timestamp,
buy_quantity=Decimal(row_dict["Quantity"]) * Decimal(row_dict["Price"]),
buy_asset=row_dict["Instrument"].split("/")[1],
sell_quantity=row_dict["Quantity"],
sell_asset=row_dict["Instrument"].split("/")[0],
fee_quantity=fee_quantity,
fee_asset=fee_asset,
wallet=WALLET,
)
else:
raise UnexpectedTypeError(parser.in_header.index("Side"), "Side", row_dict["Side"])
def parse_hitbtc_trades_v1(data_row, parser, **_kwargs):
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict["Date (UTC)"])
if row_dict["Side"] == "buy":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_TRADE,
data_row.timestamp,
buy_quantity=row_dict["Quantity"],
buy_asset=row_dict["Instrument"].split("/")[0],
sell_quantity=Decimal(row_dict["Volume"]) - Decimal(row_dict["Rebate"]),
sell_asset=row_dict["Instrument"].split("/")[1],
fee_quantity=row_dict["Fee"],
fee_asset=row_dict["Instrument"].split("/")[1],
wallet=WALLET,
)
elif row_dict["Side"] == "sell":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_TRADE,
data_row.timestamp,
buy_quantity=Decimal(row_dict["Volume"]) + Decimal(row_dict["Rebate"]),
buy_asset=row_dict["Instrument"].split("/")[1],
sell_quantity=row_dict["Quantity"],
sell_asset=row_dict["Instrument"].split("/")[0],
fee_quantity=row_dict["Fee"],
fee_asset=row_dict["Instrument"].split("/")[1],
wallet=WALLET,
)
else:
raise UnexpectedTypeError(parser.in_header.index("Side"), "Side", row_dict["Side"])
def parse_hitbtc_deposits_withdrawals_v2(data_row, _parser, **_kwargs):
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict["Date (UTC)"])
    # Looks like a bug in the exporter: "Withdraw" rows can have a blank Type.
    # Failed transactions have no transaction hash.
if row_dict["Type"] in ("Withdraw", "") and row_dict["Transaction hash"] != "":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_WITHDRAWAL,
data_row.timestamp,
sell_quantity=abs(Decimal(row_dict["Amount"])),
sell_asset=row_dict["Currency"].upper(),
wallet=WALLET,
)
elif row_dict["Type"] == "Deposit" and row_dict["Transaction hash"] != "":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_DEPOSIT,
data_row.timestamp,
buy_quantity=row_dict["Amount"],
buy_asset=row_dict["Currency"].upper(),
wallet=WALLET,
)
def parse_hitbtc_deposits_withdrawals_v1(data_row, _parser, **_kwargs):
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict["Date (UTC)"])
if row_dict["Type"] == "Withdraw":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_WITHDRAWAL,
data_row.timestamp,
sell_quantity=abs(Decimal(row_dict["Amount"])),
sell_asset=data_row.row[6],
wallet=WALLET,
)
elif row_dict["Type"] == "Deposit":
data_row.t_record = TransactionOutRecord(
TransactionOutRecord.TYPE_DEPOSIT,
data_row.timestamp,
buy_quantity=row_dict["Amount"],
buy_asset=data_row.row[6],
wallet=WALLET,
)
DataParser(
DataParser.TYPE_EXCHANGE,
"HitBTC Trades",
[
"Email",
"Date (UTC)",
"Instrument",
"Trade ID",
"Order ID",
"Side",
"Quantity",
"Price",
"Volume",
"Fee",
"Rebate",
"Total",
"Taker",
],
worksheet_name="HitBTC T",
all_handler=parse_hitbtc_trades_v2,
)
DataParser(
DataParser.TYPE_EXCHANGE,
"HitBTC Trades",
[
"Email",
"Date (UTC)",
"Instrument",
"Trade ID",
"Order ID",
"Side",
"Quantity",
"Price",
"Volume",
"Fee",
"Rebate",
"Total",
],
worksheet_name="HitBTC T",
all_handler=parse_hitbtc_trades_v2,
)
DataParser(
DataParser.TYPE_EXCHANGE,
"HitBTC Trades",
[
"Date (UTC)",
"Instrument",
"Trade ID",
"Order ID",
"Side",
"Quantity",
"Price",
"Volume",
"Fee",
"Rebate",
"Total",
],
worksheet_name="HitBTC T",
row_handler=parse_hitbtc_trades_v1,
)
DataParser(
DataParser.TYPE_EXCHANGE,
"HitBTC Deposits/Withdrawals",
[
"Email",
"Date (UTC)",
"Operation id",
"Type",
"Amount",
"Transaction hash",
"Main account balance",
"Currency",
],
worksheet_name="HitBTC D,W",
row_handler=parse_hitbtc_deposits_withdrawals_v2,
)
DataParser(
DataParser.TYPE_EXCHANGE,
"HitBTC Deposits/Withdrawals",
[
"Date (UTC)",
"Operation id",
"Type",
"Amount",
"Transaction Hash",
"Main account balance",
],
worksheet_name="HitBTC D,W",
row_handler=parse_hitbtc_deposits_withdrawals_v1,
)
# /CheckM2-1.0.1.tar.gz/CheckM2-1.0.1/checkm2/modelProcessing.py

import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
import pandas as pd
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
from checkm2.defaultValues import DefaultValues
# import xgboost as xgb
import lightgbm as lgb
import os
# make sure we're only using CPUs, as GPUs can throw weird errors and are not worth the minor speed advantage
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
from tensorflow import keras
from sklearn.preprocessing import MinMaxScaler
import pickle
import logging
import sys
import os
class modelProcessor:
def __init__(self, threads):
self.nthreads = threads
try:
self.general_model_comp = lgb.Booster(model_file=DefaultValues.GENERAL_MODEL_COMP_LOCATION)
self.model_cont = lgb.Booster(model_file=DefaultValues.MODEL_CONT_LOCATION)
self.specific_model_comp_nn = keras.models.load_model(DefaultValues.SPECIFIC_MODEL_COMP_LOCATION)
self.minmax_scaler = pickle.load(open(DefaultValues.SCALER_FILE_LOCATION, 'rb'))
if logging.root.level == logging.DEBUG:
self.verbosity = 1
else:
self.verbosity = 0
except Exception as e:
logging.error("Saved models could not be loaded: {}".format(e))
sys.exit(1)
def run_prediction_general(self, vector_array):
#TODO: make sure runs on 1 sample
comp_predictions = self.general_model_comp.predict(vector_array, n_jobs=self.nthreads)
comp_predictions[comp_predictions > 100] = 100
cont_predictions = self.model_cont.predict(vector_array, n_jobs=self.nthreads)
comp_predictions[comp_predictions < 0] = 0
cont_predictions[cont_predictions < 0] = 0
return comp_predictions.flatten(), cont_predictions.flatten()
def run_prediction_specific(self, vector_array, specific_model_vector_len):
scaled_vector = self.minmax_scaler.transform(vector_array)
# re-shape into keras-cnn-appropriate array
scaled_vector = scaled_vector.reshape(scaled_vector.shape[0], scaled_vector.shape[1], 1)
# only using genes for specific predictions
comp_predictions = self.specific_model_comp_nn.predict(scaled_vector[:, :specific_model_vector_len],
verbose=self.verbosity)
# as we're using sigmoid output for completeness model, convert to 100-scale
comp_predictions = comp_predictions * 100
comp_predictions[comp_predictions < 0] = 0
        return comp_predictions.flatten(), scaled_vector.reshape(scaled_vector.shape[0], scaled_vector.shape[1])
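# Usage sketch (hypothetical input; the feature vectors normally come from
# CheckM2's own feature extraction pipeline, which is not shown in this module):
#   mp = modelProcessor(threads=4)
#   comp, cont = mp.run_prediction_general(vector_array)                          # gradient-boosting models
#   comp_nn, scaled = mp.run_prediction_specific(vector_array, n_gene_features)   # neural-network model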
# /DoorPi-2.4.1.8.tar.gz/DoorPi-2.4.1.8/doorpi/status/requirements_lib/req_config.py
import logging
logger = logging.getLogger(__name__)
logger.debug("%s loaded", __name__)
REQUIREMENT = dict(
fulfilled_with_one = True,
text_description = '',
events = [
#dict( name = 'Vorlage', description = ''),
],
configuration = [
#dict( section = 'DoorPi', key = 'eventlog', type = 'string', default = '!BASEPATH!/conf/eventlog.db', mandatory = False, description = 'Ablageort der SQLLite Datenbank für den Event-Handler.'),
],
libraries = dict(
ConfigParser = dict(
text_warning = '',
text_description = 'Das Config-Modul wird benötigt um alle Einstellungen in einer Datei abspeichern und später wieder laden zu können.',
text_installation = 'Eine Installation ist nicht nötig, da es sich hierbei um eine Python-Standard-Modul handelt.',
auto_install = False,
text_test = 'Der Status kann gestestet werden, in dem im Python-Interpreter <code>import ConfigParser</code> eingeben wird.',
text_configuration = '''Eine Konfiguration als Eintrag in der Konfigurationsdatei macht logischerweise keinen Sinn.
Deshalb kann die zu nutzende Config-Datei als Parameter (--configfile) beim DoorPi Start mitgegeben werden. Beispiel:
<code>sudo /home/DoorPi/doorpi/main.py --configfile /home/DoorPi/conf/doorpi.ini</code>
Wenn der Parameter wegelassen wird, sucht der ConfigParser automatisch nach folgenden Dateien (wobei !BASEPATH! das Home-Verzeichnis von DoorPi ist)
<ol>
<li>!BASEPATH!/conf/doorpi.ini</li>
<li>!BASEPATH!/conf/doorpi.cfg</li>
<li>!BASEPATH!\conf\doorpi.ini</li>
<li>!BASEPATH!\conf\doorpi.cfg</li>
</ol>
Sollte keine Datei vorhanden sein, so wird mit default-Werten versucht DoorPi zum Laufen zu bringen und die Config-Datei als erster möglicher Eintrag abzuspeichern.
''',
configuration = [
#dict( section = 'DoorPi', key = 'eventlog', type = 'string', default = '!BASEPATH!/conf/eventlog.db', mandatory = False, description = 'Ablageort der SQLLite Datenbank für den Event-Handler.')
],
text_links = {
'docs.python.org': 'https://docs.python.org/2.7/library/configparser.html'
}
)
)
)
# /Kunai-0.9.tar.gz/Kunai-0.9/kunai/cluster.py

import os
import sys
import socket
import json
import uuid
import imp
import threading
import time
import random
import math
import shutil
import hashlib
import signal
import traceback
import cStringIO
import bisect
import requests as rq
import shlex
import subprocess
import tempfile
import tarfile
import base64
import shutil
import glob
import zlib
import re
import copy
import cPickle
# for mail handler
import smtplib
import datetime
try:
import jinja2
except ImportError:
jinja2 = None
try:
from Crypto.Cipher import AES
except ImportError:
AES = None
# Cannot take RSA from Crypto because on centos6 the version
# is just toooooo old :(
try:
import rsa as RSA
except ImportError:
RSA = None
# DO NOT FORGET:
# sysctl -w net.core.rmem_max=26214400
from kunai.log import logger
from kunai.kv import KVBackend
from kunai.dnsquery import DNSQuery
from kunai.ts import TSListener
from kunai.wsocket import WebSocketBackend
from kunai.util import make_dir, copy_dir, get_public_address
from kunai.threadmgr import threader
from kunai.perfdata import PerfDatas
from kunai.now import NOW
from kunai.gossip import Gossip
from kunai.generator import Generator
# now singleton objects
from kunai.websocketmanager import websocketmgr
from kunai.broadcast import broadcaster
from kunai.httpdaemon import httpdaemon, route, error, response, request, abort, gserver
from kunai.pubsub import pubsub
from kunai.dockermanager import dockermgr
from kunai.encrypter import encrypter
from kunai.version import VERSION
from kunai.stop import stopper
REPLICATS = 1
#LIMIT= 4 * math.ceil(math.log10(float(2 + 1)))
class Cluster(object):
parameters = {
'port': {'type':'int', 'mapto':'port'},
'datacenters': {'type':'list', 'mapto':'datacenters'},
'data': {'type':'path', 'mapto':'data_dir'},
'libexec': {'type':'path', 'mapto':'libexec_dir'},
'log': {'type':'path', 'mapto':'log_dir'},
'lock': {'type':'path', 'mapto':'lock_path'},
'socket': {'type':'path', 'mapto':'socket_path'},
'log_level': {'type':'string', 'mapto':'log_level'},
'bootstrap': {'type':'bool', 'mapto':'bootstrap'},
'seeds': {'type':'list', 'mapto':'seeds'},
'tags': {'type':'list', 'mapto':'tags'},
'encryption_key': {'type':'string', 'mapto':'encryption_key'},
'master_key_priv': {'type':'string', 'mapto':'master_key_priv'},
'master_key_pub': {'type':'string', 'mapto':'master_key_pub'},
}
def __init__(self, port=6768, name='', bootstrap=False, seeds='', tags='', cfg_dir='', libexec_dir=''):
self.set_exit_handler()
# Launch the now-update thread
NOW.launch()
# This will be the place where we will get our configuration data
self.cfg_data = {}
self.checks = {}
self.services = {}
self.generators = {}
self.handlers = {}
# keep a list of the checks names that match our tags
self.active_checks = []
# graphite and statsd objects
self.graphite = None
self.statsd = None
self.websocket = None
self.dns = None
# Some default value that can be erased by the
# main configuration file
# By default no encryption
self.encryption_key = ''
# Same for public/priv for the master fucking key
self.master_key_priv = '' # Paths
self.master_key_pub = ''
self.mfkey_priv = None # real key objects
self.mfkey_pub = None
self.port = port
self.name = name
if not self.name:
self.name = '%s' % socket.gethostname()
self.tags = [s.strip() for s in tags.split(',') if s.strip()]
self.interrupted = False
self.bootstrap = bootstrap
self.seeds = [s.strip() for s in seeds.split(',')]
# By default, we are alive :)
self.state = 'alive'
self.addr = get_public_address()#socket.gethostname()
self.listening_addr = '0.0.0.0'
#self.broadcasts = []
self.data_dir = os.path.abspath('/var/lib/kunai/')
self.log_dir = '/var/log/kunai'
self.lock_path = '/var/run/kunai.lock'
self.libexec_dir = '/var/lib/kunai/libexec'
self.socket_path = '$data$/kunai.sock'
self.log_level = 'INFO'
# Now look at the cfg_dir part
if cfg_dir:
self.cfg_dir = os.path.abspath(cfg_dir)
else:
self.cfg_dir = '/etc/kunai'
if not os.path.exists(self.cfg_dir):
logger.error('Configuration directory is missing')
sys.exit(2)
self.load_cfg_dir()
# Configure the logger with its new level if need
logger.setLevel(self.log_level)
# For the path inside the configuration we must
# string replace $data$ by the good value if it's set
parameters = self.__class__.parameters
for (k, d) in parameters.iteritems():
if d['type'] == 'path':
mapto = d['mapto']
v = getattr(self, mapto).replace('$data$', self.data_dir)
setattr(self, mapto, v)
# We can start with a void data dir
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
# We can start with a void log dir too
if not os.path.exists(self.log_dir):
os.mkdir(self.log_dir)
# open the log file
logger.load(self.log_dir, self.name)
# Look if our encryption key is valid or not
if self.encryption_key:
if AES is None:
logger.error('You set an encryption key but cannot import python-crypto module, please install it. Exiting.')
sys.exit(2)
try:
self.encryption_key = base64.b64decode(self.encryption_key)
except ValueError:
logger.warning('The encryption key is invalid, not in base64 format')
# todo: exit or no exit?
# and load the encryption key in the global encrypter object
encrypter.load(self.encryption_key)
# Same for master fucking key PRIVATE
if self.master_key_priv:
if not os.path.isabs(self.master_key_priv):
self.master_key_priv = os.path.join(self.cfg_dir, self.master_key_priv)
if not os.path.exists(self.master_key_priv):
logger.error('Cannot find the master key private file at %s' % self.master_key_priv)
if RSA is None:
                logger.error('You set a master private key but cannot import rsa module, please install it. Exiting.')
sys.exit(2)
buf = ''
with open(self.master_key_priv, 'r') as f:
buf = f.read()
try:
self.mfkey_priv = RSA.PrivateKey.load_pkcs1(buf)
except Exception, exp:
logger.error('Invalid master private key at %s. (%s) Exiting.' % (self.master_key_priv, exp))
sys.exit(2)
logger.info('Master private key file %s is loaded' % self.master_key_priv)
# Same for master fucking key PUBLIC
if self.master_key_pub:
if not os.path.isabs(self.master_key_pub):
self.master_key_pub = os.path.join(self.cfg_dir, self.master_key_pub)
if not os.path.exists(self.master_key_pub):
logger.error('Cannot find the master key public file at %s' % self.master_key_pub)
if RSA is None:
                logger.error('You set a master public key but cannot import rsa module, please install it. Exiting.')
sys.exit(2)
buf = ''
with open(self.master_key_pub, 'r') as f:
buf = f.read()
try:
self.mfkey_pub = RSA.PublicKey.load_pkcs1(buf)
except Exception, exp:
logger.error('Invalid master public key at %s. (%s) Exiting.' % (self.master_key_pub, exp))
sys.exit(2)
logger.info('Master public key file %s is loaded' % self.master_key_pub)
        # Open the retention data about our previous runs
self.incarnation_file = os.path.join(self.data_dir, 'incarnation')
self.server_key_file = os.path.join(self.data_dir, 'server.key')
self.nodes_file = os.path.join(self.data_dir, 'nodes.json')
self.check_retention = os.path.join(self.data_dir, 'checks.dat')
self.service_retention = os.path.join(self.data_dir, 'services.dat')
self.last_alive_file = os.path.join(self.data_dir, 'last_alive')
# Our cluster need a uniq uuid
self.uuid = ''
if os.path.exists(self.server_key_file):
with open(self.server_key_file, 'r') as f:
self.uuid = f.read()
logger.log("KEY: %s loaded from the server file %s" % (self.uuid, self.server_key_file))
else:
self.uuid = hashlib.sha1(uuid.uuid1().get_hex()).hexdigest()
# now save the key
with open(self.server_key_file, 'w') as f:
f.write(self.uuid)
logger.log("KEY: %s saved to the server file %s" % (self.uuid, self.server_key_file))
# Now load nodes to do not start from zero
if os.path.exists(self.nodes_file):
with open(self.nodes_file, 'r') as f:
self.nodes = json.loads(f.read())
else:
self.nodes = {}
# We must protect the nodes with a lock
self.nodes_lock = threading.RLock()
# Load some files, like the old incarnation file
if os.path.exists(self.incarnation_file):
with open(self.incarnation_file, 'r') as f:
self.incarnation = json.loads(f.read())
self.incarnation += 1
else:
self.incarnation = 0
# Load check and service retention as they are filled
self.load_check_retention()
self.load_service_retention()
# Now the kv backend
self.kv = KVBackend(self.data_dir)
self.replication_backlog_lock = threading.RLock()
self.replication_backlog = {}
self.last_retention_write = time.time()
if os.path.exists(self.last_alive_file):
with open(self.last_alive_file, 'r') as f:
self.last_alive = json.loads(f.read())
else:
self.last_alive = int(time.time())
# Try to clean libexec and configuration directories
self.libexec_dir = libexec_dir
if self.libexec_dir:
self.libexec_dir = os.path.abspath(self.libexec_dir)
self.configuration_dir = self.cfg_dir
if self.configuration_dir:
self.configuration_dir = os.path.abspath(self.configuration_dir)
# Our main events dict, should not be too old or we will delete them
self.events_lock = threading.RLock()
self.events = {}
self.max_event_age = 30
        # We will receive a list of paths to update for libexec, and we will manage them
        # in a thread so the udp thread is not blocking
self.libexec_to_update = []
self.configuration_to_update = []
self.launch_update_libexec_cfg_thread()
print "LOADED CFG DATA", self.cfg_data
        # by default do not launch timeseries listeners
self.ts = None
# Now no websocket
self.webso = None
        # Compile the macro pattern once
self.macro_pat = re.compile(r'(\$ *(.*?) *\$)+')
self.put_key_buffer = []
# Launch a thread that will reap all put key asked by the udp
self.put_key_reaper_thread = threader.create_and_launch(self.put_key_reaper, name='put-key-reaper')
# Execs launch as threads
self.execs = {}
# Challenge send so we can match the response when we will get them
self.challenges = {}
# Load docker thing if possible
dockermgr.launch()
        # Our main object for gossip management
self.gossip = Gossip(self.nodes, self.nodes_lock, self.addr, self.port, self.name, self.incarnation, self.uuid, self.tags, self.seeds, self.bootstrap)
# get the message in a pub-sub way
pubsub.sub('manage-message', self.manage_message_pub)
def load_cfg_dir(self):
if not os.path.exists(self.cfg_dir):
logger.log('ERROR: the configuration directory %s is missing' % self.cfg_dir)
sys.exit(2)
for root, dirs, files in os.walk(self.cfg_dir):
for name in files:
if name.endswith('.json'):
fp = os.path.join(root, name)
self.open_cfg_file(fp)
def open_cfg_file(self, fp):
o = {}
with open(fp, 'r') as f:
buf = f.read()
try:
o = json.loads(buf)
except Exception, exp:
logger.log('ERROR: the configuration file %s malformed: %s' % (fp, exp))
sys.exit(2)
if not isinstance(o, dict):
logger.log('ERROR: the configuration file %s content is not a valid dict' % fp)
sys.exit(2)
logger.debug("Configuration, opening file data", o, fp)
known_types = ['check', 'service', 'handler', 'generator',
'graphite', 'dns', 'statsd', 'websocket']
if 'check' in o:
check = o['check']
if not isinstance(check, dict):
logger.log('ERROR: the check from the file %s is not a valid dict' % fp)
sys.exit(2)
print fp
fname = fp[len(self.cfg_dir)+1:]
print "FNAME", fname
mod_time = int(os.path.getmtime(fp))
cname = os.path.splitext(fname)[0]
self.import_check(check, 'file:%s' % fname, cname, mod_time=mod_time)
if 'service' in o:
service = o['service']
if not isinstance(service, dict):
logger.log('ERROR: the service from the file %s is not a valid dict' % fp)
sys.exit(2)
mod_time = int(os.path.getmtime(fp))
fname = fp[len(self.cfg_dir)+1:]
sname = os.path.splitext(fname)[0]
self.import_service(service, 'file:%s' % fname, sname, mod_time=mod_time)
# HEHEHEHE
if 'handler' in o:
handler = o['handler']
if not isinstance(handler, dict):
logger.log('ERROR: the handler from the file %s is not a valid dict' % fp)
sys.exit(2)
mod_time = int(os.path.getmtime(fp))
fname = fp[len(self.cfg_dir)+1:]
hname = os.path.splitext(fname)[0]
self.import_handler(handler, 'file:%s' % hname, hname, mod_time=mod_time)
if 'generator' in o:
generator = o['generator']
if not isinstance(generator, dict):
logger.log('ERROR: the generator from the file %s is not a valid dict' % fp)
sys.exit(2)
mod_time = int(os.path.getmtime(fp))
fname = fp[len(self.cfg_dir)+1:]
gname = os.path.splitext(fname)[0]
self.import_generator(generator, 'file:%s' % fname, gname, mod_time=mod_time)
if 'graphite' in o:
graphite = o['graphite']
if not isinstance(graphite, dict):
logger.log('ERROR: the graphite from the file %s is not a valid dict' % fp)
sys.exit(2)
self.graphite = graphite
if 'dns' in o:
dns = o['dns']
if not isinstance(dns, dict):
logger.log('ERROR: the dns from the file %s is not a valid dict' % fp)
sys.exit(2)
self.dns = dns
if 'statsd' in o:
statsd = o['statsd']
if not isinstance(statsd, dict):
logger.log('ERROR: the statsd from the file %s is not a valid dict' % fp)
sys.exit(2)
self.statsd = statsd
if 'websocket' in o:
websocket = o['websocket']
if not isinstance(websocket, dict):
logger.log('ERROR: the websocket from the file %s is not a valid dict' % fp)
sys.exit(2)
self.websocket = websocket
# reindent this
if True:
# grok all others data so we can use them in our checks
parameters = self.__class__.parameters
for (k,v) in o.iteritems():
                # check, service, ... are already managed
if k in known_types:
continue
# if k is not a internal parameters, use it in the cfg_data part
if not k in parameters:
print "SETTING RAW VALUE", k, v
self.cfg_data[k] = v
else: # cannot be check and service here
e = parameters[k]
_type = e['type']
mapto = e['mapto']
if _type == 'int':
try:
int(v)
except ValueError:
logger.error('The parameter %s is not an int' % k)
return
elif _type in ['path', 'string']:
if not isinstance(v, basestring):
logger.error('The parameter %s is not a string' % k)
return
elif _type == 'bool':
if not isinstance(v, bool):
logger.error('The parameter %s is not a bool' % k)
return
elif _type == 'list':
if not isinstance(v, list):
logger.error('The parameter %s is not a list' % k)
return
else:
                    logger.error('Unknown parameter type %s' % k)
return
# It's valid, I set it :)
print "VALID PARAM", mapto, v
setattr(self, mapto, v)
def load_check_retention(self):
if not os.path.exists(self.check_retention):
return
logger.log('CHECK loading check retention file %s' % self.check_retention)
with open(self.check_retention, 'r') as f:
loaded = json.loads(f.read())
for (cid, c) in loaded.iteritems():
if cid in self.checks:
check = self.checks[cid]
to_load = ['last_check', 'output', 'state', 'state_id']
for prop in to_load:
check[prop] = c[prop]
logger.log('CHECK loaded %s' % self.checks)
def load_service_retention(self):
if not os.path.exists(self.service_retention):
return
logger.log('CHECK loading service retention file %s' % self.service_retention)
with open(self.service_retention, 'r') as f:
loaded = json.loads(f.read())
for (cid, c) in loaded.iteritems():
if cid in self.services:
service = self.services[cid]
to_load = ['state_id', 'incarnation']
for prop in to_load:
service[prop] = c[prop]
logger.log('CHECK loaded %s' % self.services)
    # Load and sanitize a check object in our configuration
def import_check(self, check, fr, name, mod_time=0, service=''):
check['from'] = fr
check['id'] = check['name'] = name
if not 'interval' in check:
check['interval'] = '10s'
if not 'script' in check:
check['script'] = ''
if not 'last_check' in check:
check['last_check'] = 0
if not 'notes' in check:
check['notes'] = ''
if service:
check['service'] = service
if not 'apply_on' in check:
            # we take the basename of this check directory for the apply_on
# and if /, take * (aka means all)
apply_on = os.path.basename(os.path.dirname(name))
if not apply_on:
apply_on = '*'
check['apply_on'] = apply_on
print "APPLY ON", apply_on
check['modification_time'] = mod_time
check['state'] = 'pending'
check['state_id'] = 3
check['output'] = ''
if not 'handlers' in check:
check['handlers'] = ['default']
self.checks[check['id']] = check
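    # Illustrative example (hypothetical file, consistent with the fields read
    # above): a file such as /etc/kunai/linux/load.json containing
    #   {"check": {"script": "/bin/true", "interval": "30s", "handlers": ["default"]}}
    # would be imported with id "linux/load" and apply_on "linux" (the basename
    # of the check's directory).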
# A check was deleted from the HTTP API: remove it from disk and from memory
def delete_check(self, cname):
p = os.path.normpath(os.path.join(self.cfg_dir, cname+'.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# clean on disk
if os.path.exists(p):
os.unlink(p)
# Now clean in memory too
if cname in self.checks:
del self.checks[cname]
self.link_checks()
# We have a new check from the HTTP, save it where it needs to be
def save_check(self, cname, check):
p = os.path.normpath(os.path.join(self.cfg_dir, cname+'.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# Look if the file directory exists or if not cannot be created
p_dir = os.path.dirname(p)
if not os.path.exists(p_dir):
os.makedirs(p_dir)
# import a copy, so we don't mess with the fields we need to save
to_import = copy.copy(check)
# Now import it in our running part
self.import_check(to_import, 'from:http', cname)
# and put the new one in the active running checks, maybe
self.link_checks()
# Now we can save the received entry, but first clean useless props
to_remove = ['from', 'last_check', 'modification_time', 'state', 'output', 'state_id', 'id']
for prop in to_remove:
try:
del check[prop]
except KeyError:
pass
o = {'check':check}
logger.debug('HTTP check saving the object %s into the file %s' % (o, p), part='http')
buf = json.dumps(o , sort_keys=True, indent=4)
tempdir = tempfile.mkdtemp()
f = open(os.path.join(tempdir, 'temp.json'), 'w')
f.write(buf)
f.close()
shutil.move(os.path.join(tempdir, 'temp.json'), p)
shutil.rmtree(tempdir)
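# Load and sanitize a service object: fill default fields, derive the apply_on from the
# directory name and import its embedded check (if any) under a 'service:<name>' id.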
def import_service(self, service, fr, sname, mod_time=0):
service['from'] = fr
service['name'] = service['id'] = sname
if not 'notes' in service:
service['notes'] = ''
if not 'apply_on' in service:
# we take the basename of this service's directory for the apply_on
# and if /, take the service name
apply_on = os.path.basename(os.path.dirname(sname))
if not apply_on:
apply_on = service['name']
service['apply_on'] = apply_on
print "APPLY SERVICE ON", apply_on
apply_on = service['apply_on']
if 'check' in service:
check = service['check']
cname = 'service:%s' % sname
# give the check the same apply_on as the service itself
check['apply_on'] = apply_on
self.import_check(check, fr, cname, mod_time=mod_time, service=service['id'])
# Put the default state to unknown, retention will load
# the old data
service['state_id'] = 3
service['modification_time'] = mod_time
service['incarnation'] = 0
# Add it into the services list
self.services[service['id']] = service
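# Load and sanitize a handler object: default notes, severities and type (none/mail),
# plus the type-specific fields (like the mail address).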
def import_handler(self, handler, fr, hname, mod_time=0):
handler['from'] = fr
handler['name'] = handler['id'] = hname
if not 'notes' in handler:
handler['notes'] = ''
handler['modification_time'] = mod_time
if not 'severities' in handler:
handler['severities'] = ['ok', 'warning', 'critical', 'unknown']
# look at types now
if not 'type' in handler:
handler['type'] = 'none'
_type = handler['type']
if _type == 'mail':
if not 'email' in handler:
handler['email'] = 'root@localhost'
# Add it into the list
self.handlers[handler['id']] = handler
# Generators will create files based on templates from
# data and nodes after a change on a node
def import_generator(self, generator, fr, gname, mod_time=0):
generator['from'] = fr
generator['name'] = generator['id'] = gname
if not 'notes' in generator:
generator['notes'] = ''
if not 'apply_on' in generator:
# we take the basename of this generator's directory for the apply_on
# and if /, take the generator name
apply_on = os.path.basename(os.path.dirname(gname))
if not apply_on:
apply_on = generator['name']
generator['apply_on'] = apply_on
for prop in ['path', 'template']:
if not prop in generator:
logger.warning('Bad generator, missing property %s in the generator %s' % (prop, gname))
return
# Template must be from configuration path
generator['template'] = os.path.normpath(os.path.join(self.cfg_dir, 'templates', generator['template']))
if not generator['template'].startswith(self.cfg_dir):
logger.error("Bad file path for your template property of your %s generator, is not in the cfg directory tree" % gname)
return
# and path must be a abs path
generator['path'] = os.path.abspath(generator['path'])
# We will try not to hammer the generator
generator['modification_time'] = mod_time
# Add it into the generators list
self.generators[generator['id']] = generator
# We have a new service from the HTTP, save it where it needs to be
def save_service(self, sname, service):
p = os.path.normpath(os.path.join(self.cfg_dir, sname+'.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# Look if the file directory exists or if not cannot be created
p_dir = os.path.dirname(p)
if not os.path.exists(p_dir):
os.makedirs(p_dir)
# import a copy, so we don't mess with the fields we need to save
to_import = copy.copy(service)
# Now import it in our running part
self.import_service(to_import, 'from:http', sname)
# and put the new one in the active running checks, maybe
self.link_services()
# We may have got a new service, so export this data to everyone via gossip :)
node = self.nodes[self.uuid]
self.gossip.incarnation += 1
node['incarnation'] = self.gossip.incarnation
self.gossip.stack_alive_broadcast(node)
# Now we can save the received entry, but first clean useless props
to_remove = ['from', 'last_check', 'modification_time', 'state', 'output', 'state_id', 'id']
for prop in to_remove:
try:
del service[prop]
except KeyError:
pass
o = {'service':service}
logger.debug('HTTP service saving the object %s into the file %s' % (o, p), part='http')
buf = json.dumps(o , sort_keys=True, indent=4)
tempdir = tempfile.mkdtemp()
f = open(os.path.join(tempdir, 'temp.json'), 'w')
f.write(buf)
f.close()
shutil.move(os.path.join(tempdir, 'temp.json'), p)
shutil.rmtree(tempdir)
# A service was deleted from the HTTP API: remove it from disk and from memory
def delete_service(self, sname):
p = os.path.normpath(os.path.join(self.cfg_dir, sname+'.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# clean on disk
if os.path.exists(p):
os.unlink(p)
# Now clean in memory too
if sname in self.services:
del self.services[sname]
self.link_services()
# We may have one service less, so export this data to everyone via gossip :)
node = self.nodes[self.uuid]
self.gossip.incarnation += 1
node['incarnation'] = self.gossip.incarnation
self.gossip.stack_alive_broadcast(node)
# Look at our services dict and link the one we are apply_on
# so the other nodes are aware about our tags/service
def link_services(self):
logger.debug('LINK my services and my node entry')
node = self.nodes[self.uuid]
tags = node['tags']
for (sname, service) in self.services.iteritems():
logger.debug('LINK %s on ==> %s' % (service, tags))
apply_on = service.get('apply_on', '')
logger.debug('LINK apply on %s' % apply_on)
if apply_on and apply_on in tags:
logger.debug('LINK activate service %s' % sname)
node['services'][sname] = service
# For checks we will only populate our active_checks list
# with the name of the checks we are apply_on about
def link_checks(self):
logger.debug('LOOKING FOR our checks that match our tags')
node = self.nodes[self.uuid]
tags = node['tags']
active_checks = []
for (cname, check) in self.checks.iteritems():
apply_on = check.get('apply_on', '*')
logger.debug('LINK check apply on %s' % apply_on)
if apply_on == '*' or apply_on in tags:
logger.debug('LINK activate check %s' % cname)
active_checks.append(cname)
self.active_checks = active_checks
# Also update our checks list in KV space
self.update_checks_kv()
# What to do when we receive a signal from the system
def manage_signal(self, sig, frame):
logger.log("I'm process %d and I received signal %s" % (os.getpid(), str(sig)))
if sig == signal.SIGUSR1: # if USR1, ask a memory dump
logger.log('MANAGE USR1')
elif sig == signal.SIGUSR2: # if USR2, ask objects dump
logger.log('MANAGE USR2')
else: # Ok, really ask us to die :)
self.set_interrupted()
# Callback for objects that want us to stop in a clean way
def set_interrupted(self):
self.interrupted = True
# and the global object too
stopper.interrupted = True
def set_exit_handler(self):
# First register the self.interrupted in the pubsub call
# interrupt
pubsub.sub('interrupt', self.set_interrupted)
func = self.manage_signal
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(func, True)
except ImportError:
version = ".".join(map(str, sys.version_info[:2]))
raise Exception("pywin32 not installed for Python " + version)
else:
for sig in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1, signal.SIGUSR2):
signal.signal(sig, func)
def log(self, *args):
logger.log(args)
def launch_check_thread(self):
self.check_thread = threader.create_and_launch(self.do_check_thread, name='check-thread')
def launch_generator_thread(self):
self.generator_thread = threader.create_and_launch(self.do_generator_thread, name='generator-thread')
def launch_replication_backlog_thread(self):
self.replication_backlog_thread = threader.create_and_launch(self.do_replication_backlog_thread, name='replication-backlog-thread')
def launch_replication_first_sync_thread(self):
self.replication_first_sync_thread = threader.create_and_launch(self.do_replication_first_sync_thread, name='replication-first-sync-thread')
def launch_listeners(self):
self.udp_thread = threader.create_and_launch(self.launch_udp_listener, name='udp-thread', essential=True)
self.tcp_thread = threader.create_and_launch(self.launch_tcp_listener, name='tcp-thread', essential=True)
self.webso_thread = threader.create_and_launch(self.launch_websocket_listener, name='websocket-thread', essential=True)
self.dns_thread = threader.create_and_launch(self.launch_dns_listener, name='dns-thread', essential=True)
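# UDP listener loop: each packet is decrypted, json-decoded and dispatched on its 'type'
# field (ping, ping-relay, /kv/put, /ts/new, /exec/challenge/*, or a generic gossip message).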
def launch_udp_listener(self):
self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
print "OPENING UDP", self.addr
self.udp_sock.bind((self.listening_addr, self.port))
logger.log("UDP port open", self.port, part='udp')
while not self.interrupted:
try:
data, addr = self.udp_sock.recvfrom(65535) # buffer size is 1024 bytes
except socket.timeout, exp:
continue # nothing in few seconds? just loop again :)
# No data? bail out :)
if len(data) == 0:
continue
# Look if we use encryption
data = encrypter.decrypt(data)
# Maybe the decryption failed?
if data == '':
continue
logger.debug("UDP: received message:", data, addr, part='udp')
# Ok now we should have a json to parse :)
try:
raw = json.loads(data)
except ValueError:# garbage
continue
messages = []
if isinstance(raw, list):
messages = raw
else:
messages = [raw]
for m in messages:
t = m['type']
if t == 'ping':
ack = {'type':'ack', 'seqno':m['seqno']}
ret_msg = json.dumps(ack)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
enc_ret_msg = encrypter.encrypt(ret_msg)
sock.sendto(enc_ret_msg, addr)
sock.close()
logger.debug("PING RETURN ACK MESSAGE", ret_msg, part='gossip')
# now maybe the source was a suspect that just ping me? if so
# ask for a future ping
fr_uuid = m['from']
node = self.nodes.get(fr_uuid, None)
if node and node['state'] != 'alive':
logger.debug('PINGBACK +ing node', node['name'], part='gossip')
self.gossip.to_ping_back.append(fr_uuid)
elif t == 'ping-relay':
tgt = m.get('tgt')
_from = m.get('from', '')
if not tgt or not _from:
continue
# We are asked to do an indirect ping to tgt and return the ack to
# _from; do this in a thread so we don't block here
def do_indirect_ping(self, tgt, _from, addr):
logger.debug('do_indirect_ping', tgt, _from, part='gossip')
ntgt = self.nodes.get(tgt, None)
nfrom = self.nodes.get(_from, None)
# If the dest or the from node are now unknown, exit this thread
if not ntgt or not nfrom:
return
# Now do the real ping
ping_payload = {'type':'ping', 'seqno':0, 'node': ntgt['name'], 'from': self.uuid}
message = json.dumps(ping_payload)
tgtaddr = ntgt['addr']
tgtport = ntgt['port']
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
enc_message = encrypter.encrypt(message)
sock.sendto(enc_message, (tgtaddr, tgtport) )
logger.debug('PING waiting %s ack message from a ping-relay' % ntgt['name'], part='gossip')
# Allow 3s to get an answer
sock.settimeout(3)
ret = sock.recv(65535)
logger.debug('PING (relay) got a return from %s' % ntgt['name'], ret, part='gossip')
# An answer? Great, it is alive! Let our _from node know
ack = {'type':'ack', 'seqno':0}
ret_msg = json.dumps(ack)
enc_ret_msg = encrypter.encrypt(ret_msg)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
sock.sendto(enc_ret_msg, addr)
sock.close()
except (socket.timeout, socket.gaierror), exp:
# cannot reach even us? so it's really dead, let the timeout do its job on _from
pass
# Do the indirect ping as a sub-thread
threader.create_and_launch(do_indirect_ping, name='indirect-ping-%s-%s' % (tgt, _from), args=(self, tgt, _from, addr))
elif t == '/kv/put':
k = m['k']
v = m['v']
fw = m.get('fw', False)
# For perf data we allow the udp send
self.put_key(k,v, allow_udp=True, fw=fw)
elif t == '/ts/new':
key = m.get('key', '')
# Skip this message for classic nodes
if self.ts is None or key == '':
continue
# if TS do not have it, it will propagate it
self.ts.set_name_if_unset(key)
# Someone is asking us a challenge, ok do it
elif t == '/exec/challenge/ask':
# If we don't have the public key, bailing out now
if self.mfkey_pub is None:
logger.debug('EXEC skipping exec call because we do not have a public key', part='exec')
continue
cid = uuid.uuid1().get_hex() # challenge id
challenge = uuid.uuid1().get_hex()
e = {'ctime':int(time.time()), 'challenge':challenge}
self.challenges[cid] = e
# encrypt the challenge with the master public key (the old API returned a tuple where only the first element was useful)
## TOCLEAN:: _c = self.mfkey_pub.encrypt(challenge, 0)[0]
_c = RSA.encrypt(challenge, self.mfkey_pub)
echallenge = base64.b64encode(_c)
ping_payload = {'type':'/exec/challenge/proposal', 'fr': self.uuid, 'challenge':echallenge, 'cid':cid}
message = json.dumps(ping_payload)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
enc_message = encrypter.encrypt(message)
logger.debug('EXEC asking us a challenge, return %s(%s) to %s' % (challenge, echallenge, addr), part='exec')
sock.sendto(enc_message, addr)
sock.close()
elif t == '/exec/challenge/return':
# Don't even look at it if we do not have a public key....
if self.mfkey_pub is None:
continue
cid = m.get('cid', '')
response64 = m.get('response', '')
cmd = m.get('cmd', '')
_from = m.get('fr', '')
# skip invalid packets
if not cid or not response64 or not cmd:
continue
# Maybe we got a bad or old challenge response...
p = self.challenges.get(cid, None)
if not p:
continue
try:
response = base64.b64decode(response64)
except ValueError:
logger.debug('EXEC invalid base64 response from %s' % addr, part='exec')
continue
logger.debug('EXEC got a challenge return from %s for %s:%s' % (_from, cid, response), part='exec')
# now compare the peer's decrypted response with the challenge we issued
if response == p['challenge']:
logger.debug('EXEC GOT GOOD FROM A CHALLENGE, DECRYPTED DATA', cid, response, p['challenge'], response==p['challenge'], part='exec')
threader.create_and_launch(self.do_launch_exec, name='do-launch-exec-%s' % cid, args=(cid, cmd, addr))
else:
self.manage_message(m)
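# Small embedded DNS server: answers lookups about our known nodes on the configured
# domain; it is skipped unless the 'dns' configuration section enables it.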
def launch_dns_listener(self):
if self.dns is None:
logger.log('No dns object defined in the configuration, skipping it')
return
enabled = self.dns.get('enabled', False)
if not enabled:
logger.log('Dns server is disabled, skipping it')
return
port = self.dns.get('port', 53)
domain = self.dns.get('domain', '.kunai')
# assume that domain is like .foo.
if not domain.endswith('.'):
domain += '.'
if not domain.startswith('.'):
domain = '.'+domain
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
logger.info('DNS launched server port %d' % port, part='dns')
sock.bind(('', port))
while not self.interrupted:
try:
data, addr = sock.recvfrom(1024)
except socket.timeout:
continue # loop until we got some data :)
try:
p = DNSQuery(data)
r = p.lookup_for_nodes(self.nodes, domain)
logger.debug("DNS lookup nodes response:", r, part='dns')
sock.sendto(p.response(r), addr)
except Exception, exp:
logger.log("DNS problem", exp, part='dns')
def launch_websocket_listener(self):
if self.websocket is None:
logger.log('No websocket object defined in the configuration, skipping it')
return
enabled = self.websocket.get('enabled', False)
if not enabled:
logger.log('Websocket is disabled, skipping it')
return
self.webso = WebSocketBackend(self)
# also load it in the websockermanager so other part
# can easily forward messages
websocketmgr.set(self.webso)
self.webso.run()
# TODO: SPLIT into modules :)
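# Declares all the HTTP routes (agent state, checks, services, generators, kv, exec, ...)
# and then launches both the external and the unix-socket HTTP servers.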
def launch_tcp_listener(self):
@route('/agent/state/:nname')
@route('/agent/state')
def get_state(nname=''):
response.content_type = 'application/json'
r = {'checks':{}, 'services':{}}
# no name given: it's us; otherwise look up the named node
if nname == '':
for (cid, check) in self.checks.iteritems():
# maybe this check is not an activated one for us; if so, bail out
if not cid in self.active_checks:
continue
r['checks'][cid] = check
r['services'] = self.nodes[self.uuid]['services']
return r
else: # find the elements
node = None
with self.nodes_lock:
for n in self.nodes.values():
if n['name'] == nname:
node = n
if node is None:
return abort(404, 'This node is not found')
# Services are easy, we already got them
r['services'] = node['services']
# checks are harder, we must find them in the kv nodes
v = self.get_key('__health/%s' % nname)
if v is None:
logger.debug('Cannot access to the checks list for', nname, part='http')
return r
lst = json.loads(v)
for cid in lst:
v = self.get_key('__health/%s/%s' % (nname, cid))
if v is None: # missing check entry? not a real problem
continue
check = json.loads(v)
r['checks'][cid] = check
return r
@route('/agent/info')
def get_info():
response.content_type = 'application/json'
r = {'logs':logger.get_errors(), 'pid':os.getpid(), 'name':self.name,
'port':self.port, 'addr':self.addr, 'socket':self.socket_path,
'uuid':self.uuid, 'graphite':self.graphite,
'statsd':self.statsd, 'websocket':self.websocket,
'dns':self.dns, 'threads':threader.get_info(),
'version': VERSION,
'docker': dockermgr.get_info(),
}
if self.webso:
r['websocket_info'] = self.webso.get_info()
else:
r['websocket_info'] = None
r['httpservers'] = {}
# Look at both httpservers
for (k, server) in gserver.iteritems():
if server is None:
r['httpservers'][k] = None
continue
# if available get stats
s = server.stats
nb_threads = s['Threads'](s)
idle_threads = s['Threads Idle'](s)
q = s['Queue'](s)
r['httpservers'][k] = {'nb_threads':nb_threads, 'idle_threads':idle_threads, 'queue':q}
return r
@route('/push-pull')
def interface_push_pull():
response.content_type = 'application/json'
logger.debug("PUSH-PULL called by HTTP", part='gossip')
data = request.GET.get('msg')
msg = json.loads(data)
self.manage_message(msg)
nodes = {}
with self.nodes_lock:
nodes = copy.copy(self.nodes)
m = {'type': 'push-pull-msg', 'nodes': nodes}
logger.debug("PUSH-PULL returning my own nodes", part='gossip')
return json.dumps(m)
# We want a state of all our services, with the members
@route('/state/services')
def state_services():
response.content_type = 'application/json'
logger.debug("/state/services is called", part='http')
# We don't want to modify our services objects
services = copy.deepcopy(self.services)
for service in services.values():
service['members'] = []
service['passing-members'] = []
service['passing'] = 0
service['failing-members'] = []
service['failing'] = 0
with self.nodes_lock:
for (uuid, node) in self.nodes.iteritems():
for (sname, service) in node['services'].iteritems():
if sname not in services:
continue
services[sname]['members'].append(node['name'])
if service['state_id'] == 0:
services[sname]['passing'] += 1
services[sname]['passing-members'].append(node['name'])
else:
services[sname]['failing'] += 1
services[sname]['failing-members'].append(node['name'])
return services
# We want a state of all our services, with the members
@route('/state/services/:sname')
def state_service(sname):
response.content_type = 'application/json'
logger.debug("/state/services/%s is called" % sname, part='http')
# We don't want to modify our services objects
services = copy.deepcopy(self.services)
service = services.get(sname, {})
if not service:
return {}
service['members'] = []
service['passing-members'] = []
service['passing'] = 0
service['failing-members'] = []
service['failing'] = 0
sname = service.get('name')
with self.nodes_lock:
for (uuid, node) in self.nodes.iteritems():
if sname not in node['services']:
continue
service['members'].append(node['name'])
if service['state_id'] == 0:
service['passing'] += 1
service['passing-members'].append(node['name'])
else:
service['failing'] += 1
service['failing-members'].append(node['name'])
return service
@route('/agent/checks')
def agent_checks():
response.content_type = 'application/json'
logger.debug("/agent/checks is called", part='http')
return self.checks
@route('/agent/checks/:cname#.+#')
def agent_check(cname):
response.content_type = 'application/json'
logger.debug("/agent/checks is called for %s" % cname, part='http')
if not cname in self.checks:
return abort(404, 'check not found')
return self.checks[cname]
@route('/agent/checks/:cname#.+#', method='DELETE')
def agent_DELETE_check(cname):
logger.debug("/agent/checks DELETE is called for %s" % cname, part='http')
if not cname in self.checks:
return
self.delete_check(cname)
return
@route('/agent/checks/:cname#.+#', method='PUT')
def interface_PUT_agent_check(cname):
value = request.body.getvalue()
logger.debug("HTTP: PUT a new/change check %s (value:%s)" % (cname, value), part='http')
try:
check = json.loads(value)
except ValueError: # bad json
return abort(400, 'Bad json entry')
logger.debug("HTTP: PUT a new/change check %s (value:%s)" % (cname, check), part='http')
self.save_check(cname, check)
return
@route('/agent/services')
def agent_services():
response.content_type = 'application/json'
logger.debug("/agent/services is called", part='http')
return self.services
@route('/agent/services/:sname#.+#')
def agent_service(sname):
response.content_type = 'application/json'
logger.debug("/agent/service is called for %s" % sname, part='http')
if not sname in self.services:
return abort(404, 'service not found')
return self.services[sname]
@route('/agent/services/:sname#.+#', method='PUT')
def interface_PUT_agent_service(sname):
value = request.body.getvalue()
logger.debug("HTTP: PUT a new/change service %s (value:%s)" % (sname, value), part='http')
try:
service = json.loads(value)
except ValueError: # bad json
return abort(400, 'Bad json entry')
logger.debug("HTTP: PUT a new/change check %s (value:%s)" % (sname, service), part='http')
self.save_service(sname, service)
return
@route('/agent/services/:sname#.+#', method='DELETE')
def agent_DELETE_service(sname):
logger.debug("/agent/service DELETE is called for %s" % sname, part='http')
if not sname in self.services:
return
self.delete_service(sname)
return
@route('/agent/generators')
def agent_generators():
response.content_type = 'application/json'
logger.debug("/agent/generators is called", part='http')
return self.generators
@route('/agent/generators/:gname#.+#')
def agent_generator(gname):
response.content_type = 'application/json'
logger.debug("/agent/generator is called for %s" % gname, part='http')
if not gname in self.generators:
return abort(404, 'generator not found')
return self.generators[gname]
@route('/kv/:ukey#.+#', method='GET')
def interface_GET_key(ukey):
t0 = time.time()
logger.debug("GET KEY %s" % ukey, part='kv')
v = self.get_key(ukey)
if v is None:
logger.debug("GET KEY %s return a 404" % ukey, part='kv')
abort(404, '')
logger.debug("GET: get time %s" % (time.time() -t0), part='kv')
return v
@route('/kv/:ukey#.+#', method='PUT')
def interface_PUT_key(ukey):
value = request.body.getvalue()
logger.debug("KV: PUT KEY %s (len:%d)" % (ukey, len(value)), part='kv')
force = request.GET.get('force', 'False') == 'True'
meta = request.GET.get('meta', None)
if meta:
meta = json.loads(meta)
ttl = int(request.GET.get('ttl', '0'))
self.put_key(ukey, value, force=force, meta=meta, ttl=ttl)
return
@route('/kv/:ukey#.+#', method='DELETE')
def interface_DELETE_key(ukey):
logger.debug("KV: DELETE KEY %s" % ukey, part='kv')
self.delete_key(ukey)
@route('/kv/')
def list_keys():
response.content_type = 'application/json'
l = list(self.kv.db.RangeIter(include_value = False))
return json.dumps(l)
@route('/kv-meta/changed/:t', method='GET')
def changed_since(t):
response.content_type = 'application/json'
t = int(t)
return json.dumps(self.kv.changed_since(t))
@route('/agent/propagate/libexec', method='GET')
def propage_libexec():
logger.debug("Call to propagate-configuraion", part='http')
if not os.path.exists(self.libexec_dir):
abort(400, 'Libexec directory is not existing')
all_files = [os.path.join(dp, f) for dp, dn, filenames in os.walk(os.path.abspath(self.libexec_dir)) for f in filenames]
for fname in all_files:
path = fname[len(os.path.abspath(self.libexec_dir))+1:]
# first try to open the path and get a hash of the local file
f = open(fname, 'rb')
_hash = hashlib.sha1(f.read()).hexdigest()
f.close()
logger.debug("propagate saving FILE %s into the KV space" % fname, part='http')
f = tempfile.TemporaryFile()
with tarfile.open(fileobj=f, mode="w:gz") as tar:
tar.add(fname, arcname=path)
f.seek(0)
zbuf = f.read()
f.close()
buf64 = base64.b64encode(zbuf)
logger.debug("propagate READ A %d file %s and compressed into a %d one..." % (len(zbuf), path, len(buf64)), part='http')
key = '__libexec/%s' % path
self.put_key(key, buf64)
payload = {'type':'libexec', 'path':path, 'hash':_hash}
self.stack_event_broadcast(payload)
@route('/agent/propagate/configuration', method='GET')
def propage_configuration():
logger.debug("propagate conf call TO PROPAGATE CONFIGURATION", part='http')
if not os.path.exists(self.configuration_dir):
abort(400, 'Configuration directory is not existing')
all_files = [os.path.join(dp, f) for dp, dn, filenames in os.walk(os.path.abspath(self.configuration_dir)) for f in filenames]
# we keep a list of (path, sha1) combos for the files we propagate
ok_files = []
for fname in all_files:
path = fname[len(os.path.abspath(self.configuration_dir))+1:]
# Do not send our local.json, it's local, not global!
if path == 'local.json':
continue
# first try to open the path and get a hash of the local file
f = open(fname, 'rb')
_hash = hashlib.sha1(f.read()).hexdigest()
f.close()
# save this entry
ok_files.append( (path, _hash) )
logger.debug("propagate conf SAVING FILE %s into the KV space" % fname, part='http')
# get a tar for this file, and base64 it
f = tempfile.TemporaryFile()
with tarfile.open(fileobj=f, mode="w:gz") as tar:
tar.add(fname, arcname=path)
f.seek(0)
zbuf = f.read()
f.close()
buf64 = base64.b64encode(zbuf)
print "READ A %d file %s and compressed into a %d one..." % (len(zbuf), path, len(buf64))
key = '__configuration/%s' % path
print "READ PUT KEY", key
self.put_key(key, buf64)
payload = {'type':'configuration', 'path':path, 'hash':_hash}
self.stack_event_broadcast(payload)
ok_files = [fname[len(os.path.abspath(self.configuration_dir))+1:] for fname in all_files]
logger.debug("propagate configuration All files", ok_files, part='http')
j = json.dumps(ok_files)
zj = zlib.compress(j, 9)
zj64 = base64.b64encode(zj)
self.put_key('__configuration', zj64)
payload = {'type':'configuration-cleanup'}
self.stack_event_broadcast(payload)
@route('/configuration/update', method='PUT')
def protected():
value = request.body.getvalue()
logger.debug("HTTP: configuration update put %s" % (value), part='http')
try:
update = json.loads(value)
except ValueError: # bad json...
return abort(400, 'Bad json data')
local_file = os.path.join(self.configuration_dir, 'local.json')
j = {}
with open(local_file, 'r') as f:
buf = f.read()
j = json.loads(buf)
j.update(update)
# Now save it
with open(local_file, 'w') as f:
f.write(json.dumps(j, sort_keys=True, indent=4))
# Load the data we can
self.open_cfg_file(local_file)
logger.debug('HTTP configuration update, now got %s' % j, part='http')
return
@route('/configuration', method='GET')
def get_configuration():
response.content_type = 'application/json'
logger.debug("HTTP: configuration get ", part='http')
local_file = os.path.join(self.configuration_dir, 'local.json')
j = {}
with open(local_file, 'r') as f:
buf = f.read()
j = json.loads(buf)
return j
@route('/list/')
@route('/list/:key')
def get_ts_keys(key=''):
response.content_type = 'application/json'
if self.ts is None:
return json.dumps([])
return json.dumps(self.ts.list_keys(key))
# TODO: only in the local socket http webserver
@route('/stop')
def do_stop():
pubsub.pub('interrupt')
return 'OK'
@route('/exec/:tag')
def launch_exec(tag='*'):
response.content_type = 'application/json'
if self.mfkey_priv is None:
return abort(400, 'No master private key')
cmd = request.GET.get('cmd', 'uname -a')
uid = self.launch_exec(cmd, tag)
return uid
@route('/exec-get/:cid')
def get_exec(cid):
response.content_type = 'application/json'
res = self.execs.get(cid, None)
if res is None:
return abort(400, 'BAD cid')
return json.dumps(res)
self.external_http_thread = threader.create_and_launch(httpdaemon.run, name='external-http-thread', args=(self.listening_addr, self.port, ''), essential=True)
self.unixsocket_http_thread = threader.create_and_launch(httpdaemon.run, name='unixsocket-http-thread', args=('', 0, self.socket_path,), essential=True)
# Launch an exec thread and save its uuid so we can keep a look at it then
def launch_exec(self, cmd, tag):
uid = uuid.uuid1().get_hex()
e = {'cmd':cmd, 'tag':tag, 'thread':None, 'res':{}, 'nodes':[], 'ctime':int(time.time())}
self.execs[uid] = e
t = threader.create_and_launch(self.do_exec_thread, name='exec-%s' % uid, args=(uid,))
e['thread'] = t
return uid
# Look at all nodes, ask them a challenge to manage with our priv key (they all got
# our pub key)
def do_exec_thread(self, uid):
# first look at which command we need to run
e = self.execs[uid]
tag = e['tag']
cmd = e['cmd']
logger.debug('EXEC ask for launching command', cmd, part='exec')
all_uuids = []
with self.nodes_lock: # get the nodes that follow the tag (or all in *)
for (uuid, n) in self.nodes.iteritems():
if tag == '*' or tag in n['tags']:
all_uuids.append(uuid)
e['nodes'] = all_uuids
asks = {}
e['res'] = asks
for nuid in all_uuids:
node = self.nodes.get(nuid, None)
if node is None: # was removed, don't play lottery today...
continue
# Get a socket to talk with this node
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
d = {'node':node, 'challenge':'', 'state':'pending', 'rc':3, 'output':'', 'err':''}
asks[nuid] = d
logger.debug('EXEC asking for node %s' % node['name'], part='exec')
payload = {'type':'/exec/challenge/ask', 'fr': self.uuid}
packet = json.dumps(payload)
enc_packet = encrypter.encrypt(packet)
logger.debug('EXEC: sending a challenge request to %s' % node['name'], part='exec')
sock.sendto(enc_packet, (node['addr'], node['port']))
# Now wait for a return
sock.settimeout(3)
try:
raw = sock.recv(1024)
except socket.timeout, exp:
logger.error('EXEC challenge ask timeout from node %s : %s' % (node['name'], exp), part='exec')
sock.close()
d['state'] = 'error'
continue
msg = encrypter.decrypt(raw)
if msg is None:
logger.error('EXEC bad return from node %s' % node['name'], part='exec')
sock.close()
d['state'] = 'error'
continue
try:
ret = json.loads(msg)
except ValueError, exp:
logger.error('EXEC bad return from node %s : %s' % (node['name'], exp), part='exec')
sock.close()
d['state'] = 'error'
continue
cid = ret.get('cid', '') # challenge id
challenge64 = ret.get('challenge', '')
if not challenge64 or not cid:
logger.error('EXEC bad return from node %s : no challenge or challenge id' % node['name'], part='exec')
sock.close()
d['state'] = 'error'
continue
try:
challenge = base64.b64decode(challenge64)
except ValueError:
logger.error('EXEC bad return from node %s : invalid base64' % node['name'], part='exec')
sock.close()
d['state'] = 'error'
continue
# Now send back the decrypted challenge response
logger.debug('EXEC got a return from challenge ask from %s: %s' % (node['name'], cid), part='gossip')
try:
##TOCLEAN:: response = self.mfkey_priv.decrypt(challenge)
response = RSA.decrypt(challenge, self.mfkey_priv)
except Exception, exp:
logger.error('EXEC bad challenge encoding from %s:%s' % (node['name'], exp))
sock.close()
d['state'] = 'error'
continue
response64 = base64.b64encode(response)
payload = {'type':'/exec/challenge/return', 'fr': self.uuid,
'cid':cid, 'response':response64,
'cmd':cmd}
packet = json.dumps(payload)
enc_packet = encrypter.encrypt(packet)
logger.debug('EXEC: sending a challenge response to %s' % node['name'], part='exec')
sock.sendto(enc_packet, (node['addr'], node['port']))
# Now wait a return from this node exec
sock.settimeout(3)
try:
raw = sock.recv(1024)
except socket.timeout, exp:
logger.error('EXEC done return timeout from node %s : %s' % (node['name'], exp), part='exec')
sock.close()
d['state'] = 'error'
continue
msg = encrypter.decrypt(raw)
if msg is None:
logger.error('EXEC bad return from node %s' % node['name'], part='exec')
sock.close()
d['state'] = 'error'
continue
try:
ret = json.loads(msg)
except ValueError, exp:
logger.error('EXEC bad return from node %s : %s' % (node['name'], exp), part='exec')
sock.close()
d['state'] = 'error'
continue
cid = ret.get('cid', '') # challenge id
if not cid: # bad return?
logger.error('EXEC bad return from node %s : no cid' % node['name'], part='exec')
d['state'] = 'error'
continue
v = self.get_key('__exec/%s' % cid)
if v is None:
logger.error('EXEC void KV entry from return from %s and cid %s' % (node['name'], cid), part='exec')
d['state'] = 'error'
continue
print "EXEC FUCK", v, type(v)
try:
e = json.loads(v)
except ValueError, exp:
logger.error('EXEC bad json entry return from %s and cid %s: %s' % (node['name'], cid, exp), part='exec')
d['state'] = 'error'
continue
logger.debug('EXEC GOT A RETURN! %s %s %s %s' % (node['name'], cid, e['rc'], e['output']), part='exec')
d['state'] = 'done'
d['output'] = e['output']
d['err'] = e['err']
d['rc'] = e['rc']
# Get a key, either from our local KV store or from the node that owns it
def get_key(self, ukey):
# we have to compute our internal key mapping. For user key it's: /data/KEY
key = ukey
hkey = hashlib.sha1(key).hexdigest()
nuuid = self.find_kv_node(hkey)
logger.debug('KV: key %s is managed by %s' % (ukey, nuuid), part='kv')
# that's me :)
if nuuid == self.uuid:
logger.debug('KV: (get) My job to find %s' % key, part='kv')
v = self.kv.get(key)
return v
else:
n = self.nodes.get(nuuid, None)
# Maybe the node disappeared; if so bail out and say we got no luck
if n is None:
return None
uri = 'http://%s:%s/kv/%s' % (n['addr'], n['port'], ukey)
try:
logger.debug('KV: (get) relaying to %s: %s' % (n['name'], uri), part='kv')
r = rq.get(uri)
if r.status_code == 404:
logger.debug("GET KEY %s return a 404" % ukey, part='kv')
return None
logger.debug('KV: get found (%d)' % len(r.text), part='kv')
return r.text
except rq.exceptions.RequestException, exp:
logger.debug('KV: error asking to %s: %s' % (n['name'], str(exp)), part='kv')
return None
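# Store a key: hash it and find the owning node on the ring. If it's us (or a forced or
# forwarded put) write locally and stack a replication backlog entry; otherwise forward
# the put by UDP (weak consistency) or by HTTP.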
def put_key(self, ukey, value, force=False, meta=None, allow_udp=False, ttl=0, fw=False):
# we have to compute our internal key mapping. For user key it's: /data/KEY
key = ukey
hkey = hashlib.sha1(key).hexdigest()
nuuid = self.find_kv_node(hkey)
_node = self.nodes.get(nuuid, None)
_name = ''
# The node can disappear because of another thread
if _node is not None:
_name = _node['name']
logger.debug('KV: key should be managed by %s(%s) for %s' % (_name, nuuid, ukey), part='kv')
# that's me if it's really for me, or it's a force one, or it's already a forward one
if nuuid == self.uuid or force or fw:
logger.debug('KV: (put) I should manage the key %s (force:%s) (fw:%s)' % (key, force, fw))
self.kv.put(key, value, ttl=ttl)
# We also replicate the meta data from the master node
if meta:
self.kv.put_meta(key, meta)
# If we are in force mode we are not the master node,
# so we do not launch a replication
if force:
return None
# remember to save the replication back log entry too
meta = self.kv.get_meta(ukey)
bl = {'value':(ukey, value), 'repl':[], 'hkey':hkey, 'meta':meta}
logger.debug('REPLICATION adding backlog entry %s' % bl, part='kv')
self.replication_backlog[ukey] = bl
return None
else:
n = self.nodes.get(nuuid, None)
if n is None: # oops, someone is playing with my nodes and deleted it...
return None
# Maybe the user did allow weak consistency, so we can use udp (like metrics)
if allow_udp:
try:
payload = {'type':'/kv/put', 'k':ukey, 'v':value, 'ttl':ttl, 'fw':True}
packet = json.dumps(payload)
enc_packet = encrypter.encrypt(packet)
logger.debug('KV: PUT(udp) asking %s: %s:%s' % (n['name'], n['addr'], n['port']), part='kv')
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(enc_packet, (n['addr'], n['port']))
sock.close()
return None
except Exception, exp:
logger.debug('KV: PUT (udp) error asking to %s: %s' % (n['name'], str(exp)), part='kv')
return None
# ok no allow udp here, so we switch to a classic HTTP mode :)
uri = 'http://%s:%s/kv/%s?ttl=%s' % (n['addr'], n['port'], ukey, ttl)
try:
logger.debug('KV: PUT asking %s: %s' % (n['name'], uri), part='kv')
params = {'ttl': str(ttl)}
r = rq.put(uri, data=value, params=params)
logger.debug('KV: PUT return %s' % r.status_code, part='kv')
return None
except rq.exceptions.RequestException, exp:
logger.debug('KV: PUT error asking to %s: %s' % (n['name'], str(exp)), part='kv')
return None
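# Delete a key: remove it locally if we own it on the ring, otherwise relay the DELETE
# to the owning node over HTTP.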
def delete_key(self, ukey):
# we have to compute our internal key mapping. For user key it's: /data/KEY
key = ukey
hkey = hashlib.sha1(key).hexdigest()
nuuid = self.find_kv_node(hkey)
logger.debug('KV: DELETE node that manage the key %s' % nuuid, part='kv')
# that's me :)
if nuuid == self.uuid:
logger.debug('KV: DELETE My job to manage %s' % key, part='kv')
self.kv.delete(key)
return None
else:
n = self.nodes.get(nuuid, None)
# Maybe someone deleted my node, it's not fair :)
if n is None:
return None
uri = 'http://%s:%s/kv/%s' % (n['addr'], n['port'], ukey)
try:
logger.debug('KV: DELETE relaying to %s: %s' % (n['name'], uri), part='kv')
r = rq.delete(uri)
logger.debug('KV: DELETE return %s' % r.status_code, part='kv')
return None
except rq.exceptions.RequestException, exp:
logger.debug('KV: DELETE error asking to %s: %s' % (n['name'], str(exp)), part='kv')
return None
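# Buffer a put coming from the UDP thread; the reaper loop below flushes this buffer
# so the UDP listener is never blocked by KV writes.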
def stack_put_key(self, k, v, ttl=0):
self.put_key_buffer.append( (k,v, ttl) )
# puts coming from udp must be flushed quickly by this thread so the udp
# listener can go back to listening and not lose any udp message
def put_key_reaper(self):
while not self.interrupted:
put_key_buffer = self.put_key_buffer
self.put_key_buffer = []
_t = time.time()
if len(put_key_buffer) != 0:
logger.info("PUT KEY BUFFER LEN", len(put_key_buffer))
for (k,v, ttl) in put_key_buffer:
self.put_key(k, v, ttl=ttl, allow_udp=True)
if len(put_key_buffer) != 0:
logger.info("PUT KEY BUFFER IN", time.time() - _t)
# only sleep if we didn't work at all (busy moment)
if len(put_key_buffer) == 0:
time.sleep(0.1)
def start_ts_listener(self):
# launch metric based listeners and backend
self.ts = TSListener(self)
# I try to get the nodes before myself in the nodes list
def get_my_replicats(self):
kv_nodes = self.find_kv_nodes()
kv_nodes.sort()
# Maybe someone asks us for a put but we are not fully joined yet;
# if so do not replicate this
if not self.uuid in kv_nodes:
logger.log('WARNING: too early put, myself %s is not a kv nodes currently' % self.uuid, part='kv')
return []
# You can't have more replicats than the number of kv nodes
nb_rep = min(REPLICATS, len(kv_nodes))
idx = kv_nodes.index(self.uuid)
replicats = []
for i in range(idx-nb_rep, idx):
nuuid = kv_nodes[i]
# we can't be a replicat of ourselves
if nuuid == self.uuid:
continue
replicats.append(nuuid)
rnames = []
for uuid in replicats:
# Maybe someone deleted the node just now, so we must take care of it
n = self.nodes.get(uuid, None)
if n:
rnames.append(n['name'])
logger.debug('REPLICATS: myself %s replicats are %s' % (self.name, rnames), part='kv')
return replicats
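# Replication thread: periodically swaps the backlog and pushes each entry (value + meta)
# to our replica nodes with a forced PUT.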
def do_replication_backlog_thread(self):
logger.log('REPLICATION thread launched', part='kv')
while not self.interrupted:
e = None
# Standard switch
replication_backlog = self.replication_backlog
self.replication_backlog = {}
replicats = self.get_my_replicats()
if len(replicats) == 0:
time.sleep(1)
for (ukey, bl) in replication_backlog.iteritems():
# REF: bl = {'value':(ukey, value), 'repl':[], 'hkey':hkey, 'meta':meta}
hkey = bl['hkey']
_, value = bl['value']
for uuid in replicats:
_node = self.nodes.get(uuid, None)
# Someone just deleted my node, not fair :)
if _node is None:
continue
logger.debug('REPLICATION thread manage entry to %s(%s) : %s' % (_node['name'], uuid, bl), part='kv')
# Now send it :)
n = _node
uri = 'http://%s:%s/kv/%s?force=True' % (n['addr'], n['port'], ukey)
try:
logger.debug('KV: PUT(force) asking %s: %s' % (n['name'], uri), part='kv')
params = {'force': True, 'meta':json.dumps(bl['meta'])}
r = rq.put(uri, data=value, params=params)
logger.debug('KV: PUT(force) return %s' % r, part='kv')
except rq.exceptions.RequestException, exp:
logger.debug('KV: PUT(force) error asking to %s: %s' % (n['name'], str(exp)), part='kv')
time.sleep(1)
# The first sync thread will ask our replicats for their recently changed values
# and we will merge the key/values from them
def do_replication_first_sync_thread(self):
if not 'kv' in self.tags:
logger.log('SYNC no need, I am not a KV node', part='kv')
return
logger.log('SYNC thread launched', part='kv')
# We will look until we found a repl that answer us :)
while True:
repls = self.get_my_replicats()
for repluuid in repls:
repl = self.nodes.get(repluuid, None)
# Maybe someone just delete my node, if so skip it
if repl is None:
continue
addr = repl['addr']
port = repl['port']
logger.log('SYNC try to sync from %s since the time %s' % (repl['name'], self.last_alive), part='kv')
uri = 'http://%s:%s/kv-meta/changed/%d' % (addr, port, self.last_alive)
try:
r = rq.get(uri)
logger.debug("SYNC kv-changed response from %s "%repl['name'], r, part='kv')
try:
to_merge = json.loads(r.text)
except ValueError, exp:
logger.debug('SYNC : error asking to %s: %s' % (repl['name'], str(exp)), part='kv')
continue
self.kv.do_merge(to_merge)
logger.debug("SYNC thread done, bailing out", part='kv')
return
except rq.exceptions.RequestException, exp:
logger.debug('SYNC : error asking to %s: %s' % (repl['name'], str(exp)), part='kv')
continue
time.sleep(1)
# Main thread for launching checks (each with its own thread)
def do_check_thread(self):
logger.log('CHECK thread launched', part='check')
cur_launchs = {}
while not self.interrupted:
now = int(time.time())
for (cid, check) in self.checks.iteritems():
# maybe this check is not an activated one for us; if so, bail out
if not cid in self.active_checks:
continue
# maybe a check is already running
if cid in cur_launchs:
continue
# else look at the time
last_check = check['last_check']
# TODO: parse the interval properly in the conf reading phase
interval = int(check['interval'].split('s')[0])
# randomize the interval a bit so all the checks do not fire at the same time
interval = random.randint(int(0.9*interval), int(1.1*interval))
#interval = random.randint(1, 2*interval)
if last_check < now - interval:
script = check['script']
logger.debug('CHECK: launching check %s:%s' % (cid, script), part='check')
print "LAUCN CHECK", cid, script
t = threader.create_and_launch(self.launch_check, name='check-%s' % cid, args=(check,))
cur_launchs[cid] = t
to_del = []
for (cid, t) in cur_launchs.iteritems():
if not t.is_alive():
t.join()
to_del.append(cid)
for cid in to_del:
del cur_launchs[cid]
time.sleep(1)
# Main thread for launching generators
def do_generator_thread(self):
logger.log('GENERATOR thread launched', part='generator')
cur_launchs = {}
while not self.interrupted:
now = int(time.time())
for (gname, gen) in self.generators.iteritems():
logger.debug('LOOK AT GENERATOR', gen)
apply_on = gen['apply_on']
# Maybe this generator is not for us...
if apply_on != '*' and apply_on not in self.tags:
continue
g = Generator(gen)
g.generate(self)
should_launch = g.write_if_need()
if should_launch:
g.launch_command()
time.sleep(1)
# Try to find the params for a macro in the following objects, in that order:
# * check
# * service
# * main configuration
def _found_params(self, m, check):
parts = [m]
# if we got a |, we got a default value somewhere
if '|' in m:
parts = m.split('|', 1)
change_to = ''
for p in parts:
elts = [p]
if '.' in p:
elts = p.split('.')
elts = [e.strip() for e in elts]
# we will try to grok into our cfg_data for the k1.k2.k3 =>
# self.cfg_data[k1][k2][k3] entry, if it exists
d = None
founded = False
# We will look in the check > service > global order
# but skip the service if it's not related to the check
sname = check.get('service', '')
service = {}
find_into = [check, self.cfg_data]
if sname and sname in self.services:
service = self.services.get(sname)
find_into = [check, service, self.cfg_data]
for tgt in find_into:
(lfounded, ld) = self._found_params_inside(elts, tgt)
if not lfounded:
continue
if lfounded:
founded = True
d = ld
break
if not founded:
continue
change_to = str(d)
break
return change_to
# Try to find an elts = k1.k2.k3 => d[k1][k2][k3] entry
# if it exists
def _found_params_inside(self, elts, d):
founded = False
for e in elts:
if not e in d:
founded = False
break
d = d[e]
founded = True
return (founded, d)
# Launch a check sub-process as a thread
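# Macros of the form $key.subkey|default$ in the script are resolved through
# _found_params (check > service > global configuration) before the command is run.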
def launch_check(self, check):
rc = 3 # by default unknown state and output
output = 'Check not configured'
err = ''
script = check['script']
logger.debug("CHECK start: MACRO launching %s" % script, part='check')
# First we need to change the script with good macros (between $$)
it = self.macro_pat.finditer(script)
macros = [m.groups() for m in it]
# can be ('$ load.warning | 95$', 'load.warning | 95') for example
for (to_repl, m) in macros:
change_to = self._found_params(m, check)
script = script.replace(to_repl, change_to)
logger.debug("MACRO finally computed", script, part='check')
p = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, preexec_fn=os.setsid)
output, err = p.communicate()
rc = p.returncode
# a 'command not found' error (127) should be caught as an unknown check state
if rc > 3:
rc = 3
logger.debug("CHECK RETURN %s : %s %s %s" % (check['id'], rc, output, err), part='check')
did_change = (check['state_id'] != rc)
check['state'] = {0:'ok', 1:'warning', 2:'critical', 3:'unknown'}.get(rc, 'unknown')
if 0 <= rc <= 3:
check['state_id'] = rc
else:
check['state_id'] = 3
check['output'] = output + err
check['last_check'] = int(time.time())
self.analyse_check(check)
# Launch the handlers, some need the data if the element did change or not
self.launch_handlers(check, did_change)
# get a check return and look if it did change a service state. Also save
# the result in the __health KV
def analyse_check(self, check):
logger.debug('CHECK we got a check return, deal with it for %s' % check, part='check')
# If the check is related to a service, import the result into the service
# and look for a service state change
sname = check.get('service', '')
if sname and sname in self.services:
service = self.services.get(sname)
logger.debug('CHECK is related to a service, deal with it! %s => %s' % (check, service), part='check')
sstate_id = service.get('state_id')
cstate_id = check.get('state_id')
if cstate_id != sstate_id:
service['state_id'] = cstate_id
logger.log('CHECK: we got a service state change from %s to %s for %s' % (sstate_id, cstate_id, service['name']), part='check')
# This node cannot be deleted, so we don't need a protection here
node = self.nodes.get(self.uuid)
self.gossip.incarnation += 1
node['incarnation'] = self.gossip.incarnation
self.gossip.stack_alive_broadcast(node)
else:
logger.debug('CHECK: service %s did not change (%s)' % (service['name'], sstate_id), part='check')
# We finally put the result in the KV database
self.put_check(check)
# Save the check as a json object into the __health/ KV part
def put_check(self, check):
value = json.dumps(check)
key = '__health/%s/%s' % (self.name, check['name'])
logger.debug('CHECK SAVING %s:%s(len=%d)' % (key, value, len(value)), part='check')
self.put_key(key, value, allow_udp=True)
# Now grab the perfdata metrics from the check output
elts = check['output'].split('|', 1)
output = elts[0]
try:
perfdata = elts[1]
except IndexError:
perfdata = ''
# if not perfdata, bail out
if not perfdata:
return
datas = []
cname = check['name'].replace('/','.')
now = int(time.time())
perfdatas = PerfDatas(perfdata)
for m in perfdatas:
if m.name is None or m.value is None:
continue # skip this invalid perfdata
logger.debug('GOT PERFDATAS', m, part='check')
logger.debug('GOT PERFDATAS', m.name, part='check')
logger.debug('GOT PERFDATAS', m.value, part='check')
e = {'mname':'.'.join([self.name, cname, m.name]), 'timestamp':now, 'value':m.value}
logger.debug('PUT PERFDATA', e, part='check')
datas.append(e)
self.put_graphite_datas(datas)
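# Mail handler: render the jinja2 subject/text templates from the configuration
# templates directory and send the notification through SMTP.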
def send_mail(self, handler, check):
addr_from = handler.get('addr_from', '[email protected]')
smtp_server = handler.get("smtp_server", "localhost")
smtps = handler.get("smtps", False)
contacts = handler.get('contacts', ['[email protected]'])
subject_p = handler.get('subject_template', 'email.subject.tpl')
text_p = handler.get('text_template', 'email.text.tpl')
# go connect now
try:
print "EMAIL connection to", smtp_server
s = smtplib.SMTP(smtp_server, timeout=30)
tolist = contacts
_time = datetime.datetime.fromtimestamp( int(time.time())).strftime('%Y-%m-%d %H:%M:%S')
subject_f = os.path.join(self.configuration_dir, 'templates', subject_p)
subject_buf = ''
text_f = os.path.join(self.configuration_dir, 'templates', text_p)
text_buf = ''
print "SUBJECT F", subject_f
if not os.path.exists(subject_f):
logger.error('Missing template file %s' % subject_f)
return
if not os.path.exists(text_f):
logger.error('Missing template file %s' % text_f)
return
with open(subject_f) as f:
subject_buf = f.read()
with open(text_f) as f:
text_buf = f.read()
subject_tpl = jinja2.Template(subject_buf)
subject_m = subject_tpl.render(handler=handler, check=check, _time=_time)
text_tpl = jinja2.Template(text_buf)
text_m = text_tpl.render(handler=handler, check=check, _time=_time)
msg = '''\
From: %s
Subject: %s
%s
''' % (addr_from, subject_m, text_m)
#% (addr_from, check['name'], check['state'], _time, check['output'])
print "SENDING EMAIL", addr_from, contacts, msg
r = s.sendmail(addr_from, contacts, msg)
s.quit()
except Exception, exp:
logger.error('Cannot send email: %s' % traceback.format_exc())
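# Run the handlers attached to a check: skip missing or 'none' typed handlers, filter on
# the handler severities, and only notify (mail) when the state actually changed.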
def launch_handlers(self, check, did_change):
for hname in check['handlers']:
handler = self.handlers.get(hname, None)
# maybe someone removed this handler? if so skip it :)
if handler is None:
continue
# Look at the state and should match severities
if not check['state'] in handler['severities']:
continue
# maybe it's a none (untyped) handler, if so skip it
if handler['type'] == 'none':
continue
elif handler['type'] == 'mail':
if did_change:
print "HANDLER EMAIL"*10, did_change, handler
self.send_mail(handler, check)
else:
logger.warning('Unknown handler type %s for %s' % (handler['type'], handler['name']))
# TODO: RE-factorize with the TS code part
def put_graphite_datas(self, datas):
forwards = {}
for e in datas:
mname, value, timestamp = e['mname'], e['value'], e['timestamp']
hkey = hashlib.sha1(mname).hexdigest()
ts_node_manager = self.find_ts_node(hkey)
# if I am the one managing this key, I add it to my backend
if ts_node_manager == self.uuid:
logger.debug("I am the TS node manager")
print "HOW ADDING", timestamp, mname, value, type(timestamp), type(mname), type(value)
if self.ts:
self.ts.tsb.add_value(timestamp, mname, value)
# not me? stack a forwarder
else:
logger.debug("The node manager for this Ts is ", ts_node_manager)
l = forwards.get(ts_node_manager, [])
##Transform into a graphite line
line = '%s %s %s' % (mname, value, timestamp)
l.append(line)
forwards[ts_node_manager] = l
for (uuid, lst) in forwards.iteritems():
node = self.nodes.get(uuid, None)
# maybe the node disappeared? bail out, we are not lucky
if node is None:
continue
packets = []
# first compute the packets
buf = ''
for line in lst:
buf += line+'\n'
if len(buf) > 1024:
packets.append(buf)
buf = ''
if buf != '':
packets.append(buf)
# UDP
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for packet in packets:
# do NOT use the node['port'], it's the internal communication, not the graphite one!
sock.sendto(packet, (node['addr'], 2003))
sock.close()
# Will delete all checks into the kv and update new values, but in a thread
def update_checks_kv(self):
def do_update_checks_kv(self):
logger.debug("CHECK UPDATING KV checks", part='kv')
names = []
for (cid, check) in self.checks.iteritems():
# Only the checks that we are really managing
if cid in self.active_checks:
names.append(check['name'])
self.put_check(check)
all_checks = json.dumps(names)
key = '__health/%s' % self.name
self.put_key(key, all_checks)
# Ok go launch it :)
threader.create_and_launch(do_update_checks_kv, name='do_update_checks_kv', args=(self,))
# Someone ask us to launch a new command (was already auth by RSA keys)
def do_launch_exec(self, cid, cmd, addr):
logger.debug('EXEC launching a command %s' % cmd, part='exec')
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, preexec_fn=os.setsid)
output, err = p.communicate() # Will lock here
rc = p.returncode
logger.debug("EXEC RETURN for command %s : %s %s %s" % (cmd, rc, output, err), part='exec')
o = {'output':output, 'rc':rc, 'err':err}
j = json.dumps(o)
# Save the return and put it in the KV space
key = '__exec/%s' % cid
self.put_key(key, j, ttl=3600) # only one hour live is good :)
# Now send a finish to the asker
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
payload = {'type':'/exec/done', 'cid': cid}
packet = json.dumps(payload)
enc_packet = encrypter.encrypt(packet)
logger.debug('EXEC: sending a exec done packet %s:%s' % addr, part='exec')
try:
sock.sendto(enc_packet, addr)
sock.close()
except Exception, exp:
sock.close()
# Thread that will look for libexec/configuration change events,
# will get the newest value in the KV and dump the files
def launch_update_libexec_cfg_thread(self):
def do_update_libexec_cfg_thread(self):
while not self.interrupted:
# work on a clean list
libexec_to_update = self.libexec_to_update
self.libexec_to_update = []
for (p, _hash) in libexec_to_update:
logger.debug("LIBEXEC WE NEED TO UPDATE THE LIBEXEC PATH", p, "with the hash", _hash, part='propagate')
fname = os.path.normpath(os.path.join(self.libexec_dir, p))
# check if we are still in the libexec dir and not higher, somewhere
# like in a ~/.ssh or an /etc...
if not fname.startswith(self.libexec_dir):
logger.log('WARNING (SECURITY): trying to update the path %s that is not in libexec dir, bailing out' % fname, part='propagate')
continue
# If it exists, try to look at the _hash so maybe we don't have to load it again
if os.path.exists(fname):
try:
f = open(fname, 'rb')
_lhash = hashlib.sha1(f.read()).hexdigest()
f.close()
except Exception, exp:
logger.log('do_update_libexec_cfg_thread:: error in opening the %s file: %s' % (fname, exp), part='propagate')
_lhash = ''
if _lhash == _hash:
logger.debug('LIBEXEC update, no need for the local file %s, hashes are the same' % fname, part='propagate')
continue
# ok here we need to load the KV value (a base64 tarfile)
v64 = self.get_key('__libexec/%s' % p)
if v64 is None:
logger.log('WARNING: cannot load the libexec script from kv %s' % p, part='propagate')
continue
vtar = base64.b64decode(v64)
f = cStringIO.StringIO(vtar)
with tarfile.open(fileobj=f, mode="r:gz") as tar:
files = tar.getmembers()
if len(files) != 1:
logger.log('WARNING: too many files in a libexec KV entry %d' % len(files), part='propagate')
continue
_f = files[0]
_fname = os.path.normpath(_f.name)
if not _f.isfile() or os.path.isabs(_fname):
logger.log('WARNING: (security) invalid libexec KV entry (not a file or absolute path) for %s' % _fname, part='propagate')
continue
# ok the file is good, we can extract it
tempdir = tempfile.mkdtemp()
tar.extract(_f, path=tempdir)
# now we can move all the tempdir content into the libexec dir
to_move = os.listdir(tempdir)
for e in to_move:
copy_dir(os.path.join(tempdir, e), self.libexec_dir)
logger.debug('LIBEXEC: we just updated the %s file with a new version' % _fname, part='propagate')
# we can clean the tempdir as we don't use it anymore
shutil.rmtree(tempdir)
f.close()
# Now the configuration part
configuration_to_update = self.configuration_to_update
self.configuration_to_update = []
for (p, _hash) in configuration_to_update:
logger.debug("CONFIGURATION WE NEED TO UPDATE THE CONFIGURATION PATH", p, "with the hash", _hash, part='propagate')
fname = os.path.normpath(os.path.join(self.configuration_dir, p))
                # check if we are still in the configuration dir and not higher, somewhere
                # like in a ~/.ssh or an /etc...
if not fname.startswith(self.configuration_dir):
logger.log('WARNING (SECURITY): trying to update the path %s that is not in configuration dir, bailing out' % fname, part='propagate')
continue
# If it exists, try to look at the _hash so maybe we don't have to load it again
if os.path.exists(fname):
try:
f = open(fname, 'rb')
_lhash = hashlib.sha1(f.read()).hexdigest()
f.close()
except Exception, exp:
logger.log('do_update_configuration_cfg_thread:: error in opening the %s file: %s' % (fname, exp), part='propagate')
_lhash = ''
if _lhash == _hash:
                    logger.debug('CONFIGURATION update, no need to update the local file %s, hashes are the same' % fname, part='propagate')
continue
# ok here we need to load the KV value (a base64 tarfile)
v64 = self.get_key('__configuration/%s' % p)
if v64 is None:
logger.log('WARNING: cannot load the configuration script from kv %s' % p, part='propagate')
continue
vtar = base64.b64decode(v64)
f = cStringIO.StringIO(vtar)
with tarfile.open(fileobj=f, mode="r:gz") as tar:
files = tar.getmembers()
if len(files) != 1:
                        logger.log('WARNING: too many files in a configuration KV entry: %d' % len(files), part='propagate')
continue
_f = files[0]
_fname = os.path.normpath(_f.name)
if not _f.isfile() or os.path.isabs(_fname):
logger.log('WARNING: (security) invalid configuration KV entry (not a file or absolute path) for %s' % _fname, part='propagate')
continue
# ok the file is good, we can extract it
tempdir = tempfile.mkdtemp()
tar.extract(_f, path=tempdir)
# now we can move all the tempdir content into the configuration dir
to_move = os.listdir(tempdir)
for e in to_move:
copy_dir(os.path.join(tempdir, e), self.configuration_dir)
                    logger.debug('CONFIGURATION: we just updated the %s file with a new version' % _fname, part='propagate')
# we can clean the tempdir as we don't use it anymore
shutil.rmtree(tempdir)
f.close()
            # We finished loading everything, take a short sleep now...
time.sleep(1)
# Go launch it
threader.create_and_launch(do_update_libexec_cfg_thread, args=(self,))
# find all nearly alive nodes with a specific tag
def find_tag_nodes(self, tag):
nodes = []
with self.nodes_lock:
for (uuid, node) in self.nodes.iteritems():
if node['state'] in ['dead', 'leave']:
continue
tags = node['tags']
if tag in tags:
nodes.append(uuid)
return nodes
# find the good ring node for a tag and for a key
def find_tag_node(self, tag, hkey):
kv_nodes = self.find_tag_nodes(tag)
        # No kv nodes? oops, default to myself then
if len(kv_nodes) == 0:
return self.uuid
kv_nodes.sort()
idx = bisect.bisect_right(kv_nodes, hkey) - 1
#logger.debug("IDX %d" % idx, hkey, kv_nodes, len(kv_nodes))
nuuid = kv_nodes[idx]
return nuuid
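    # Note (illustrative, not from the original code): the ring lookup above is
    # deterministic for a fixed member list. For example, with sorted
    # kv_nodes = ['aaa', 'bbb', 'ccc'] and hkey = 'abz',
    # bisect.bisect_right(kv_nodes, hkey) - 1 == 0, so node 'aaa' is selected;
    # an hkey that sorts before every uuid yields index -1, which wraps around
    # to the last node in the sorted list.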
def find_kv_nodes(self):
return self.find_tag_nodes('kv')
def find_kv_node(self, hkey):
return self.find_tag_node('kv', hkey)
def find_ts_nodes(self, hkey):
return self.find_tag_nodes('ts')
def find_ts_node(self, hkey):
return self.find_tag_node('ts', hkey)
def retention_nodes(self, force=False):
# Ok we got no nodes? something is strange, we don't save this :)
if len(self.nodes) == 0:
return
now = int(time.time())
if force or (now - 60 > self.last_retention_write):
with open(self.nodes_file+'.tmp', 'w') as f:
nodes = {}
with self.nodes_lock:
nodes = copy.copy(self.nodes)
f.write(json.dumps(nodes))
            # now move the tmp file into the real one
shutil.move(self.nodes_file+'.tmp', self.nodes_file)
# Same for the incarnation data!
with open(self.incarnation_file+'.tmp', 'w') as f:
f.write(json.dumps(self.gossip.incarnation))
            # now move the tmp file into the real one
shutil.move(self.incarnation_file+'.tmp', self.incarnation_file)
with open(self.check_retention+'.tmp', 'w') as f:
f.write(json.dumps(self.checks))
# now move the tmp into the real one
shutil.move(self.check_retention+'.tmp', self.check_retention)
with open(self.service_retention+'.tmp', 'w') as f:
f.write(json.dumps(self.services))
# now move the tmp into the real one
shutil.move(self.service_retention+'.tmp', self.service_retention)
with open(self.last_alive_file+'.tmp', 'w') as f:
f.write(json.dumps(int(time.time())))
# now move the tmp into the real one
shutil.move(self.last_alive_file+'.tmp', self.last_alive_file)
self.last_retention_write = now
def count(self, state):
nodes = {}
with self.nodes_lock:
nodes = copy.copy(self.nodes)
return len( [n for n in nodes.values() if n['state'] == state])
# Guess what? yes, it is the main function
def main(self):
        # be sure the check list is really updated now that our listeners are ok
self.update_checks_kv()
logger.log('Go go run!')
i = -1
while not self.interrupted:
i += 1
if i % 10 == 0:
#logger.debug('KNOWN NODES: %s' % ','.join([ n['name'] for n in self.nodes.values()] ) )
nodes = {}
nodes = self.nodes.copy()
logger.debug('KNOWN NODES: %d, alive:%d, suspect:%d, dead:%d, leave:%d' % (len(self.nodes), self.count('alive'), self.count('suspect'), self.count('dead'), self.count('leave')), part='gossip')
if self.count('dead') > 0:
logger.debug('DEADS: %s' % ','.join([ n['name'] for n in nodes.values() if n['state'] == 'dead']), part='gossip')
if i % 15 == 0:
threader.create_and_launch(self.gossip.launch_full_sync, name='launch-full-sync')
if i % 2 == 1:
threader.create_and_launch(self.gossip.ping_another, name='ping-another')
self.gossip.launch_gossip()
self.gossip.look_at_deads()
self.retention_nodes()
self.clean_old_events()
# Look if we lost some threads or not
threader.check_alives()
time.sleep(1)
#if i % 30 == 0:
# from meliae import scanner
# scanner.dump_all_objects( '/tmp/memory-%s' % self.name)
self.retention_nodes(force=True)
# Clean lock file so daemon after us will be happy
self.clean_lock()
logger.info('Exiting')
def clean_lock(self):
if os.path.exists(self.lock_path):
logger.info('Cleaning lock file at %s' % self.lock_path)
try:
os.unlink(self.lock_path)
except Exception, exp:
logger.error('Cannot remove lock file %s: %s' % (self.lock_path, exp))
def stack_event_broadcast(self, payload):
msg = self.create_event_msg(payload)
b = {'send':0, 'msg':msg}
broadcaster.broadcasts.append(b)
return
# interface for manage_message, in pubsub
def manage_message_pub(self, msg=None):
if msg is None:
return
self.manage_message(msg)
# Manage a udp message
def manage_message(self, m):
#print "MANAGE", m
t = m['type']
if t == 'push-pull-msg':
self.gossip.merge_nodes(m['nodes'])
elif t == 'ack':
logger.debug("GOT AN ACK?")
elif t == 'alive':
self.gossip.set_alive(m)
elif t in ['suspect', 'dead']:
self.gossip.set_suspect(m)
        # TODO: 'dead' is currently handled together with 'suspect' above; where is the dedicated 'dead' handling?
        elif t == 'leave':
self.gossip.set_leave(m)
elif t == 'event':
self.manage_event(m)
else:
logger.debug('UNKNOWN MESSAGE', m)
def manage_event(self, m):
eventid = m.get('eventid', '')
payload = m.get('payload', {})
# if bad event or already known one, delete it
with self.events_lock:
if not eventid or not payload or eventid in self.events:
return
# ok new one, add a broadcast so we diffuse it, and manage it
b = {'send':0, 'msg':m}
broadcaster.broadcasts.append(b)
with self.events_lock:
self.events[eventid] = m
# I am the sender for this event, do not handle it
if m.get('from', '') == self.uuid:
return
_type = payload.get('type', '')
if not _type:
return
# If we got a libexec file update message, we append this path to the list
# libexec_to_update so a thread will grok the new version from KV
if _type == 'libexec':
path = payload.get('path', '')
_hash = payload.get('hash', '')
if not path or not _hash:
return
            logger.debug('LIBEXEC UPDATE asking update for the path %s with the hash %s' % (path, _hash), part='propagate')
self.libexec_to_update.append((path, _hash))
# Ok but for the configuration part this time
elif _type == 'configuration':
path = payload.get('path', '')
_hash = payload.get('hash', '')
if not path or not _hash:
return
            if path == 'local.json':
                # We DON'T update our local.json file, it's purely local
return
            logger.debug('CONFIGURATION UPDATE asking update for the path %s with the hash %s' % (path, _hash), part='propagate')
self.configuration_to_update.append((path, _hash))
        # Maybe we are asked to clean our configuration; if so, launch a thread because
        # we can't block this thread while doing it
elif _type == 'configuration-cleanup':
threader.create_and_launch(self.do_configuration_cleanup, name='configuration-cleanup')
else:
logger.debug('UNKNOWN EVENT %s' % m)
return
# Look at the /kv/configuration/ entry, uncompress the json string
# and clean old files into the configuration directory that is not in this list
# but not the local.json that is out of global conf
def do_configuration_cleanup(self):
zj64 = self.get_key('__configuration')
if zj64 is None:
logger.log('WARNING cannot grok kv/__configuration entry', part='propagate')
return
zj = base64.b64decode(zj64)
j = zlib.decompress(zj)
lst = json.loads(j)
logger.debug("WE SHOULD CLEANUP all but not", lst, part='propagate')
local_files = [os.path.join(dp, f)
for dp, dn, filenames in os.walk(os.path.abspath(self.configuration_dir))
for f in filenames]
for fname in local_files:
path = fname[len(os.path.abspath(self.configuration_dir))+1:]
# Ok, we should not have the local.json entry, but even if we got it, do NOT rm it
if path == 'local.json':
continue
if not path in lst:
full_path = os.path.join(self.configuration_dir, path)
logger.debug("CLEANUP we should clean the file", full_path, part='propagate')
try:
os.remove(full_path)
except OSError, exp:
logger.log('WARNING: cannot cleanup the configuration file %s (%s)' % (full_path, exp), part='propagate')
# We are joining the seed members and lock until we reach at least one
def join(self):
self.gossip.join()
# each second we look for all old events in order to clean and delete them :)
def clean_old_events(self):
now = int(time.time())
to_del = []
with self.events_lock:
for (cid, e) in self.events.iteritems():
ctime = e.get('ctime', 0)
if ctime < now - self.max_event_age:
to_del.append(cid)
        # why sleep here? because we don't want to grab the lock twice in a row: the udp
        # thread is also waiting for it and it has priority, not us
time.sleep(0.01)
with self.events_lock:
for cid in to_del:
try:
del self.events[cid]
                except KeyError: # if already deleted, we don't care
pass | PypiClean |
/Flask-REST-multiformat-api-0.1.0.tar.gz/Flask-REST-multiformat-api-0.1.0/flask_rest_multiformat_api/view.py |
from flask import (abort, request, make_response)
from flask.views import MethodView
from .serialize import serialise, apply_data_to_model
from .utils import build_filter, loads_filters
from .queries import get_single, get_many
import json
from werkzeug.exceptions import BadRequest, MethodNotAllowed
from marshmallow import ValidationError
from sqlalchemy.orm import Query
from .format import DATA_FORMATER
from .exceptions import ApiException
from flask_rest_multiformat_api.errors import (ApiError, ObjectNotFoundError,
InvalidDataError
)
DEFAULT_FORMATER = DATA_FORMATER['jsonapi']
class BaseView(MethodView):
model = None
session = None
allowed_methods = ['GET', 'POST', 'PATCH', 'PUT', 'DELETE']
type = ''
links = {}
data_format = "jsonapi"
handle_exception = (ValidationError)
_decorators = {}
def __init__(self, *args, **kwargs):
super(MethodView, self).__init__(*args, **kwargs)
allowed_method = [method.lower() for method in self.allowed_methods]
methods = [meth.lower() for meth in self.methods]
Dataformater = DATA_FORMATER.get(self.data_format,
DEFAULT_FORMATER
)
self.data_formater = Dataformater()
for method in methods:
if method not in allowed_method:
setattr(self, method, None)
def apply_decorators(self, meth):
decorators = self._decorators.get(request.method.lower(), [])
for decorator in decorators:
meth = decorator(meth)
return meth
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
print('meth :', meth)
# print('methodes: ', lower_methods,request.method.lower() )
# If the request method is HEAD and we don't have a handler for it
# retry with GET.
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
if meth is None:
raise MethodNotAllowed('%s method not allowed.' %
request.method.lower()
)
assert meth is not None, 'Unimplemented method %r' % request.method
try:
meth = self.apply_decorators(meth)
return meth(*args, **kwargs)
except (ApiException, ValidationError) as e:
if isinstance(e, ValidationError):
errors = [InvalidDataError(e.messages)]
return self.data_formater.build_error_response(errors)
return self.data_formater.build_error_response(e.errors)
class ModelDetailView(BaseView):
allowed_methods = ['GET', 'PUT', 'PATCH', 'DELETE']
def get_object(self, *args, **kwargs):
id = kwargs.get("id")
model_object = get_single(self.session, self.model, id)
return model_object
def get(self, *args, **kwargs):
print(args, kwargs)
orm_obj = self.get_object(*args, **kwargs)
if not orm_obj:
error = ObjectNotFoundError(self.model, kwargs.get("id"))
raise ApiException([error], 404)
orm_obj_json = serialise(orm_obj, self)
return self.data_formater.create_response(orm_obj_json, 200)
def update(self, *args, **kwargs):
code = 201
model_obj = self.get_object(*args, **kwargs)
# print("MODEL OBJ: ", model_obj)
if model_obj is None:
error = ObjectNotFoundError(self.model, kwargs.get("id"))
raise ApiException([error], 404)
data = self.data_formater.parse_data(request.data)
model_obj = apply_data_to_model(self.model, model_obj, data) if \
isinstance(data, dict) else data
self.session.commit()
response = serialise(model_obj, self)
return self.data_formater.create_response(response, code)
def delete(self, *args, **kwargs):
orm_obj = self.get_object(*args, **kwargs)
self.before_delete_object(orm_obj, *args, **kwargs)
if not orm_obj:
error = ObjectNotFoundError(self.model, kwargs.get("id"))
raise ApiException([error], 404)
self.session.delete(orm_obj)
self.session.commit()
self.after_delete_object(orm_obj, *args, **kwargs)
return '', 202
def put(self, *args, **kwargs):
return self.update(*args, **kwargs)
def patch(self, *args, **kwargs):
return self.update(*args, **kwargs)
def before_delete_object(self, object, *args, **kwargs):
pass
def after_delete_object(self, object, *args, **kwargs):
pass
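# Illustrative sketch (not part of the original package): a hypothetical subclass
# wiring ModelDetailView to an application's model. The names `User`, `UserSchema`
# and `db_session` below are assumptions for the example only.
#
#   class UserDetailView(ModelDetailView):
#       model = User
#       schema = UserSchema
#       session = db_session
#       type = 'user'
#
#   app.add_url_rule('/users/<int:id>',
#                    view_func=UserDetailView.as_view('user_detail'))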
class ModelListView(BaseView):
allowed_methods = ['GET', 'POST']
def get_objects(self, *args, **kwargs):
filters_dict = loads_filters(request)
order = request.args.get('sort', '')
order_by = request.args.get('sort_by', '')
number_par_page = request.args.get('per_page', 50)
page_number = request.args.get('page', 0)
model_objects = get_many(self.session, self.model,
filters_dict, order_by, order,
number_par_page, page_number
)
return model_objects
def get(self, *args, **kwargs):
orm_objs = self.get_objects(*args, **kwargs)
page_number = request.args.get('page', 0)
orm_objs_json = serialise(orm_objs, self,
page_number=page_number,
)
return orm_objs_json, 200
def post(self, *args, **kwargs):
code = 201
data = self.data_formater.parse_data(request.data)
self.before_post(args, kwargs, data)
model_obj = self.create_object(data, *args, **kwargs)
self.after_create_object(model_obj, *args, **kwargs)
response = serialise(model_obj, self)
self.after_post(model_obj, args, kwargs)
return response, code
def create_object(self, data, *args, **kwargs):
model_obj = self.schema().load(data, partial=True)
self.session.add(model_obj)
self.session.commit()
return model_obj
def after_create_object(self, new_object, *args, **kwargs):
pass
def before_post(self, args, kwargs, data=None):
pass
def after_post(self, new_object, args, kwargs):
pass
class RelationshipView(BaseView):
model = None
session = None
relation_attribute_name = ''
queries = {'single': get_single}
methods = ['GET', 'POST', 'DELETE']
allowed_methods = ['GET', 'POST', 'DELETE']
data_format = "jsonapi"
def __init__(self, *args, **kwargs):
super(MethodView, self).__init__(*args, **kwargs)
allowed_method = [method.lower() for method in self.allowed_methods]
methods = [meth.lower() for meth in self.methods]
        self.data_formater = DATA_FORMATER.get(self.data_format, DEFAULT_FORMATER)()  # instantiate, as in BaseView
for method in methods:
if method not in allowed_method:
setattr(self, method, None)
def get_object(self, *args, **kwargs):
id = kwargs.get("id")
model_object = get_single(self.session, self.model, id)
return model_object
def get_related_object(self, orm_obj):
relation_object = getattr(orm_obj, self.relation_attribute_name, None)
return relation_object
def get(self, *args, **kwargs):
orm_object = self.get_object(*args, **kwargs)
related_object = self.get_related_object(orm_object)
# to do: add filter for performance
relation_objects = related_object.all() if \
isinstance(related_object, Query)\
else related_object
relation_model = relation_objects.__class__ if not isinstance(relation_objects, list) \
else relation_objects[0].__class__
id_relation = kwargs.get("id_relation")
if id_relation:
            object_str = None  # ensure defined even if no matching relation is found
if relation_objects:
for relation_object in relation_objects:
if relation_object.id == id_relation:
object_str = serialise(relation_object, self)
else:
object_str = serialise(relation_objects, self)
return object_str, 200
def post(self, id):
print("post request")
data = json.loads(request.data)
id_relation = data.get('id', None)
if not id_relation:
return 'Id relation must be specified', 400
query_function = self.queries['single']
orm_obj = query_function(self.session, self.model, id)
relation_objects = getattr(orm_obj, self.relation_attribute_name, [])
model_attr = getattr(self.model, self.relation_attribute_name, None)
relation_model = model_attr.property.mapper.class_
relation_obj = query_function(self.session, relation_model, id_relation)
if not relation_obj:
return 'Object for relation not found', 400
relation_objects.append(relation_obj)
self.session.commit()
object_str = serialise(relation_obj, self)
return object_str, 201
def delete(self, id, id_relation):
print("delete request")
query_function = self.queries['single']
orm_obj = query_function(self.session, self.model, id)
relation_objects = getattr(orm_obj, self.relation_attribute_name, [])
model_attr = getattr(self.model, self.relation_attribute_name, None)
relation_model = model_attr.property.mapper.class_
relation_obj = query_function(self.session, relation_model, id_relation)
if not relation_obj:
return 'Object for relation not found', 400
relation_objects.remove(relation_obj)
self.session.commit()
return '', 201 | PypiClean |
/Helios_Scanner-1.1-py3-none-any.whl/helios/webapp/phpmyadmin.py | from helios.webapp import base_app
import re
from helios.core.utils import requests_response_to_dict
import json
import requests
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
# This script detects vulnerabilities in the following PHP based products:
# - phpMyAdmin
class Scanner(base_app.BaseAPP):
def __init__(self):
self.name = "phpMyAdmin"
self.types = []
def detect(self, url):
directories = ['phpmyadmin', 'pma', 'phpMyAdmin', '']
for d in directories:
path = urljoin(url, d)
response = self.send(path)
if response and response.status_code == 200 and "phpmyadmin.css.php" in response.text:
self.app_url = path
return True
return False
def test(self, url):
docu_url = urljoin(url, "Documentation.html")
docu_response = self.send(docu_url)
version = None
if docu_response:
get_version = re.search(r'<title>phpMyAdmin\s([\d\.]+)\s', docu_response.text)
if get_version:
version = get_version.group(1)
self.logger.info("phpMyAdmin version %s was identified from the Documentation.html file" % version)
if version:
db = self.get_db("phpmyadmin_vulns.json")
data = json.loads(db)
pma_vulns = data['phpMyAdmin']
self.match_versions(pma_vulns, version, url)
self.test_auth(url)
def test_auth(self, url):
sess = requests.session()
default_creds = ['root:', 'root:admin', 'root:root']
init_req = sess.get(url)
if not init_req:
self.logger.warning("Unable to test authentication, invalid initial response")
return
token_re = re.search('name="token".+?value="(.+?)"', init_req.text)
for entry in default_creds:
if not token_re:
self.logger.warning("Unable to test authentication, no token")
return
user, passwd = entry.split(':')
payload = {'lang': 'en', 'pma_username': user, 'pma_password': passwd, 'token': token_re.group(1)}
post_url = urljoin(url, 'index.php')
post_response = sess.post(post_url, payload)
if post_response and 'Refresh' in post_response.headers:
returl = post_response.headers['Refresh'].split(';')[1].strip()
retdata = sess.get(returl)
if retdata:
if 'class="loginform">' not in retdata.text:
match_str = "Possible positive authentication for user: %s and password %s on %s " % \
(user, passwd, url)
result = {
'request': requests_response_to_dict(post_response),
'match': match_str
}
self.logger.info(match_str)
self.results.append(result)
return
else:
token_re = re.search('name="token".+?value="(.+?)"', retdata.text) | PypiClean |
/LEOS9-0.0.tar.gz/LEOS9-0.0/src/libs/create_rinex.py |
#%%
import sys
import numpy as np
import numpy.matlib
import georinex as gr
import matplotlib.pyplot as plt
from datetime import datetime
from netCDF4 import Dataset
import datetime as dt
libs_dir='D:\\INCUBATE\\codes\\PNT_simulation'; # dir where the model is located
sys.path.insert(0, libs_dir); # path of local libs
import funclib as func
from scipy.interpolate import interp1d
def inteprolateNavPar(time,par):
# f=interp1d(time, par,'previous', bounds_error=False)
idNoNan = tuple([~np.isnan(par)]);
idNoNan[0][0]=np.array(True);
f=interp1d(time[idNoNan], par[idNoNan],'previous', fill_value="extrapolate");
return f
def inteprolateNavTime(time,par,parwithNan):
idNoNan = tuple([~np.isnan(parwithNan)]);
idNoNan[0][0]=np.array(True);
f=interp1d(time[idNoNan], par[idNoNan],'previous', fill_value="extrapolate");
return f
def check_t(time):
    # Function accounting for beginning or end of week crossover
    half_week = 302400;  # seconds
    corrTime = np.where(time > half_week, time - 2 * half_week,
                        np.where(time < -half_week, time + 2 * half_week, time))
    return corrTime
def inteprolateGPS(GPSTime,M0,DeltaN,sqrtA,Eccentricity,omega,Cuc,Cus,Crc,Crs,Io,IDOT,Cic,Cis,Omega0,OmegaDot,TGD,Toe,GPSWeek,SVclockBias,SVclockDrift,SVclockDriftRate,Toc):
# Constants
GM = 3.986005e14;
cr = 6.283185307179600; # CIRCLE_RAD;
Omegae_dot = 7.2921151467e-5; # angular velocity of the Earth rotation [rad/s]
# Parameters for satellite positions
M0 = M0*1;
deltan = DeltaN*1;
roota = sqrtA*1;
ecc = Eccentricity*1;
omega = omega*1;
cuc = Cuc*1;
cus = Cus*1;
crc = Crc*1;
crs = Crs*1;
i0 = Io*1;
IDOT = IDOT*1;
cic = Cic*1;
cis = Cis*1;
Omega0 = Omega0*1;
Omega_dot = OmegaDot*1;
tgd = TGD*1;
toe = Toe*1;
time_eph = GPSWeek*7*86400 + Toe;
t = GPSTime*1;
# Parameters for clocks
af0 = SVclockBias*1;
af1 = SVclockDrift*1;
af2 = SVclockDriftRate*1;
ref_toc = Toc*1;
# Calculations
A = roota*roota; #semi-major axis
tk= check_t(t - time_eph); #time from the ephemerides reference epoch
n0 = np.sqrt(GM/(A**3)); #computed mean motion [rad/sec]
n = n0 + deltan; #corrected mean motion [rad/sec]
Mk = M0 + n*tk; #mean anomaly
Mk = np.fmod(Mk+cr,cr);
Ek = Mk*1;
max_iter = 12;
for i in range(1,max_iter+1,1):
Ek_old = Ek*1;
Ek = Mk+ecc*np.sin(Ek);
dEk = np.fmod(Ek-Ek_old,cr);
dEkNoNan=dEk[~np.isnan(dEk)];
treshold=np.where(np.abs(dEkNoNan)>1e-12); treshold=treshold[0];
if len(treshold)==0:
break
if i==12:
print('WARNING: Eccentric anomaly does not converge.')
Ek = np.fmod(Ek+cr,cr);
tk = check_t(t - time_eph); #time from the ephemeris reference epoch
fk = np.arctan2(np.sqrt(1-ecc*ecc)*np.sin(Ek), np.cos(Ek) - ecc); # true anomaly
phik = fk + omega; # argument of latitude
phik = np.fmod(phik,cr);
uk = phik + cuc*np.cos(2*phik) + cus*np.sin(2*phik); # corrected argument of latitude
rk = A*(1 - ecc*np.cos(Ek)) + crc*np.cos(2*phik) + crs*np.sin(2*phik); # corrected radial distance
ik = i0 + IDOT*tk + cic*np.cos(2*phik) + cis*np.sin(2*phik); # corrected inclination of the orbital plane
# satellite positions in the orbital plane
x1k = np.cos(uk)*rk;
y1k = np.sin(uk)*rk;
# corrected longitude of the ascending node
Omegak = Omega0 + (Omega_dot - Omegae_dot)*tk - Omegae_dot*toe;
Omegak = np.fmod(Omegak + cr, cr);
# satellite Earth-fixed coordinates (X,Y,Z)
xk = x1k*np.cos(Omegak) - y1k*np.cos(ik)*np.sin(Omegak);
yk = x1k*np.sin(Omegak) + y1k*np.cos(ik)*np.cos(Omegak);
zk = y1k*np.sin(ik);
# Interpolation of clocks for the given GPSTime
dt = check_t(GPSTime - ref_toc);
clks = (af2*dt + af1)*dt + af0;
# Relativity calculation
Relativity = -4.442807633e-10 * ecc * roota * np.sin(Ek);
# Clks correction from relativity and tgd
clks=clks+Relativity-tgd;
return xk, yk, zk, clks
#%% read rinex observation
fname='D:\\INCUBATE\\data\\COSMIC-2\\podRnx\\2021\\006\\podCrx_2021.006.006.36.02_crx.rnx'
nsatsGPS=32
nsats=nsatsGPS
sat_typesGPS=['G'+str(np.arange(1,nsatsGPS+1,1)[i]).zfill(2) for i in range(nsatsGPS)]
sat_types=np.array(sat_typesGPS)
# header
header = gr.rinexheader(fname)
# observations
obs = gr.load(fname)
obs_sats = obs.sv.values
obs_times = obs.time
obs_types = list(obs)
# coords = list(obs.coords)
ntimes=len(obs_times)
P1 = np.full((ntimes,nsats),np.nan);
P2 = np.full((ntimes,nsats),np.nan);
L1 = np.full((ntimes,nsats),np.nan);
L2 = np.full((ntimes,nsats),np.nan);
if( ('L1' in obs_types) & ('L2' in obs_types) & ('C1' in obs_types) & ('C2' in obs_types)):
case='C1C2L1L2'
if(case=='C1C2L1L2'):
for i in range(len(obs_sats)):
constelation=obs.sv.values[i][0]
prn=int(obs.sv.values[i][1:3])
if constelation=='G':
P1[:,prn-1] = obs.C1[:,i].values*1
P2[:,prn-1] = obs.C2[:,i].values*1
L1[:,prn-1] = obs.L1[:,i].values*1
L2[:,prn-1] = obs.L2[:,i].values*1
DATE = obs_times.values.astype('datetime64[us]')
GPSTime = (DATE-np.datetime64('1980-01-06T00:00:00'))/ np.timedelta64(1,'s')
GPSTime = np.matlib.repmat(GPSTime,nsats,1)
GPSTime = GPSTime.T*1
DATE=DATE.astype(datetime)
#%% read rinex navigation
nav_prev=gr.load('D:\\INCUBATE\\data\\GNSS\\BRDC\\brdc0050.21n')
nav_curr=gr.load('D:\\INCUBATE\\data\\GNSS\\BRDC\\brdc0060.21n')
nav_next=gr.load('D:\\INCUBATE\\data\\GNSS\\BRDC\\brdc0070.21n')
if(all((nav_prev.sv.values==nav_curr.sv.values) & (nav_prev.sv.values==nav_next.sv.values))==False): print('error between nav files');
nsat=nav_prev.dims['sv']
ntime=nav_prev.dims['time']+nav_curr.dims['time']+nav_next.dims['time']
SVclockBias=np.full((ntime,nsatsGPS),np.nan)
SVclockDrift=np.full((ntime,nsatsGPS),np.nan)
SVclockDriftRate=np.full((ntime,nsatsGPS),np.nan)
Crs=np.full((ntime,nsatsGPS),np.nan)
DeltaN=np.full((ntime,nsatsGPS),np.nan)
M0=np.full((ntime,nsatsGPS),np.nan)
Cuc=np.full((ntime,nsatsGPS),np.nan)
Eccentricity=np.full((ntime,nsatsGPS),np.nan)
Cus=np.full((ntime,nsatsGPS),np.nan)
sqrtA=np.full((ntime,nsatsGPS),np.nan)
Toe=np.full((ntime,nsatsGPS),np.nan)
Cic=np.full((ntime,nsatsGPS),np.nan)
Omega0=np.full((ntime,nsatsGPS),np.nan)
Cis=np.full((ntime,nsatsGPS),np.nan)
Io=np.full((ntime,nsatsGPS),np.nan)
Crc=np.full((ntime,nsatsGPS),np.nan)
Omega=np.full((ntime,nsatsGPS),np.nan)
OmegaDot=np.full((ntime,nsatsGPS),np.nan)
IDOT=np.full((ntime,nsatsGPS),np.nan)
CodesL2=np.full((ntime,nsatsGPS),np.nan)
GPSWeek=np.full((ntime,nsatsGPS),np.nan)
SVacc=np.full((ntime,nsatsGPS),np.nan)
TGD=np.full((ntime,nsatsGPS),np.nan)
TransTime=np.full((ntime,nsatsGPS),np.nan)
NavTime=np.full((ntime,nsatsGPS),np.nan)
NavGPSTime=np.full((ntime,nsatsGPS),np.nan)
for i in range(nsat):
prn=int(nav_prev['sv'].values[i][1:])
SVclockBias[:,prn-1]=np.concatenate((nav_prev.SVclockBias[:,i].values,nav_curr.SVclockBias[:,i].values,nav_next.SVclockBias[:,i].values))
SVclockDrift[:,prn-1]=np.concatenate((nav_prev.SVclockDrift[:,i].values,nav_curr.SVclockDrift[:,i].values,nav_next.SVclockDrift[:,i].values))
SVclockDriftRate[:,prn-1]=np.concatenate((nav_prev.SVclockDriftRate[:,i].values,nav_curr.SVclockDriftRate[:,i].values,nav_next.SVclockDriftRate[:,i].values))
Crs[:,prn-1]=np.concatenate((nav_prev.Crs[:,i].values,nav_curr.Crs[:,i].values,nav_next.Crs[:,i].values))
DeltaN[:,prn-1]=np.concatenate((nav_prev.DeltaN[:,i].values,nav_curr.DeltaN[:,i].values,nav_next.DeltaN[:,i].values))
M0[:,prn-1]=np.concatenate((nav_prev.M0[:,i].values,nav_curr.M0[:,i].values,nav_next.M0[:,i].values))
Cuc[:,prn-1]=np.concatenate((nav_prev.Cuc[:,i].values,nav_curr.Cuc[:,i].values,nav_next.Cuc[:,i].values))
Eccentricity[:,prn-1]=np.concatenate((nav_prev.Eccentricity[:,i].values,nav_curr.Eccentricity[:,i].values,nav_next.Eccentricity[:,i].values))
Cus[:,prn-1]=np.concatenate((nav_prev.Cus[:,i].values,nav_curr.Cus[:,i].values,nav_next.Cus[:,i].values))
sqrtA[:,prn-1]=np.concatenate((nav_prev.sqrtA[:,i].values,nav_curr.sqrtA[:,i].values,nav_next.sqrtA[:,i].values))
Toe[:,prn-1]=np.concatenate((nav_prev.Toe[:,i].values,nav_curr.Toe[:,i].values,nav_next.Toe[:,i].values))
Cic[:,prn-1]=np.concatenate((nav_prev.Cic[:,i].values,nav_curr.Cic[:,i].values,nav_next.Cic[:,i].values))
Omega0[:,prn-1]=np.concatenate((nav_prev.Omega0[:,i].values,nav_curr.Omega0[:,i].values,nav_next.Omega0[:,i].values))
Cis[:,prn-1]=np.concatenate((nav_prev.Cis[:,i].values,nav_curr.Cis[:,i].values,nav_next.Cis[:,i].values))
Io[:,prn-1]=np.concatenate((nav_prev.Io[:,i].values,nav_curr.Io[:,i].values,nav_next.Io[:,i].values))
Crc[:,prn-1]=np.concatenate((nav_prev.Crc[:,i].values,nav_curr.Crc[:,i].values,nav_next.Crc[:,i].values))
Omega[:,prn-1]=np.concatenate((nav_prev.omega[:,i].values,nav_curr.omega[:,i].values,nav_next.omega[:,i].values))
OmegaDot[:,prn-1]=np.concatenate((nav_prev.OmegaDot[:,i].values,nav_curr.OmegaDot[:,i].values,nav_next.OmegaDot[:,i].values))
IDOT[:,prn-1]=np.concatenate((nav_prev.IDOT[:,i].values,nav_curr.IDOT[:,i].values,nav_next.IDOT[:,i].values))
CodesL2[:,prn-1]=np.concatenate((nav_prev.CodesL2[:,i].values,nav_curr.CodesL2[:,i].values,nav_next.CodesL2[:,i].values))
GPSWeek[:,prn-1]=np.concatenate((nav_prev.GPSWeek[:,i].values,nav_curr.GPSWeek[:,i].values,nav_next.GPSWeek[:,i].values))
SVacc[:,prn-1]=np.concatenate((nav_prev.SVacc[:,i].values,nav_curr.SVacc[:,i].values,nav_next.SVacc[:,i].values))
TGD[:,prn-1]=np.concatenate((nav_prev.TGD[:,i].values,nav_curr.TGD[:,i].values,nav_next.TGD[:,i].values))
TransTime[:,prn-1]=np.concatenate((nav_prev.TransTime[:,i].values,nav_curr.TransTime[:,i].values,nav_next.TransTime[:,i].values))
NavTime_prev = (nav_prev.time.values.astype('datetime64[us]')-np.datetime64('1980-01-06T00:00:00'))/ np.timedelta64(1,'s')
NavTime_curr = (nav_curr.time.values.astype('datetime64[us]')-np.datetime64('1980-01-06T00:00:00'))/ np.timedelta64(1,'s')
NavTime_next = (nav_next.time.values.astype('datetime64[us]')-np.datetime64('1980-01-06T00:00:00'))/ np.timedelta64(1,'s')
NavGPSTime[:,prn-1]=np.concatenate((NavTime_prev,NavTime_curr,NavTime_next))
Toc=SVclockBias*0
gpsweek=GPSTime*0; toc=GPSTime*0; svclockbias=GPSTime*0; svclockdrift=GPSTime*0; svclockdriftrate=GPSTime*0; crs=GPSTime*0;
deltaN=GPSTime*0; m0=GPSTime*0; cuc=GPSTime*0; eccentricity=GPSTime*0; cus=GPSTime*0; sqrta=GPSTime*0; toe=GPSTime*0
cic=GPSTime*0; omega0=GPSTime*0; cis=GPSTime*0; io=GPSTime*0; crc=GPSTime*0; omega=GPSTime*0; omegadot=GPSTime*0
idot=GPSTime*0; codesL2=GPSTime*0; svacc=GPSTime*0; tgd=GPSTime*0; transtime=GPSTime*0
for i in range(0,nsat,1):
f = inteprolateNavPar(NavGPSTime[:,i], GPSWeek[:,i]);
gpsweek[:,i]=f(GPSTime[:,i]);
f = inteprolateNavTime(NavGPSTime[:,i], NavGPSTime[:,i], SVclockBias[:,i]);
toc[:,i]=f(GPSTime[:,i]);
f = inteprolateNavPar(NavGPSTime[:,i], SVclockBias[:,i])
svclockbias[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], SVclockDrift[:,i])
svclockdrift[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], SVclockDriftRate[:,i])
svclockdriftrate[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Crs[:,i])
crs[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], DeltaN[:,i])
deltaN[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], M0[:,i])
m0[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Cuc[:,i])
cuc[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Eccentricity[:,i])
eccentricity[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Cus[:,i])
cus[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], sqrtA[:,i])
sqrta[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Toe[:,i])
toe[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Cic[:,i])
cic[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Omega0[:,i])
omega0[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Cis[:,i])
cis[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Io[:,i])
io[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Crc[:,i])
crc[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], Omega[:,i])
omega[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], OmegaDot[:,i])
omegadot[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], IDOT[:,i])
idot[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], CodesL2[:,i])
codesL2[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], SVacc[:,i])
svacc[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], TGD[:,i])
tgd[:,i]=f(GPSTime[:,i])
f = inteprolateNavPar(NavGPSTime[:,i], TransTime[:,i])
transtime[:,i]=f(GPSTime[:,i])
xs,ys,zs,clks = inteprolateGPS(GPSTime,m0,deltaN,sqrta,eccentricity,omega,cuc,cus,crc,crs,io,idot,cic,cis,omega0,omegadot,tgd,toe,gpsweek,svclockbias,svclockdrift,svclockdriftrate,toc)
#%% read podTEC
GPSTimeR=[]; TEC=[]; XS=[]; YS=[]; ZS=[]; XR=[]; YR=[]; ZR=[]; UT=[]; ID_mission=[]
fname='D:\\INCUBATE\\data\\COSMIC-2\\podTec\\2021\\006\\podTc2_C2E6.2021.006.22.16.0016.G08.01_0001.0001_nc'
dataset = Dataset(fname);
DATE=dt.datetime.strptime('{} {} {}'.format(dataset.day, dataset.month, dataset.year),'%d %m %Y')
GPSTIME=(np.array(DATE,dtype='datetime64')-np.datetime64('1980-01-06T00:00:00'))/ np.timedelta64(1,'s')
GPSTimeR.append(np.array(dataset.variables.get('time')[:]))
XR.append(np.array(dataset.variables.get('x_LEO')[:])*1e3)
YR.append(np.array(dataset.variables.get('y_LEO')[:])*1e3)
ZR.append(np.array(dataset.variables.get('z_LEO')[:])*1e3)
XS.append(np.array(dataset.variables.get('x_GPS')[:])*1e3)
YS.append(np.array(dataset.variables.get('y_GPS')[:])*1e3)
ZS.append(np.array(dataset.variables.get('z_GPS')[:])*1e3)
prnpod=dataset.prn_id*1
dataset.close()
gpstimepod=np.hstack(GPSTimeR)
utpod=(gpstimepod-GPSTIME)/3600
xrpod=np.hstack(XR)
yrpod=np.hstack(YR)
zrpod=np.hstack(ZR)
xspod=np.hstack(XS)
yspod=np.hstack(YS)
zspod=np.hstack(ZS)
# plt.plot(gpstimepod,xspod,'ko')
# plt.plot(GPSTime[:,prnpod-1],xs[:,prnpod-1],'r.')
#%% create rinex_obs header
rnx_version='{:9.2f}'.format(header['version'])+' '
systems=header['systems']+' (MIXED) '
line1=rnx_version+'OBSERVATION DATA '+systems+'RINEX VERSION / TYPE\n'
line2=header['PGM / RUN BY / DATE']+'PGM / RUN BY / DATE\n'
line3=header['MARKER NAME']+'MARKER NAME\n'
line4=header['MARKER TYPE']+'MARKER TYPE\n'
line5=header['OBSERVER / AGENCY']+'OBSERVER / AGENCY\n'
line6=header['REC # / TYPE / VERS']+'REC # / TYPE / VERS\n'
line7=header['ANT # / TYPE']+'ANT # / TYPE\n'
line8=header['ANTENNA: DELTA X/Y/Z']+'ANTENNA: DELTA X/Y/Z\n'
line9=header['ANTENNA: B.SIGHT XYZ']+'ANTENNA: B.SIGHT XYZ\n'
line10=header['CENTER OF MASS: XYZ']+'CENTER OF MASS: XYZ\n'
line11=header['WAVELENGTH FACT L1/2']+'WAVELENGTH FACT L1/2\n'
if(case=='C1C2L1L2'): line12=' 4'+' C1'+' C2'+' L1'+' L2'+' '+' '+' '+' '+' '+'# / TYPES OF OBSERV\n'
line13=header['TIME OF FIRST OBS']+'TIME OF FIRST OBS\n'
line14=header['TIME OF LAST OBS']+'TIME OF LAST OBS\n'
line15=' END OF HEADER\n'
header_msg=line1+line2+line3+line4+line5+line6+line7+line8+line9+line10+line11+line12+line13+line14+line15
#%% create rinex_obs observations
# https://gage.upc.edu/sites/default/files/gLAB/HTML/Observation_Rinex_v2.11.html
rinex_msg=''
fname_rinex='D:\\INCUBATE\\codes\\PNT_simulation\\rinex_LEO.txt'
for i in range(ntimes):
idx=np.argwhere(~np.isnan(P1[i,:]) & ~np.isnan(P2[i,:]) & ~np.isnan(L1[i,:]) & ~np.isnan(L2[i,:]))
idx=[int(idx[j][0]) for j in range(len(idx))]
YY=' '+'{:2d}'.format(DATE[i].year-2000)
MM=' '+'{:2d}'.format(DATE[i].month)
DD=' '+'{:2d}'.format(DATE[i].day)
hh=' '+'{:2d}'.format(DATE[i].hour)
mm=' '+'{:2d}'.format(DATE[i].minute)
ss=' '+'{:10.7f}'.format(DATE[i].second+DATE[i].microsecond/1e6)
epoch_flag=' 0'
num_sats='{:3d}'.format(len(idx))
sats=''
for j in range(len(idx)):
if(j>11):
sats=sats+'\n '
sats=sats+sat_types[idx[j]]
hdr=YY+MM+DD+hh+mm+ss+epoch_flag+num_sats+sats+'\n'
rinex_msg=rinex_msg+hdr
for j in idx:
p1='{:14.3f}'.format(P1[i,j])+' '
p2='{:14.3f}'.format(P2[i,j])+' '
l1='{:14.3f}'.format(L1[i,j])+' '
l2='{:14.3f}'.format(L2[i,j])+'\n'
rinex_msg=rinex_msg+p1+p2+l1+l2
#%% write rinex
f = open(fname_rinex, 'w')
f.write(header_msg+rinex_msg)
f.close()
#%% | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/MSCommon/common.py | from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/MSCommon/common.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import copy
import json
import os
import re
import subprocess
import sys
import SCons.Util
# SCONS_MSCOMMON_DEBUG is internal-use so undocumented:
# set to '-' to print to console, else set to filename to log to
LOGFILE = os.environ.get('SCONS_MSCOMMON_DEBUG')
if LOGFILE == '-':
def debug(message):
print(message)
elif LOGFILE:
import logging
logging.basicConfig(
format='%(relativeCreated)05dms:pid%(process)05d:MSCommon/%(filename)s:%(message)s',
filename=LOGFILE,
level=logging.DEBUG)
debug = logging.getLogger(name=__name__).debug
else:
debug = lambda x: None
# SCONS_CACHE_MSVC_CONFIG is public, and is documented.
CONFIG_CACHE = os.environ.get('SCONS_CACHE_MSVC_CONFIG')
if CONFIG_CACHE in ('1', 'true', 'True'):
CONFIG_CACHE = os.path.join(os.path.expanduser('~'), '.scons_msvc_cache')
def read_script_env_cache():
""" fetch cached msvc env vars if requested, else return empty dict """
envcache = {}
if CONFIG_CACHE:
try:
with open(CONFIG_CACHE, 'r') as f:
envcache = json.load(f)
#TODO can use more specific FileNotFoundError when py2 dropped
except IOError:
# don't fail if no cache file, just proceed without it
pass
return envcache
def write_script_env_cache(cache):
""" write out cache of msvc env vars if requested """
if CONFIG_CACHE:
try:
with open(CONFIG_CACHE, 'w') as f:
json.dump(cache, f, indent=2)
except TypeError:
# data can't serialize to json, don't leave partial file
os.remove(CONFIG_CACHE)
except IOError:
# can't write the file, just skip
pass
_is_win64 = None
def is_win64():
"""Return true if running on windows 64 bits.
Works whether python itself runs in 64 bits or 32 bits."""
# Unfortunately, python does not provide a useful way to determine
# if the underlying Windows OS is 32-bit or 64-bit. Worse, whether
# the Python itself is 32-bit or 64-bit affects what it returns,
# so nothing in sys.* or os.* help.
# Apparently the best solution is to use env vars that Windows
# sets. If PROCESSOR_ARCHITECTURE is not x86, then the python
# process is running in 64 bit mode (on a 64-bit OS, 64-bit
# hardware, obviously).
# If this python is 32-bit but the OS is 64, Windows will set
# ProgramW6432 and PROCESSOR_ARCHITEW6432 to non-null.
# (Checking for HKLM\Software\Wow6432Node in the registry doesn't
# work, because some 32-bit installers create it.)
global _is_win64
if _is_win64 is None:
# I structured these tests to make it easy to add new ones or
# add exceptions in the future, because this is a bit fragile.
_is_win64 = False
if os.environ.get('PROCESSOR_ARCHITECTURE', 'x86') != 'x86':
_is_win64 = True
if os.environ.get('PROCESSOR_ARCHITEW6432'):
_is_win64 = True
if os.environ.get('ProgramW6432'):
_is_win64 = True
return _is_win64
def read_reg(value, hkroot=SCons.Util.HKEY_LOCAL_MACHINE):
return SCons.Util.RegGetValue(hkroot, value)[0]
def has_reg(value):
"""Return True if the given key exists in HKEY_LOCAL_MACHINE, False
otherwise."""
try:
SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, value)
ret = True
except SCons.Util.WinError:
ret = False
return ret
# Functions for fetching environment variable settings from batch files.
def normalize_env(env, keys, force=False):
"""Given a dictionary representing a shell environment, add the variables
from os.environ needed for the processing of .bat files; the keys are
controlled by the keys argument.
It also makes sure the environment values are correctly encoded.
If force=True, then all of the key values that exist are copied
into the returned dictionary. If force=false, values are only
copied if the key does not already exist in the copied dictionary.
Note: the environment is copied."""
normenv = {}
if env:
for k in list(env.keys()):
normenv[k] = copy.deepcopy(env[k])
for k in keys:
if k in os.environ and (force or k not in normenv):
normenv[k] = os.environ[k]
# This shouldn't be necessary, since the default environment should include system32,
# but keep this here to be safe, since it's needed to find reg.exe which the MSVC
# bat scripts use.
sys32_dir = os.path.join(os.environ.get("SystemRoot",
os.environ.get("windir", r"C:\Windows\system32")),
"System32")
if sys32_dir not in normenv['PATH']:
normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_dir
# Without Wbem in PATH, vcvarsall.bat has a "'wmic' is not recognized"
# error starting with Visual Studio 2017, although the script still
# seems to work anyway.
sys32_wbem_dir = os.path.join(sys32_dir, 'Wbem')
if sys32_wbem_dir not in normenv['PATH']:
normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_wbem_dir
debug("PATH: %s"%normenv['PATH'])
return normenv
def get_output(vcbat, args = None, env = None):
"""Parse the output of given bat file, with given args."""
if env is None:
# Create a blank environment, for use in launching the tools
env = SCons.Environment.Environment(tools=[])
# TODO: This is a hard-coded list of the variables that (may) need
# to be imported from os.environ[] for v[sc]*vars*.bat file
# execution to work. This list should really be either directly
# controlled by vc.py, or else derived from the common_tools_var
# settings in vs.py.
vs_vc_vars = [
'COMSPEC',
# VS100 and VS110: Still set, but modern MSVC setup scripts will
# discard these if registry has values. However Intel compiler setup
# script still requires these as of 2013/2014.
'VS140COMNTOOLS',
'VS120COMNTOOLS',
'VS110COMNTOOLS',
'VS100COMNTOOLS',
'VS90COMNTOOLS',
'VS80COMNTOOLS',
'VS71COMNTOOLS',
'VS70COMNTOOLS',
'VS60COMNTOOLS',
]
env['ENV'] = normalize_env(env['ENV'], vs_vc_vars, force=False)
if args:
debug("Calling '%s %s'" % (vcbat, args))
popen = SCons.Action._subproc(env,
'"%s" %s & set' % (vcbat, args),
stdin='devnull',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
debug("Calling '%s'" % vcbat)
popen = SCons.Action._subproc(env,
'"%s" & set' % vcbat,
stdin='devnull',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Use the .stdout and .stderr attributes directly because the
# .communicate() method uses the threading module on Windows
# and won't work under Pythons not built with threading.
with popen.stdout:
stdout = popen.stdout.read()
with popen.stderr:
stderr = popen.stderr.read()
# Extra debug logic, uncomment if necessary
# debug('get_output():stdout:%s'%stdout)
# debug('get_output():stderr:%s'%stderr)
if stderr:
# TODO: find something better to do with stderr;
# this at least prevents errors from getting swallowed.
# Nuitka: this is writing bytes to stderr which wants unicode
sys.stderr.write(stderr.decode("mbcs"))
if popen.wait() != 0:
raise IOError(stderr.decode("mbcs"))
output = stdout.decode("mbcs")
return output
KEEPLIST = ("INCLUDE", "LIB", "LIBPATH", "PATH", 'VSCMD_ARG_app_plat')
# Nuitka: Keep the Windows SDK version too
KEEPLIST += ("WindowsSDKVersion",)
def parse_output(output, keep=KEEPLIST):
"""
Parse output from running visual c++/studios vcvarsall.bat and running set
To capture the values listed in keep
"""
# dkeep is a dict associating key: path_list, where key is one item from
# keep, and path_list the associated list of paths
dkeep = dict([(i, []) for i in keep])
# rdk will keep the regex to match the .bat file output line starts
rdk = {}
for i in keep:
rdk[i] = re.compile('%s=(.*)' % i, re.I)
def add_env(rmatch, key, dkeep=dkeep):
path_list = rmatch.group(1).split(os.pathsep)
for path in path_list:
# Do not add empty paths (when a var ends with ;)
if path:
# XXX: For some reason, VC98 .bat file adds "" around the PATH
# values, and it screws up the environment later, so we strip
# it.
path = path.strip('"')
dkeep[key].append(str(path))
for line in output.splitlines():
for k, value in rdk.items():
match = value.match(line)
if match:
add_env(match, k)
return dkeep
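# Illustrative sketch (not part of the original module): the typical flow is to
# run a vcvars batch file through get_output() and feed the captured text to
# parse_output(); the .bat path below and the construction environment `env`
# are assumptions for the example.
#
#   output = get_output(r'C:\path\to\vcvarsall.bat', args='x86')
#   settings = parse_output(output)          # dict of path lists, keyed by KEEPLIST
#   for k, paths in settings.items():
#       env.PrependENVPath(k, paths)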
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/modules/vector/api/orienterclasses.rst | =============================
Orienter classes (docstrings)
=============================
.. module:: diofant.vector.orienters
Orienter
========
.. autoclass:: Orienter
:members:
AxisOrienter
============
.. autoclass:: AxisOrienter
:members:
:special-members:
BodyOrienter
============
.. autoclass:: BodyOrienter
:members:
:special-members:
SpaceOrienter
=============
.. autoclass:: SpaceOrienter
:members:
:special-members:
QuaternionOrienter
==================
.. autoclass:: QuaternionOrienter
:members:
:special-members:
| PypiClean |
/KAVICA-1.3.4.tar.gz/KAVICA-1.3.4/README.md | <div align="center">
<img src="https://github.com/kavehmahdavi/kavica/raw/main/doc/web/icon.png"><br>
</div>
-----------------
# KAVICA: Powerful Python Cluster Analysis and Inference Toolkit
[](https://pypi.org/project/kavica/)
[](https://anaconda.org/anaconda/pandas/)
[](https://pypi.org/project/kavica/)
[](https://github.com/kavehmahdavi/kavica_container/blob/main/LICENSE)
[](https://pepy.tech/project/kavica)
[](https://pepy.tech/project/kavica)
[](https://stackoverflow.com/questions/tagged/kavica)
## What is it?
**kavica** is a Python package that provides semi-automated, flexible, and expressive clustering
analysis designed to make working with "unlabeled" data easy and intuitive.
It aims to be the fundamental high-level building block for doing practical, **real world** cluster analysis in Python.
Additionally, it has the broader goal of becoming **A powerful and flexible open source AutoML unsupervised / clustering
analysis tool and pipeline**. It is already well on its way towards this goal.
## Main Features
Here are just a few of the things that kavica does well:
- Intelligent [**Density
  Mapping**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/space_curvature_map.py)
  to model the density structure of the data in analogy to
[Einstein's theory of relativity](https://www.space.com/17661-theory-general-relativity.html),
and automated [**Density
Homogenizing**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/bilinear_transformation.py)
to prepare the
  data for density-based clustering (e.g. DBSCAN)
- Automatic and powerful [**Organization Component
Analysis**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/OCA.py) to interpret
  the clustering result by understanding the topological structure of each cluster
- Topological and powerful [**Self-Organizing Maps Inference
System**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/somis.py) to
  use the self-learning ability of the SOM to understand the topological structure of the data
- Automated and Bayesian-based [**DBSCAN Hyper-parameter
Tuner**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/tuner) to select the optimal
  hyper-parameter configuration of the DBSCAN clustering algorithm
- Efficient handling of [**feature
  selection**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/feature_selection/) in potentially
high-dimensional and
massive datasets
- Gravitational implementation of Kohonen [**Generational Self-Organizing Maps (
GSOM)**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/som.py) useful
  for unsupervised learning and super-clustering by providing enriched graphics, plots, and animation features.
- Computational geometrical model [**Polygonal
Cage**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/polygon_cage.py) to transfer
  feature vectors from a curved non-Euclidean feature space to a new Euclidean one.
- Robust [**factor analysis**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/factor_analysis;) to reduce a
  large number of variables to a smaller set of factors
- Easy handling of [**missing data**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/imputation/) (represented
as `NaN`, `NA`, or `NaT`) in floating point
as well as non-floating point data
- Flexible implementation of directed and undirected [**graph data
  structure**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/graph_data_structur.py) and
algorithms.
- Intuitive [**resampling**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/resampling/) of data sets
- Powerful, flexible [**parser**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/parser) functionality to
  perform parsing, manipulation, and generation
  operations on flat, massive, and unstructured [Traces](https://tools.bsc.es/paraver/trace_generation) datasets
which are generated by [MareNostrum](https://www.bsc.es/marenostrum/marenostrum)
- [**Utilities**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/utils) functionality: intuitive exploratory
  data analysis, plotting, data loading and generation,
  and more...
## Examples:
- [**Feature Space Curvature Map**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/space_curvature_map.py)
<div align="center">
<img src="https://github.com/kavehmahdavi/kavica/raw/main/doc/web/circel.gif" width="800"><br>
</div>
- [**Density
Homogenizing**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/bilinear_transformation.py)

Application of the Feature Space Curvature Map on a multi-density 2D dataset, Synt10, containing ten clusters. (a) A
scatter plot of clusters with varied densities. The legend shows the size/N(μ,σ2) per cluster, the colors represent
the data's original labeling, and the red lines draw the initial FSF. (b) The FSC model computed with our FSCM method;
note that the red lines show the deformation of the FSF. (c) A scatter plot of the data in (a) projected by applying
our transformation through the model in (b). As a result, the diversity of the clusters' densities is scaled
appropriately to achieve better density-based clustering performance.

- [**Polygonal Cage**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/polygon_cage.py)
Multilinear transformation
Feature Space Curvature | Feature Space Fabric
--- | ---
 | 
Data point transformation between a bent FSC (a) and a regular FSF (b) based on the multilinear transformation in
R<sup>2</sup>.
- [**Organization Component Analysis**](https://github.com/kavehmahdavi/kavica/tree/main/kavica/cluster_inference_system/OCA.py)
<div align="center">
<img src="https://github.com/kavehmahdavi/kavica/raw/main/doc/web/oca.png" width="800"><br>
</div>
Application of the OCA on the Iris dataset
## Video
<div align="center">
<a href="https://www.youtube.com/watch?v=lxL3niQmBcU&t=27s">
<img src="https://github.com/kavehmahdavi/kavica/raw/main/doc/web/OCA_presentation.png" width="600">
</a>
</div>
## Where to get it
The source code is currently hosted on GitHub at: [kavica](https://github.com/kavehmahdavi/kavica)
Binary installers for the latest released version are available at the
[Python Package Index (PyPI)](https://pypi.org/project/KAVICA/) and on [Conda](https://docs.conda.io/en/latest/).
The recommended way to install kavica is to use:
```sh
# PyPI
pip install kavica
```
But it can also be installed using:
```sh
# or conda
conda config --add channels conda-forge
conda install kavica
```
To verify your setup, start Python from the command line and run the following:
```sh
import kavica
```
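As a quick orientation, here is a minimal sketch of how a density-homogenizing workflow with kavica might look.
The commented class and method names below (e.g. `SpaceCurvatureMap`, `fit_transform`) are illustrative assumptions
rather than the confirmed kavica API; please consult the module links and examples above for the exact interfaces.

```python
# Hypothetical usage sketch -- the kavica class/method names are assumptions.
import numpy as np

# Toy 2D data: two clusters with very different densities
rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0.0, 0.1, size=(200, 2)),
                  rng.normal(3.0, 0.8, size=(400, 2))])

# 1) Homogenize cluster densities with the Feature Space Curvature Map
#    (assumed API, see kavica/cluster_inference_system/space_curvature_map.py):
# from kavica.cluster_inference_system.space_curvature_map import SpaceCurvatureMap
# homogenized = SpaceCurvatureMap().fit_transform(data)

# 2) Run a density-based clustering algorithm (e.g. DBSCAN) on the transformed
#    data, optionally tuning its hyper-parameters with kavica.tuner (assumed API).
```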
## Dependencies
See the [requirements.txt](/requirements.txt) file for installing the required packages:
```sh
pip install -r requirements.txt
```
## Publications
[Unsupervised Feature Selection for Noisy Data](https://doi.org/10.1007/978-3-030-35231-8_6)
[Organization Component Analysis: The method for extracting insights from the shape of cluster](https://doi.org/10.1109/IJCNN52387.2021.9533650)
[Feature Space Curvature Map: A Method To Homogenize Cluster Densities](https://doi.org/10.1109/IJCNN55064.2022.9892921)
## Issue tracker
If you find a bug, please help us solve it by [filing a report](https://github.com/kavehmahdavi/kavica/issues).
## Contributing
If you want to contribute, check out the
[contribution guidelines](https://kavehmahdavi.github.io/kavica/main/contributions.html).
## License
The main library of **kavica** is
[released under the BSD 3 clause license](https://kavehmahdavi.github.io/kavica/main/license.html).
| PypiClean |
/KDVS-2.0.0.tar.gz/KDVS-2.0.0/kdvs/fw/Annotation.py |
r"""
Provides abstract functionality for handling annotations.
"""
from kdvs.core.error import Warn
from kdvs.fw.DBTable import DBTemplate
import collections
# NOTE: please do not modify this table!
# NOTE: this general table utilizes the multifield 'pkc_data' of the following format:
# 'feature1=value1,...,featureN=valueN', where 'feature' is the property specific
# for PK source, and 'value' is the value of this property for specified PKC.
# Typical features of PK may be: PKC name, PKC description, higher level grouping
# of PKCs, e.g. into domains, etc.
# NOTE: it is advisable to create and use more specific tables tailored for
# specificity of individual PK sources, due to limited querying power of this
# table and potentially high cost of parsing the multifield
PKC2EM_TMPL = DBTemplate({
'name' : 'pkc2em',
'columns' : ('pkc_id', 'em_id', 'pkc_data'),
'id_column' : 'pkc_id',
'indexes' : ('pkc_id',),
})
r"""
Database table template for storing mapping between prior knowledge concepts and
measurements (PKC->M). It defines the name 'pkc2em' and columns 'pkc_id', 'em_id',
'pkc_data'. The ID column 'pkc_id' is also indexed. This general table utilizes
multifield 'pkc_data' of the following format:
* 'feature1=value1,...,featureN=valueN',
where 'feature' is the property specific for PK source, and 'value' is the value
of this property for specified PKC. Typical features of PK may be: PKC name,
PKC description, higher level grouping of PKCs, e.g. into domains, etc.
NOTE: it is advisable to create and use more specific tables tailored for
specificity of individual PK sources, due to limited querying power of this
table and potentially high computational cost of parsing the multifield.
"""
# NOTE: please do not modify this table!
# NOTE: this general table contains selected set of annotations for given EM;
# this selection is arbitrary and may not reflect all needs of the user; in that
# case it is advised to use different, more specific table
EM2ANNOTATION_TMPL = DBTemplate({
'name' : 'em2annotation',
'columns' : ('em_id', 'gene_symbol', 'repr_id', 'gb_acc', 'entrez_gene_id', 'ensembl_id', 'refseq_id'),
'id_column' : 'em_id',
'indexes' : ('em_id',),
})
r"""
Database table template that provides annotations for measurements. This template
is tailored specifically for Gene Ontology. It defines the name 'em2annotation' and
columns 'em_id', 'gene_symbol', 'repr_id', 'gb_acc', 'entrez_gene_id', 'ensembl_id', 'refseq_id'.
The ID column 'em_id' is also indexed. This table contains selected set of
annotations for given measurement (i.e. probeset for microarray, gene for RNASeq etc).
The selection is arbitrary and may not reflect all needs of the user; in that case
it is advisable to use different, more specific table.
"""
MULTIFIELD_SEP = ';'
r"""
Default separator for the multifield 'pkc_data' used in generic annotation
database template.
"""
def get_em2annotation(em2annotation_dt):
r"""
Obtain the dictionary with mapping between measurements and annotations,
stored in specified DBTable instance.
Parameters
----------
em2annotation_dt : :class:`~kdvs.fw.DBTable.DBTable`
wrapped content of 'em2annotation' database table
Returns
-------
em2a : collections.defaultdict
mapping between measurements and annotations
"""
em2a_s = collections.defaultdict(list)
query_em2a_columns = '*'
em2a_cs = em2annotation_dt.get(columns=query_em2a_columns)
for em2a_row in em2a_cs:
em2a_strs = [str(r) for r in em2a_row]
em_id = em2a_strs[0]
anno_data = em2a_strs[1:]
em2a_s[em_id].append(anno_data)
em2a_cs.close()
em2a = dict()
for emid, d in em2a_s.iteritems():
# data from table shall be unique across emids, so we take first record
em2a[emid] = d[0]
# should not happen but to be safe
if len(d) != 1:
raise Warn('%s annotated with more than 1 record!' % emid)
return em2a | PypiClean |
/MiTepid-0.0.5-py3-none-any.whl/mitepid/models.py | def SEIR(states, t, B, Gamma, Mu, Sigma):
"""
Simulate SEIR compartmental Model.
Parameters
----------
states : numpy array
        array of size 3*Ng holding the Infective (I), Recovered (R), and Exposed (E) trajectories.
t : numpy array
time array of interest.
B : numpy 2D array
matrix of contact rates. b_{ij} describes the influence of group j on group i.
Gamma : numpy array
        a diagonal matrix of recovery (curing) rates.
Mu: numpy array
a diagonal matrix of birth/death rates (it is assumed birth and death rates are equal)
Sigma: numpy array
        a diagonal matrix of rates at which exposed individuals become infective (related to the length of the latent period)
Returns
-------
dsdt : numpy array
        time derivatives of the states (the right-hand side of the model).
"""
import numpy as np
    Ng = B.shape[0]
I = states[:Ng] # I
R = states[Ng:2*Ng] # R
E = states[2*Ng:3*Ng] # E
dIdt = np.zeros(Ng)
dRdt = np.zeros(Ng)
dEdt = np.zeros(Ng)
for i in np.arange(Ng):
Sum_j_x = 0
for j in np.arange(Ng):
Sum_j_x = Sum_j_x + B[i, j]* I[j]
# E
dEdt[i] = (1 - I[i] - R[i] - E[i]) * Sum_j_x - (Mu[i, i] + Sigma[i, i]) * E[i]
# I
dIdt[i] = Sigma[i, i] * E[i] - (Mu[i, i] + Gamma[i, i]) * I[i]
# R
dRdt[i] = Gamma[i, i] * I[i] - Mu[i,i] * R[i]
dsdt = np.concatenate((dIdt, dRdt, dEdt))
return dsdt
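# Illustrative usage sketch (not part of the original module): integrating the
# SEIR right-hand side above with scipy.integrate.odeint for a single group.
# scipy is assumed to be available, and all numeric values are arbitrary
# placeholders rather than calibrated epidemiological rates.
def _example_seir_run():
    import numpy as np
    from scipy.integrate import odeint
    Ng = 1
    B = np.array([[0.3]])     # contact rates
    Gamma = np.diag([0.1])    # recovery rates
    Mu = np.diag([0.0])       # birth/death rates
    Sigma = np.diag([0.2])    # rates of leaving the latent compartment
    x0 = np.concatenate((np.full(Ng, 1e-4),  # I(0)
                         np.zeros(Ng),       # R(0)
                         np.zeros(Ng)))      # E(0)
    t = np.linspace(0.0, 200.0, 2001)
    sol = odeint(SEIR, x0, t, args=(B, Gamma, Mu, Sigma))
    return t, sol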
def SIR(states, t, B, Gamma, Mu):
"""
Simulate SIR Model.
Parameters
----------
states : numpy array
array of size 2*Ng for Infective (I) and Recovered (R) trajectories.
t : numpy array
time array of interest.
B : numpy 2D array
matrix of contact rates. b_{ij} describes the influence of group j on group i.
Gamma : numpy array
        a diagonal matrix of recovery (curing) rates.
Mu: numpy array
a diagonal matrix of birth/death rates (it is assumed birth and death rates are equal)
Returns
-------
dsdt : numpy array
        time derivatives of the states (the right-hand side of the model).
"""
import numpy as np
    Ng = B.shape[0]
I = states[:Ng]
R = states[Ng:]
    dIdt = np.zeros(Ng)
    dRdt = np.zeros(Ng)
for i in np.arange(Ng):
#
Sum_j_x = 0
for j in np.arange(Ng):
Sum_j_x = Sum_j_x + B[i, j]* I[j]
# I
dIdt[i] = (1-I[i]) * Sum_j_x - R[i] * Sum_j_x - (Mu[i, i] + Gamma[i, i]) * I[i]
# R
dRdt[i] = Gamma[i, i] * I[i] - Mu[i, i] * R[i]
dsdt = np.concatenate((dIdt, dRdt))
return dsdt
def SIS(I, t, B, Gamma, Mu):
"""
Simulate SIS model.
Parameters
----------
I : numpy array
array of size Ng for Infective (I) trajectories.
t : numpy array
time array of interest.
B : numpy 2D array
matrix of contact rates. b_{ij} describes the influence of group j on group i.
Gamma : numpy array
        a diagonal matrix of recovery (curing) rates.
Mu: numpy array
a diagonal matrix of birth/death rates (it is assumed birth and death rates are equal)
Returns
-------
dIdt : numpy array
        time derivative of I (the right-hand side of the model).
"""
import numpy as np
    Ng = I.size
dIdt = np.zeros(Ng)
for i in np.arange(Ng):
Sum_j = 0
for j in np.arange(Ng):
Sum_j = Sum_j + B[i, j]* I[j]
dIdt[i] = (1-I[i]) * Sum_j - (Mu[i, i] + Gamma[i, i]) * I[i]
return dIdt | PypiClean |
/CheckMyTex-0.10.5.tar.gz/CheckMyTex-0.10.5/checkmytex/finding/languagetool.py | import json
import shutil
import typing
from checkmytex.latex_document import LatexDocument, Origin
from .abstract_checker import Checker
from .problem import Problem
class Languagetool(Checker):
def __init__(self, lang="en-US"):
"""
        If you change the language, you may also have to adjust
        `disable_rules`. These can be modified at any time before running `check`.
        :param lang: Language option, e.g. "de-DE" for German. This is the
            language code that LanguageTool expects after `-l` on the CLI.
"""
super().__init__()
self._lang = lang
self.disable_rules = [
f"MORFOLOGIK_RULE_{lang.upper().replace('-', '_').strip()}",
# disable spell checking because it is very slow.
"WHITESPACE_RULE",
# The whitespaces will be off due to detexing.
"COMMA_PARENTHESIS_WHITESPACE",
# Also not reliable in detexed text.
"THE_SUPERLATIVE", # not true in computer science and math where we can have, e.g., multiple equivalent extremal solutions.
]
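    # Illustrative usage sketch (not part of the original module): the rule list
    # can be tuned any time before `check` runs. Here `document` is assumed to be
    # a LatexDocument instance and "EN_QUOTES" a hypothetical extra rule id.
    #
    #     checker = Languagetool(lang="en-US")
    #     checker.disable_rules.append("EN_QUOTES")
    #     problems = list(checker.check(document))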
def _get_languagetool_json(self, document: LatexDocument) -> dict:
result, err, ex = self._run(
f"{shutil.which('languagetool')} --json -l {self._lang} "
f"--disable {','.join(self.disable_rules)}",
input=document.get_text(),
)
if err:
self.log(err)
lines = result.split("\n")
lines.sort(key=len, reverse=True)
for line in lines:
try:
return json.loads(line)
except json.JSONDecodeError:
continue
self.log("ERROR: Could not read output of languagetool!")
self.log(result)
return {}
def check(self, document: LatexDocument) -> typing.Iterable[Problem]:
self.log("Running Langugagetool...")
data = self._get_languagetool_json(document=document)
for problem in data["matches"]:
try:
look_up_url = problem["rule"]["urls"][0]["value"]
except KeyError:
look_up_url = None
origin = document.get_simplified_origin_of_text(
problem["offset"], problem["offset"] + problem["length"]
)
# using source context because the detexed context is too unstable with math
context = document.get_source_context(origin)
# the rule 'a/an' results in many false positives before math or commands.
if self._is_an_before_math(problem, origin, document):
continue
# the upper case sentence start rule has an issue with "ABR.\ bla"
if self._is_uppercase_letter_after_backslash(problem, origin, document):
continue
yield Problem(
origin=origin,
context=problem["context"]["text"],
message=problem["message"],
long_id=problem["message"] + context,
rule=problem["rule"]["id"],
tool="languagetool",
look_up_url=look_up_url,
)
def _is_an_before_math(
self, problem, origin: Origin, document: LatexDocument
) -> bool:
"""
'A' before math or a command will frequently lead to false positives.
"""
if problem["rule"]["id"] != "EN_A_VS_AN":
return False
source = document.get_source()
context = source[
            origin.end.source.index : min(len(source), origin.end.source.index + 10)
].strip()
if context and context[0] in ("\\", "$"):
return True
return False
def _is_uppercase_letter_after_backslash(
self, problem, origin: Origin, document: LatexDocument
) -> bool:
if problem["rule"]["id"] != "UPPERCASE_SENTENCE_START":
return False
source = document.get_source()
context = source[
max(0, origin.begin.source.index - 10) : origin.begin.source.index
].strip()
if context and context[-1] == "\\":
return True
return False
def is_available(self) -> bool:
return bool(shutil.which("languagetool"))
def needs_detex(self):
return True
def installation_guide(self) -> str:
return (
"You can probably install install languagetool directly"
" with your package manager.\n"
" e.g. brew install languagetool\n"
" apt-get install languagetool\n"
" pacman -S languagetool\n"
"...\n"
"Otherwise, you can install it by hand:"
" https://github.com/languagetool-org/languagetool"
) | PypiClean |
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20/plugins/extra/hints/dep/jquery.poshytip.js | (function($) {
var tips = [],
reBgImage = /^url\(["']?([^"'\)]*)["']?\);?$/i,
rePNG = /\.png$/i,
ie6 = $.browser.msie && $.browser.version == 6;
// make sure the tips' position is updated on resize
function handleWindowResize() {
$.each(tips, function() {
this.refresh(true);
});
}
$(window).resize(handleWindowResize);
$.Poshytip = function(elm, options) {
this.$elm = $(elm);
this.opts = $.extend({}, $.fn.poshytip.defaults, options);
this.$tip = $(['<div class="',this.opts.className,'">',
'<div class="tip-inner tip-bg-image"></div>',
'<div class="tip-arrow tip-arrow-top tip-arrow-right tip-arrow-bottom tip-arrow-left"></div>',
'</div>'].join('')).appendTo(document.body);
this.$arrow = this.$tip.find('div.tip-arrow');
this.$inner = this.$tip.find('div.tip-inner');
this.disabled = false;
this.content = null;
this.init();
};
$.Poshytip.prototype = {
init: function() {
tips.push(this);
// save the original title and a reference to the Poshytip object
var title = this.$elm.attr('title');
this.$elm.data('title.poshytip', title !== undefined ? title : null)
.data('poshytip', this);
// hook element events
if (this.opts.showOn != 'none') {
this.$elm.bind({
'mouseenter.poshytip': $.proxy(this.mouseenter, this),
'mouseleave.poshytip': $.proxy(this.mouseleave, this)
});
switch (this.opts.showOn) {
case 'hover':
if (this.opts.alignTo == 'cursor')
this.$elm.bind('mousemove.poshytip', $.proxy(this.mousemove, this));
if (this.opts.allowTipHover)
this.$tip.hover($.proxy(this.clearTimeouts, this), $.proxy(this.mouseleave, this));
break;
case 'focus':
this.$elm.bind({
'focus.poshytip': $.proxy(this.show, this),
'blur.poshytip': $.proxy(this.hide, this)
});
break;
}
}
},
mouseenter: function(e) {
if (this.disabled)
return true;
this.$elm.attr('title', '');
if (this.opts.showOn == 'focus')
return true;
this.clearTimeouts();
this.showTimeout = setTimeout($.proxy(this.show, this), this.opts.showTimeout);
},
mouseleave: function(e) {
if (this.disabled || this.asyncAnimating && (this.$tip[0] === e.relatedTarget || jQuery.contains(this.$tip[0], e.relatedTarget)))
return true;
var title = this.$elm.data('title.poshytip');
if (title !== null)
this.$elm.attr('title', title);
if (this.opts.showOn == 'focus')
return true;
this.clearTimeouts();
this.hideTimeout = setTimeout($.proxy(this.hide, this), this.opts.hideTimeout);
},
mousemove: function(e) {
if (this.disabled)
return true;
this.eventX = e.pageX;
this.eventY = e.pageY;
if (this.opts.followCursor && this.$tip.data('active')) {
this.calcPos();
this.$tip.css({left: this.pos.l, top: this.pos.t});
if (this.pos.arrow)
this.$arrow[0].className = 'tip-arrow tip-arrow-' + this.pos.arrow;
}
},
show: function() {
if (this.disabled || this.$tip.data('active'))
return;
this.reset();
this.update();
this.display();
if (this.opts.timeOnScreen)
setTimeout($.proxy(this.hide, this), this.opts.timeOnScreen);
},
hide: function() {
if (this.disabled || !this.$tip.data('active'))
return;
this.display(true);
},
reset: function() {
this.$tip.queue([]).detach().css('visibility', 'hidden').data('active', false);
this.$inner.find('*').poshytip('hide');
if (this.opts.fade)
this.$tip.css('opacity', this.opacity);
this.$arrow[0].className = 'tip-arrow tip-arrow-top tip-arrow-right tip-arrow-bottom tip-arrow-left';
this.asyncAnimating = false;
},
update: function(content, dontOverwriteOption) {
if (this.disabled)
return;
var async = content !== undefined;
if (async) {
if (!dontOverwriteOption)
this.opts.content = content;
if (!this.$tip.data('active'))
return;
} else {
content = this.opts.content;
}
// update content only if it has been changed since last time
var self = this,
newContent = typeof content == 'function' ?
content.call(this.$elm[0], function(newContent) {
self.update(newContent);
}) :
content == '[title]' ? this.$elm.data('title.poshytip') : content;
if (this.content !== newContent) {
this.$inner.empty().append(newContent);
this.content = newContent;
}
this.refresh(async);
},
refresh: function(async) {
if (this.disabled)
return;
if (async) {
if (!this.$tip.data('active'))
return;
// save current position as we will need to animate
var currPos = {left: this.$tip.css('left'), top: this.$tip.css('top')};
}
// reset position to avoid text wrapping, etc.
this.$tip.css({left: 0, top: 0}).appendTo(document.body);
// save default opacity
if (this.opacity === undefined)
this.opacity = this.$tip.css('opacity');
// check for images - this code is here (i.e. executed each time we show the tip and not on init) due to some browser inconsistencies
var bgImage = this.$tip.css('background-image').match(reBgImage),
arrow = this.$arrow.css('background-image').match(reBgImage);
if (bgImage) {
var bgImagePNG = rePNG.test(bgImage[1]);
// fallback to background-color/padding/border in IE6 if a PNG is used
if (ie6 && bgImagePNG) {
this.$tip.css('background-image', 'none');
this.$inner.css({margin: 0, border: 0, padding: 0});
bgImage = bgImagePNG = false;
} else {
this.$tip.prepend('<table border="0" cellpadding="0" cellspacing="0"><tr><td class="tip-top tip-bg-image" colspan="2"><span></span></td><td class="tip-right tip-bg-image" rowspan="2"><span></span></td></tr><tr><td class="tip-left tip-bg-image" rowspan="2"><span></span></td><td></td></tr><tr><td class="tip-bottom tip-bg-image" colspan="2"><span></span></td></tr></table>')
.css({border: 0, padding: 0, 'background-image': 'none', 'background-color': 'transparent'})
.find('.tip-bg-image').css('background-image', 'url("' + bgImage[1] +'")').end()
.find('td').eq(3).append(this.$inner);
}
// disable fade effect in IE due to Alpha filter + translucent PNG issue
if (bgImagePNG && !$.support.opacity)
this.opts.fade = false;
}
// IE arrow fixes
if (arrow && !$.support.opacity) {
// disable arrow in IE6 if using a PNG
if (ie6 && rePNG.test(arrow[1])) {
arrow = false;
this.$arrow.css('background-image', 'none');
}
// disable fade effect in IE due to Alpha filter + translucent PNG issue
this.opts.fade = false;
}
var $table = this.$tip.find('table');
if (ie6) {
// fix min/max-width in IE6
this.$tip[0].style.width = '';
$table.width('auto').find('td').eq(3).width('auto');
var tipW = this.$tip.width(),
minW = parseInt(this.$tip.css('min-width')),
maxW = parseInt(this.$tip.css('max-width'));
if (!isNaN(minW) && tipW < minW)
tipW = minW;
else if (!isNaN(maxW) && tipW > maxW)
tipW = maxW;
this.$tip.add($table).width(tipW).eq(0).find('td').eq(3).width('100%');
} else if ($table[0]) {
// fix the table width if we are using a background image
// IE9, FF4 use float numbers for width/height so use getComputedStyle for them to avoid text wrapping
// for details look at: http://vadikom.com/dailies/offsetwidth-offsetheight-useless-in-ie9-firefox4/
$table.width('auto').find('td').eq(3).width('auto').end().end().width(document.defaultView && document.defaultView.getComputedStyle && parseFloat(document.defaultView.getComputedStyle(this.$tip[0], null).width) || this.$tip.width()).find('td').eq(3).width('100%');
}
this.tipOuterW = this.$tip.outerWidth();
this.tipOuterH = this.$tip.outerHeight();
this.calcPos();
// position and show the arrow image
if (arrow && this.pos.arrow) {
this.$arrow[0].className = 'tip-arrow tip-arrow-' + this.pos.arrow;
this.$arrow.css('visibility', 'inherit');
}
if (async) {
this.asyncAnimating = true;
var self = this;
this.$tip.css(currPos).animate({left: this.pos.l, top: this.pos.t}, 200, function() { self.asyncAnimating = false; });
} else {
this.$tip.css({left: this.pos.l, top: this.pos.t});
}
},
display: function(hide) {
var active = this.$tip.data('active');
if (active && !hide || !active && hide)
return;
this.$tip.stop();
if ((this.opts.slide && this.pos.arrow || this.opts.fade) && (hide && this.opts.hideAniDuration || !hide && this.opts.showAniDuration)) {
var from = {}, to = {};
// this.pos.arrow is only undefined when alignX == alignY == 'center' and we don't need to slide in that rare case
if (this.opts.slide && this.pos.arrow) {
var prop, arr;
if (this.pos.arrow == 'bottom' || this.pos.arrow == 'top') {
prop = 'top';
arr = 'bottom';
} else {
prop = 'left';
arr = 'right';
}
var val = parseInt(this.$tip.css(prop));
from[prop] = val + (hide ? 0 : (this.pos.arrow == arr ? -this.opts.slideOffset : this.opts.slideOffset));
to[prop] = val + (hide ? (this.pos.arrow == arr ? this.opts.slideOffset : -this.opts.slideOffset) : 0) + 'px';
}
if (this.opts.fade) {
from.opacity = hide ? this.$tip.css('opacity') : 0;
to.opacity = hide ? 0 : this.opacity;
}
this.$tip.css(from).animate(to, this.opts[hide ? 'hideAniDuration' : 'showAniDuration']);
}
hide ? this.$tip.queue($.proxy(this.reset, this)) : this.$tip.css('visibility', 'inherit');
this.$tip.data('active', !active);
},
disable: function() {
this.reset();
this.disabled = true;
},
enable: function() {
this.disabled = false;
},
destroy: function() {
this.reset();
this.$tip.remove();
delete this.$tip;
this.content = null;
this.$elm.unbind('.poshytip').removeData('title.poshytip').removeData('poshytip');
tips.splice($.inArray(this, tips), 1);
},
clearTimeouts: function() {
if (this.showTimeout) {
clearTimeout(this.showTimeout);
this.showTimeout = 0;
}
if (this.hideTimeout) {
clearTimeout(this.hideTimeout);
this.hideTimeout = 0;
}
},
calcPos: function() {
var pos = {l: 0, t: 0, arrow: ''},
$win = $(window),
win = {
l: $win.scrollLeft(),
t: $win.scrollTop(),
w: $win.width(),
h: $win.height()
}, xL, xC, xR, yT, yC, yB;
if (this.opts.alignTo == 'cursor') {
xL = xC = xR = this.eventX;
yT = yC = yB = this.eventY;
} else { // this.opts.alignTo == 'target'
var elmOffset = this.$elm.offset(),
elm = {
l: elmOffset.left,
t: elmOffset.top,
w: this.$elm.outerWidth(),
h: this.$elm.outerHeight()
};
xL = elm.l + (this.opts.alignX != 'inner-right' ? 0 : elm.w); // left edge
xC = xL + Math.floor(elm.w / 2); // h center
xR = xL + (this.opts.alignX != 'inner-left' ? elm.w : 0); // right edge
yT = elm.t + (this.opts.alignY != 'inner-bottom' ? 0 : elm.h); // top edge
yC = yT + Math.floor(elm.h / 2); // v center
yB = yT + (this.opts.alignY != 'inner-top' ? elm.h : 0); // bottom edge
}
// keep in viewport and calc arrow position
switch (this.opts.alignX) {
case 'right':
case 'inner-left':
pos.l = xR + this.opts.offsetX;
if (pos.l + this.tipOuterW > win.l + win.w)
pos.l = win.l + win.w - this.tipOuterW;
if (this.opts.alignX == 'right' || this.opts.alignY == 'center')
pos.arrow = 'left';
break;
case 'center':
pos.l = xC - Math.floor(this.tipOuterW / 2);
if (pos.l + this.tipOuterW > win.l + win.w)
pos.l = win.l + win.w - this.tipOuterW;
else if (pos.l < win.l)
pos.l = win.l;
break;
default: // 'left' || 'inner-right'
pos.l = xL - this.tipOuterW - this.opts.offsetX;
if (pos.l < win.l)
pos.l = win.l;
if (this.opts.alignX == 'left' || this.opts.alignY == 'center')
pos.arrow = 'right';
}
switch (this.opts.alignY) {
case 'bottom':
case 'inner-top':
pos.t = yB + this.opts.offsetY;
// 'left' and 'right' need priority for 'target'
if (!pos.arrow || this.opts.alignTo == 'cursor')
pos.arrow = 'top';
if (pos.t + this.tipOuterH > win.t + win.h) {
pos.t = yT - this.tipOuterH - this.opts.offsetY;
if (pos.arrow == 'top')
pos.arrow = 'bottom';
}
break;
case 'center':
pos.t = yC - Math.floor(this.tipOuterH / 2);
if (pos.t + this.tipOuterH > win.t + win.h)
pos.t = win.t + win.h - this.tipOuterH;
else if (pos.t < win.t)
pos.t = win.t;
break;
default: // 'top' || 'inner-bottom'
pos.t = yT - this.tipOuterH - this.opts.offsetY;
// 'left' and 'right' need priority for 'target'
if (!pos.arrow || this.opts.alignTo == 'cursor')
pos.arrow = 'bottom';
if (pos.t < win.t) {
pos.t = yB + this.opts.offsetY;
if (pos.arrow == 'bottom')
pos.arrow = 'top';
}
}
this.pos = pos;
}
};
$.fn.poshytip = function(options) {
if (typeof options == 'string') {
var args = arguments,
method = options;
Array.prototype.shift.call(args);
// unhook live events if 'destroy' is called
if (method == 'destroy')
this.die('mouseenter.poshytip').die('focus.poshytip');
return this.each(function() {
var poshytip = $(this).data('poshytip');
if (poshytip && poshytip[method])
poshytip[method].apply(poshytip, args);
});
}
var opts = $.extend({}, $.fn.poshytip.defaults, options);
// generate CSS for this tip class if not already generated
if (!$('#poshytip-css-' + opts.className)[0])
$(['<style id="poshytip-css-',opts.className,'" type="text/css">',
'div.',opts.className,'{visibility:hidden;position:absolute;top:0;left:0;}',
'div.',opts.className,' table, div.',opts.className,' td{margin:0;font-family:inherit;font-size:inherit;font-weight:inherit;font-style:inherit;font-variant:inherit;}',
'div.',opts.className,' td.tip-bg-image span{display:block;font:1px/1px sans-serif;height:',opts.bgImageFrameSize,'px;width:',opts.bgImageFrameSize,'px;overflow:hidden;}',
'div.',opts.className,' td.tip-right{background-position:100% 0;}',
'div.',opts.className,' td.tip-bottom{background-position:100% 100%;}',
'div.',opts.className,' td.tip-left{background-position:0 100%;}',
'div.',opts.className,' div.tip-inner{background-position:-',opts.bgImageFrameSize,'px -',opts.bgImageFrameSize,'px;}',
'div.',opts.className,' div.tip-arrow{visibility:hidden;position:absolute;overflow:hidden;font:1px/1px sans-serif;}',
'</style>'].join('')).appendTo('head');
// check if we need to hook live events
if (opts.liveEvents && opts.showOn != 'none') {
var deadOpts = $.extend({}, opts, { liveEvents: false });
switch (opts.showOn) {
case 'hover':
this.live('mouseenter.poshytip', function() {
var $this = $(this);
if (!$this.data('poshytip'))
$this.poshytip(deadOpts).poshytip('mouseenter');
});
break;
case 'focus':
this.live('focus.poshytip', function() {
var $this = $(this);
if (!$this.data('poshytip'))
$this.poshytip(deadOpts).poshytip('show');
});
break;
}
return this;
}
return this.each(function() {
new $.Poshytip(this, opts);
});
}
// default settings
$.fn.poshytip.defaults = {
content: '[title]', // content to display ('[title]', 'string', element, function(updateCallback){...}, jQuery)
className: 'tip-yellow', // class for the tips
bgImageFrameSize: 10, // size in pixels for the background-image (if set in CSS) frame around the inner content of the tip
showTimeout: 500, // timeout before showing the tip (in milliseconds 1000 == 1 second)
hideTimeout: 100, // timeout before hiding the tip
timeOnScreen: 0, // timeout before automatically hiding the tip after showing it (set to > 0 in order to activate)
showOn: 'hover', // handler for showing the tip ('hover', 'focus', 'none') - use 'none' to trigger it manually
liveEvents: false, // use live events
alignTo: 'cursor', // align/position the tip relative to ('cursor', 'target')
alignX: 'right', // horizontal alignment for the tip relative to the mouse cursor or the target element
// ('right', 'center', 'left', 'inner-left', 'inner-right') - 'inner-*' matter if alignTo:'target'
alignY: 'top', // vertical alignment for the tip relative to the mouse cursor or the target element
// ('bottom', 'center', 'top', 'inner-bottom', 'inner-top') - 'inner-*' matter if alignTo:'target'
offsetX: -22, // offset X pixels from the default position - doesn't matter if alignX:'center'
offsetY: 18, // offset Y pixels from the default position - doesn't matter if alignY:'center'
allowTipHover: true, // allow hovering the tip without hiding it onmouseout of the target - matters only if showOn:'hover'
followCursor: false, // if the tip should follow the cursor - matters only if showOn:'hover' and alignTo:'cursor'
fade: true, // use fade animation
slide: true, // use slide animation
slideOffset: 8, // slide animation offset
showAniDuration: 300, // show animation duration - set to 0 if you don't want show animation
hideAniDuration: 300 // hide animation duration - set to 0 if you don't want hide animation
};
})(jQuery); | PypiClean |
/flask-pony-3.1b1.tar.gz/flask-pony-3.1b1/flask_pony/orm.py | from collections import OrderedDict
from datetime import date, datetime, time
from pony.orm import ormtypes
import six
import wtforms.fields as wtf_fields
import wtforms.validators as wtf_validators
from .forms import Form, EntityField
from . import validators
class Factory(object):
"""A simple factory for functions and methods that generate form elements."""
def __init__(self):
self.__types = {}
def get(self, tp):
return self.__types.get(tp)
def __call__(self, *types):
def decorator(func):
for tp in types:
self.__types[tp] = func
return func
return decorator
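# Illustrative sketch (not part of the original module): Factory is a small
# type -> constructor registry used as a decorator. FormBuilder below wires it
# up roughly like this:
#
#     field_constructor = Factory()
#
#     @field_constructor(bool)
#     def field_bool(self, attr, options):
#         return wtf_fields.BooleanField, options
#
#     field_constructor.get(bool)  # -> field_bool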
class FormBuilder(object):
field_constructor = Factory()
def __init__(self, entity_class, base_class=None, excludes=None, skip_pk=True):
self._fields = OrderedDict()
self._buttons = OrderedDict()
self._entity_class = entity_class
self._base_class = base_class
self._excludes = set(excludes or [])
self._skip_pk = skip_pk
def _field_numeric(self, attr, options):
miN = attr.kwargs.get('min', attr.kwargs.get('unsigned') and 0)
maX = attr.kwargs.get('max')
options['validators'].append(wtf_validators.NumberRange(miN, maX))
def _field_string(self, attr, options):
args = list(attr.args)
max_len = args.pop() if args else attr.kwargs.get('max_len')
if max_len:
options['validators'].append(wtf_validators.Length(max=max_len))
def _get_field_method(self, tp):
"""Returns a reference to the form element's constructor method."""
method = self.field_constructor.get(tp)
if method and hasattr(self, method.__name__):
return getattr(self, method.__name__)
return method
def _create_collection_field(self, attr, options):
"""Creates the form element for working with the collection of entities."""
return None, options
def _create_plain_field(self, attr, options):
"""Creates the form element."""
method = self._get_field_method(attr.py_type) or self._create_other_field
klass, options = method(attr, options)
if attr.is_unique:
options['validators'].append(validators.UniqueEntityValidator(attr.entity))
return klass, options
def _create_pk_field(self, attr, options):
"""Creates the form element for working with primary key."""
if attr.auto or self._skip_pk:
            return None, options
        # Non-auto primary keys fall back to a regular field when not skipped.
        return self._create_plain_field(attr, options)
def _create_relational_field(self, attr, options):
"""Creates the form element for working with entity relationships."""
options['entity_class'] = attr.py_type
options['allow_empty'] = not attr.is_required
return EntityField, options
def _create_other_field(self, attr, options):
"""Creates a custom form element. Called when the element was not found."""
return None, options
def add(self, attr, field_class=None, **options):
"""Adds an element to the form based on the entity attribute."""
# print(attr.name, attr.py_type, getattr(attr, 'set', None))
# print(dir(attr))
# print(attr, attr.is_relation, attr.is_collection)
# print(attr.is_pk, attr.auto, attr.is_unique, attr.is_part_of_unique_index, attr.composite_keys)
def add(klass, options):
if klass:
self._fields[attr.name] = field_class(**options) if field_class else klass(**options)
return self
kwargs = {
'label': attr.name,
'default': attr.default,
'validators': [],
}
kwargs.update(options)
if attr.is_pk:
return add(*self._create_pk_field(attr, kwargs))
if attr.is_collection:
return add(*self._create_collection_field(attr, kwargs))
validator = wtf_validators.InputRequired() if attr.is_required and not attr.is_pk else wtf_validators.Optional()
kwargs['validators'].insert(0, validator)
if attr.is_relation:
return add(*self._create_relational_field(attr, kwargs))
return add(*self._create_plain_field(attr, kwargs))
def add_button(self, name, button_class=wtf_fields.SubmitField, **options):
"""Adds a button to the form."""
self._buttons[name] = button_class(**options)
def build_form(self):
for attr in self._entity_class._attrs_:
if attr.name not in self._excludes:
self.add(attr)
def get_form(self):
self.build_form()
        classname = '{}Form'.format(self._entity_class.__name__)
base = self._base_class or Form
props = OrderedDict()
props.update(self._fields)
props.update(self._buttons)
form = type(classname, (base,), props)
        form._attr_names_ = list(self._fields.keys())
return form
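    # Illustrative usage sketch (not part of the original module): building a
    # WTForms class for a hypothetical Pony entity `Person`:
    #
    #     PersonForm = FormBuilder.get_instance(Person, skip_pk=True).get_form()
    #     form = PersonForm()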
@field_constructor(bool)
def field_bool(self, attr, options):
return wtf_fields.BooleanField, options
@field_constructor(int)
def field_int(self, attr, options):
self._field_numeric(attr, options)
return wtf_fields.IntegerField, options
@field_constructor(float)
def field_float(self, attr, options):
self._field_numeric(attr, options)
return wtf_fields.FloatField, options
@field_constructor(ormtypes.Decimal)
def field_decimal(self, attr, options):
self._field_numeric(attr, options)
options.setdefault('places', None)
return wtf_fields.DecimalField, options
@field_constructor(str, six.text_type)
def field_string(self, attr, options):
self._field_string(attr, options)
return wtf_fields.StringField, options
@field_constructor(ormtypes.LongStr, ormtypes.LongUnicode)
def field_textarea(self, attr, options):
self._field_string(attr, options)
return wtf_fields.TextAreaField, options
@field_constructor(date)
def field_date(self, attr, options):
return wtf_fields.DateField, options
@field_constructor(datetime)
def field_datetime(self, attr, options):
return wtf_fields.DateTimeField, options
@field_constructor(ormtypes.Json)
def field_json(self, attr, options):
return wtf_fields.TextAreaField, options
@field_constructor(ormtypes.UUID)
def field_uuid(self, attr, options):
"""Creates a form element for the UUID type."""
options['validators'].append(validators.UUIDValidator(attr.entity))
return wtf_fields.StringField, options
@classmethod
def get_instance(cls, entity_class, *args, **kwargs):
return cls(entity_class, *args, **kwargs).get_form() | PypiClean |
/DeploymentTool-1.5.zip/DeploymentTool-1.5/Deployment/Deployment/urls.py | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'Deployment.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'Deploymentapp.views.home', name = 'home'),
url(r'^login', 'Deploymentapp.views.signin', name = 'signin'),
url(r'^tasklist/$', 'Deploymentapp.views.tasklist', name = 'task'),
url(r'^projectload/(?P<idtask>[-\w]+)$','Deploymentapp.views.loadtask', name = 'tasklist'),
url(r'^deletetask/(?P<idtask>[-\w]+)$','Deploymentapp.views.deletetask', name = 'delete'),
url(r'^addnewtask/$', 'Deploymentapp.views.addnewtask', name = 'addnew'),
url(r'^projecttask/$', 'Deploymentapp.views.addproject', name = 'projecttask'),
url(r'^authentication/$', 'Deploymentapp.views.authentication', name = 'authentic'),
url(r'^search/$', 'Deploymentapp.views.searchtask', name = 'search'),
url(r'^changehostuser/$', 'Deploymentapp.views.changehostuser', name = 'chnghostusr'),
url(r'^changehosttask/$', 'Deploymentapp.views.changehosttask', name = 'chnghosttask'),
url(r'^loggedout/$', 'Deploymentapp.views.loggedout', name = 'loggedout'),
url(r'^deletecommand/(?P<idcommand>[-\w]+)$','Deploymentapp.views.deletecommand', name = 'deletecommand'),
url(r'^editcommand/(?P<idcommand>[-\w]+)$','Deploymentapp.views.runedit', name = 'editcommand'),
url(r'^successedit/$', 'Deploymentapp.views.editcommand', name = 'edited'),
url(r'^dorevert/(?P<idtask>[-\w]+)$', 'Deploymentapp.views.reverttask', name = 'reverttask'),
#baru
url(r'^copy/', 'Deploymentapp.views.copy', name='copy'),
url(r'^newfile/', 'Deploymentapp.views.createfile', name='newfile'),
url(r'^deletedirectory/', 'Deploymentapp.views.deletedir', name='deletedirectory'),
url(r'^deletefile/', 'Deploymentapp.views.deletefile', name='deletefile'),
url(r'^createdirectory/', 'Deploymentapp.views.makedir', name='createdirectory'),
url(r'^uploadfile/', 'Deploymentapp.views.uploadfile', name='uploadfile'),
url(r'^executetask/', 'Deploymentapp.views.executetask', name='executetask'),
url(r'^deleteallcommand/','Deploymentapp.views.deleteallcommand', name = 'deleteallcommand'),
)
handler404 = "Deploymentapp.views.error404" | PypiClean |
/LinkPython-0.1.1.tar.gz/LinkPython-0.1.1/modules/pybind11/docs/advanced/cast/functional.rst | Functional
##########
The following features must be enabled by including :file:`pybind11/functional.h`.
Callbacks and passing anonymous functions
=========================================
The C++11 standard brought lambda functions and the generic polymorphic
function wrapper ``std::function<>`` to the C++ programming language, which
enable powerful new ways of working with functions. Lambda functions come in
two flavors: stateless lambda functions resemble classic function pointers that
link to an anonymous piece of code, while stateful lambda functions
additionally depend on captured variables that are stored in an anonymous
*lambda closure object*.
Here is a simple example of a C++ function that takes an arbitrary function
(stateful or stateless) with signature ``int -> int`` as an argument and runs
it with the value 10.
.. code-block:: cpp
int func_arg(const std::function<int(int)> &f) {
return f(10);
}
The example below is more involved: it takes a function of signature ``int -> int``
and returns another function of the same kind. The return value is a stateful
lambda function, which stores the value ``f`` in the capture object and adds 1 to
its return value upon execution.
.. code-block:: cpp
std::function<int(int)> func_ret(const std::function<int(int)> &f) {
return [f](int i) {
return f(i) + 1;
};
}
This example demonstrates using Python named parameters in C++ callbacks, which
requires using ``py::cpp_function`` as a wrapper. Usage is similar to defining
methods of classes:
.. code-block:: cpp
py::cpp_function func_cpp() {
return py::cpp_function([](int i) { return i+1; },
py::arg("number"));
}
After including the extra header file :file:`pybind11/functional.h`, it is almost
trivial to generate binding code for all of these functions.
.. code-block:: cpp
#include <pybind11/functional.h>
PYBIND11_MODULE(example, m) {
m.def("func_arg", &func_arg);
m.def("func_ret", &func_ret);
m.def("func_cpp", &func_cpp);
}
The following interactive session shows how to call them from Python.
.. code-block:: pycon
$ python
>>> import example
>>> def square(i):
... return i * i
...
>>> example.func_arg(square)
100L
>>> square_plus_1 = example.func_ret(square)
>>> square_plus_1(4)
17L
>>> plus_1 = func_cpp()
>>> plus_1(number=43)
44L
.. warning::
Keep in mind that passing a function from C++ to Python (or vice versa)
will instantiate a piece of wrapper code that translates function
invocations between the two languages. Naturally, this translation
increases the computational cost of each function call somewhat. A
problematic situation can arise when a function is copied back and forth
between Python and C++ many times in a row, in which case the underlying
wrappers will accumulate correspondingly. The resulting long sequence of
C++ -> Python -> C++ -> ... roundtrips can significantly decrease
performance.
    There is one exception: pybind11 detects the case where a stateless function
(i.e. a function pointer or a lambda function without captured variables)
is passed as an argument to another C++ function exposed in Python. In this
case, there is no overhead. Pybind11 will extract the underlying C++
function pointer from the wrapped function to sidestep a potential C++ ->
Python -> C++ roundtrip. This is demonstrated in :file:`tests/test_callbacks.cpp`.
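    For instance, reusing the functions defined above, the following (purely
    illustrative) chain makes several such transitions, since ``wrapped`` is a
    Python-level wrapper around a C++ lambda that in turn calls back into
    ``square``:

    .. code-block:: pycon

        >>> wrapped = example.func_ret(square)
        >>> example.func_arg(wrapped)
        101L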
.. note::
This functionality is very useful when generating bindings for callbacks in
C++ libraries (e.g. GUI libraries, asynchronous networking libraries, etc.).
The file :file:`tests/test_callbacks.cpp` contains a complete example
that demonstrates how to work with callbacks and anonymous functions in
more detail.
| PypiClean |
/DeepGMAP-0.2.0.tar.gz/DeepGMAP-0.2.0/deepgmap/post_train_tools/class_saliency_extraction.py | import sys
import tensorflow as tf
import numpy as np
import time
import math
import os
import getopt
from glob import glob
from natsort import natsorted
import pyBigWig as pbw
import subprocess as subp
PATH_SEP=os.path.sep
def longest_common_prefix(seq1, seq2):
start = 0
while start < min(len(seq1), len(seq2)):
if seq1[start] != seq2[start]:
break
start += 1
return seq1[:start]
def longest_common_suffix(seq1, seq2):
return longest_common_prefix(seq1[::-1], seq2[::-1])[::-1]
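# Illustrative example (not part of the original script): these helpers later
# strip the shared prefix/suffix from sample names, e.g.
#     longest_common_prefix("liver_rep1.bed", "lung_rep1.bed")  -> "l"
#     longest_common_suffix("liver_rep1.bed", "lung_rep1.bed")  -> "_rep1.bed"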
start=time.time()
def genome_scan(filename):
with open(filename, 'r') as f1:
file_name=f1.name
path_sep=os.path.sep
file_name1=file_name.split(path_sep)
file_name2=file_name1[-1].split('_')
chromosome=file_name2[2]
a=file_name2[3]
b=a.split('.')
chr_position=int(b[0])
#window_id=(file_name2[3])[:3]
genome_seq=np.load(f1)
shape_of_genome=genome_seq['genome'].shape
genome_seq_re=np.reshape(genome_seq['genome'], (shape_of_genome[0], shape_of_genome[1], 4, 1))
genome_seq_re_list=np.array_split(genome_seq_re, 100)
return genome_seq_re_list, chromosome, chr_position #, window_id
def run(args=None):
a=time.asctime()
b=a.replace(':', '')
start_at=b.replace(' ', '_')
BATCH_SIZE=1000
prefix="class_saliency_map"
GPUID="0"
genome_file=""
class_of_interest=-1
if args==None:
try:
options, args =getopt.getopt(sys.argv[1:], 'l:t:o:c:G:g:p:',
                ['log=','test_genome=','output_dir=',"class_of_interest=", "GPUID=", "genome_file=","prefix="])
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
if len(options)<3:
print('too few argument')
sys.exit(0)
for opt, arg in options:
if opt in ('-l', '--log'):
log_file_name=arg
elif opt in ('-t', '--test_genome'):
test_genome=arg
elif opt in ('-o', '--output_dir'):
output_dir=arg
if not output_dir.endswith("/"):
output_dir+="/"
elif opt in ('-p','--prefix'):
prefix=arg
elif opt in ('-c', "--class_of_interest"):
class_of_interest=int(arg)
elif opt in ('-G', "--GPUID"):
GPUID=arg
elif opt in ('-g', '--genome_file'):
genome_file=arg
else:
log_file_name=args.log
test_genome=args.test_genome
output_dir=args.output_dir
if not output_dir.endswith(PATH_SEP):
output_dir+=PATH_SEP
prefix=args.prefix
class_of_interest=args.class_of_interest
GPUID=args.GPUID
genome_file=args.genome_file
chromosome_sizes={}
with open(genome_file, "r") as fin:
for line in fin:
line=line.split()
chromosome_sizes[line[0]]=int(line[1])
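    # Note (illustrative, not part of the original script): `genome_file` is expected
    # to be a two-column chromosome-sizes file, e.g. a line like "chr1<TAB>248956422".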
input_file_prefix= os.path.splitext(log_file_name)[0]
current_variable=np.load(input_file_prefix+"_trained_variables.npz")
with open(log_file_name, 'r') as fin:
for line in fin:
if line.startswith('<tf.Variable'):
line=line.split(' ')
print(line)
if line[1]=="'prediction/W_fc1:0'":
line=line[2].split('=(')[1].strip(",'")
first_fc_shape=int(line)
elif line.startswith("data"):
line=line.split(':')[1]
data_length=int(line)
elif line.startswith("Total class number:"):
class_num=int(line.split(': ')[1])
elif line.startswith("Labeled file:"):
sample_list=[]
line=line.split(": ")[1].strip("\n")
#print line
if os.path.isfile(line):
label_file=line
else:
line=line.strip(".")
cwd = os.getcwd()
label_file=cwd+line
if not os.path.isfile(label_file):
sys.exit("cannot find "+line)
with open(label_file) as fin:
                    line2 = next(fin)
line2=line2.split()[1:]
common_prefix = longest_common_prefix(line2[0],line2[-1])
common_suffix = longest_common_suffix(line2[0],line2[-1])
#print common_prefix, common_suffix
common_prefix_len=len(common_prefix)
common_suffix_len=len(common_suffix)
for l in line2:
l=l[common_prefix_len:]
l=l[:-common_suffix_len]
sample_list.append(l)
print(sample_list)
if not "*" in test_genome:
test_genome+=PATH_SEP+"*npz"
test_genome_list=natsorted(glob(test_genome))
if len(test_genome_list)==0:
sys.exit(test_genome+" does not exist.")
def recon_variable(shape, variable_name):
initial = tf.truncated_normal(shape, mean=0.02, stddev=0.02)
return tf.Variable(initial, name=variable_name, trainable=True)
def conv2d_1(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 2, 1, 1], padding='VALID')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
def max_pool_4x1(x):
return tf.nn.max_pool(x, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding='SAME')
def max_pool_2x2_with_argmax(x):
return tf.nn.max_pool_with_argmax(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
def max_pool_4x1_with_argmax(x):
return tf.nn.max_pool_with_argmax(x, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding='SAME')
#x_image_recon = recon_variable([1, 1000, 4, 1], 'x_image_recon')
#print GPUID
if class_of_interest==-1:
classes =range(class_num)
else:
classes=[class_of_interest]
for c in classes:
_tmp_prefix=prefix+"_class_"+sample_list[c]
out_pf=output_dir+_tmp_prefix
if os.path.isfile(out_pf+".bw"):
print("skipping "+prefix+"_class_"+sample_list[c])
continue
"""elif c<=18:
print("skipping "+prefix+"_class_"+sample_list[c])
continue"""
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess2 = tf.Session(config=config)
with tf.device('/device:GPU:'+GPUID):
x_image_recon=tf.placeholder(tf.float32, shape=[BATCH_SIZE, data_length, 4, 1])
with tf.GradientTape() as g:
g.watch(x_image_recon)
h_conv11_re=conv2d_1(x_image_recon, current_variable["prediction/W_conv1:0"])
h_conv12_re=conv2d_1(x_image_recon, tf.reverse(current_variable["prediction/W_conv1:0"], [0,1]))
h_conv11_re_ = tf.nn.relu(h_conv11_re)
h_conv12_re_ = tf.nn.relu(h_conv12_re)
h_pool1_re = max_pool_2x2(h_conv11_re_)
h_pool1_rc_re = max_pool_2x2(h_conv12_re_)
h_conv2_re = tf.add(tf.nn.relu(conv2d_1(h_pool1_re, current_variable["prediction/W_conv2:0"])), tf.nn.relu(conv2d_1(h_pool1_rc_re, tf.reverse(current_variable["prediction/W_conv2:0"], [0,1]))))
h_pool2_re = max_pool_2x2(h_conv2_re)
h_conv21_re = tf.nn.relu(conv2d_1(h_pool2_re, current_variable["prediction/W_conv21:0"]))
h_pool21_re = max_pool_2x2(h_conv21_re)
h_conv22_re = tf.nn.relu(conv2d_1(h_pool21_re, current_variable["prediction/W_conv22:0"]))
h_pool22_re = max_pool_4x1(h_conv22_re)
h_pool3_flat_re = tf.reshape(h_pool22_re, [-1, 1*first_fc_shape])
h_fc1_re = tf.nn.relu(tf.add(tf.matmul(h_pool3_flat_re, current_variable["prediction/W_fc1:0"]), current_variable["prediction/b_fc1:0"]))
y_conv_re=tf.add(tf.matmul(h_fc1_re,current_variable["prediction/W_fc4:0"]), current_variable["prediction/b_fc4:0"])
#cost =-tf.reshape(tf.nn.sigmoid(y_conv_re[0][0])/(tf.nn.sigmoid(y_conv_re[0][2])+tf.nn.sigmoid(y_conv_re[0][0])+tf.nn.sigmoid(y_conv_re[0][1])+0.000001),[1])+tf.reduce_sum(tf.square(x_image_recon))/2000.0
#print y_conv_re.shape
#cost =tf.nn.sigmoid(y_conv_re[:,class_of_interest])
#cost =tf.nn.relu(y_conv_re[:,class_of_interest])
cost =tf.clip_by_value(y_conv_re[:,c], -4.0, 1000000000.0)
#cost =y_conv_re[:,class_of_interest]
#print cost.shape
w=g.gradient(cost, x_image_recon)
sess2.run(tf.global_variables_initializer())
position_list=[]
sal_map=[]
BREAK=False
i=0
chrom_list=[]
start_list=[]
end_list=[]
value_list=[]
chrom_set=[]
header_list=[]
bw=None
bw_file_list=[]
bw_file_merged_list=[]
for test_genome_ in test_genome_list:
print("reading "+test_genome_)
genome_data=np.load(test_genome_)
position_list_, seq_list=genome_data['positions'], genome_data['sequences']
seq_list=np.array(seq_list, np.int16).reshape(-1, data_length, 4, 1)
seq_length=seq_list.shape[0]
#print(seq_length)
loop=int(math.ceil(float(seq_length)/BATCH_SIZE))
for i in range(loop):
if i*BATCH_SIZE>seq_length:
break
scanning=seq_list[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
position=position_list_[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
len_scanning=len(scanning)
if len_scanning<BATCH_SIZE:
dummy_array=np.zeros([(BATCH_SIZE-len_scanning), data_length, 4, 1])
scanning=np.concatenate([scanning, dummy_array])
w_tmp=sess2.run(w, feed_dict={x_image_recon: scanning})
#print w_tmp.shape
#print w_tmp[1]
w_tmp_shape=w_tmp.shape
#print w_tmp[0]
w_tmp=np.reshape(w_tmp,[w_tmp_shape[0], w_tmp_shape[1],w_tmp_shape[2]])
#w_tmp=np.amax(np.absolute(np.clip(w_tmp, None, 0.0)), axis=2)
w_tmp=np.sum(np.absolute(w_tmp), axis=2)
if len_scanning<BATCH_SIZE:
w_tmp=w_tmp[:len_scanning]
for j in range(len_scanning):
sal_map=np.reshape(w_tmp[j], [-1])
#print np.sum(sal_map)
if not np.sum(sal_map)==0:
#print position[j]
current_chr, current_pos=position[j].strip(">").split(':')
if not current_chr in chrom_set:
chrom_set.append(current_chr)
if bw is not None:
bw.close()
if len(bw_file_list)==50:
k=len(bw_file_merged_list)
tmp_name=out_pf+"_"+start_at+"tmp_"+str(k)
subp.check_call(["bigWigMerge"]+bw_file_list+[ tmp_name+".bedGraph"])
with open(tmp_name+".bedGraph" ,"w") as tmp_out:
subp.check_call(["sort", "-k1,1", "-k2,2n",tmp_name+".bedGraph"], stdout=tmp_out)
subp.check_call(["bedGraphToBigWig", tmp_name+".bedGraph", genome_file,tmp_name+".bw"])
bw_file_merged_list.append(tmp_name+".bw")
subp.check_call(["rm", tmp_name+".bedGraph"])
for _file in bw_file_list:
subp.check_call(["rm", _file])
bw_file_list=[]
bw=pbw.open(out_pf+"_"+current_chr+".bw", "w")
print("writing "+out_pf+"_"+current_chr+".bw")
bw_file_list.append(out_pf+"_"+current_chr+".bw")
bw.addHeader([(current_chr, chromosome_sizes[current_chr])])
start, end =map(int, current_pos.split("-"))
#print current_chr, len(sal_map), start,end
bw.addEntries([current_chr]*len(sal_map),
range(start,end),
ends=range(start+1,end+1),
values=sal_map)
"""if len(sal_map)==0:
sal_map=np.reshape(w_tmp, [-1])
else:
sal_map=np.concatenate([sal_map,np.reshape(w_tmp, [-1])],axis=0)
"""
if len(bw_file_merged_list)==50:
BREAK=True
break
if BREAK:
break
bw.close()
if len(bw_file_merged_list)==0:
#h, t=os.path.split(bw_file_list[0])
if len(bw_file_list)==1:
subp.check_call(["mv", bw_file_list[0], out_pf+".bw"])
else:
tmp_name=out_pf+"_"+start_at+"_tmp"
print("merging files to create "+out_pf+".bw")
#print bw_file_list
subp.check_call(["bigWigMerge"]+bw_file_list+[tmp_name+".bedGraph"])
with open(out_pf+".bedGraph" ,"w") as tmp_out:
subp.check_call(["sort", "-k1,1", "-k2,2n",tmp_name+".bedGraph"], stdout=tmp_out)
subp.check_call(["bedGraphToBigWig", out_pf+".bedGraph", genome_file, out_pf+".bw"])
subp.check_call(["rm", out_pf+".bedGraph"])
subp.check_call(["rm", tmp_name+".bedGraph"])
for _file in bw_file_list:
subp.check_call(["rm", _file])
else:
if len(bw_file_list)>1:
#print bw_file_list
k=len(bw_file_merged_list)
tmp_name=out_pf+"_"+start_at+"tmp_"+str(k)
k=len(bw_file_merged_list)
subp.check_call(["bigWigMerge"]+bw_file_list+[tmp_name+".bedGraph"])
with open(out_pf+".bedGraph" ,"w") as tmp_out:
subp.check_call(["sort", "-k1,1", "-k2,2n",tmp_name+".bedGraph"], stdout=tmp_out)
subp.check_call(["bedGraphToBigWig", tmp_name+".bedGraph", genome_file, tmp_name+".bw"])
bw_file_merged_list.append(tmp_name+".bw")
subp.check_call(["rm", tmp_name+".bedGraph"])
#subp.check_call(["rm", "_tmp.bedGraph"])
for _file in bw_file_list:
subp.check_call(["rm", _file])
bw_file_list=[]
if len(bw_file_list)>0:
bw_file_merged_list.append(bw_file_list[0])
tmp_name=out_pf+"_"+start_at+"_tmp"
subp.check_call(["bigWigMerge"]+bw_file_merged_list+[tmp_name+".bedGraph"])
with open(out_pf+".bedGraph" ,"w") as tmp_out:
subp.check_call(["sort", "-k1,1", "-k2,2n",tmp_name+".bedGraph"], stdout=tmp_out)
subp.check_call(["rm", tmp_name+".bedGraph"])
subp.check_call(["bedGraphToBigWig", out_pf+".bedGraph", genome_file, out_pf+".bw"])
subp.check_call(["rm", out_pf+".bedGraph"])
for _file in bw_file_merged_list:
subp.check_call(["rm", _file])
if __name__== '__main__':
run() | PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/pl.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['pl']={"editor":"Edytor tekstu sformatowanego","editorPanel":"Panel edytora tekstu sformatowanego","common":{"editorHelp":"W celu uzyskania pomocy naciśnij ALT 0","browseServer":"Przeglądaj","url":"Adres URL","protocol":"Protokół","upload":"Wyślij","uploadSubmit":"Wyślij","image":"Obrazek","flash":"Flash","form":"Formularz","checkbox":"Pole wyboru (checkbox)","radio":"Przycisk opcji (radio)","textField":"Pole tekstowe","textarea":"Obszar tekstowy","hiddenField":"Pole ukryte","button":"Przycisk","select":"Lista wyboru","imageButton":"Przycisk graficzny","notSet":"<nie ustawiono>","id":"Id","name":"Nazwa","langDir":"Kierunek tekstu","langDirLtr":"Od lewej do prawej (LTR)","langDirRtl":"Od prawej do lewej (RTL)","langCode":"Kod języka","longDescr":"Adres URL długiego opisu","cssClass":"Nazwa klasy CSS","advisoryTitle":"Opis obiektu docelowego","cssStyle":"Styl","ok":"OK","cancel":"Anuluj","close":"Zamknij","preview":"Podgląd","resize":"Przeciągnij, aby zmienić rozmiar","generalTab":"Ogólne","advancedTab":"Zaawansowane","validateNumberFailed":"Ta wartość nie jest liczbą.","confirmNewPage":"Wszystkie niezapisane zmiany zostaną utracone. Czy na pewno wczytać nową stronę?","confirmCancel":"Pewne opcje zostały zmienione. Czy na pewno zamknąć okno dialogowe?","options":"Opcje","target":"Obiekt docelowy","targetNew":"Nowe okno (_blank)","targetTop":"Okno najwyżej w hierarchii (_top)","targetSelf":"To samo okno (_self)","targetParent":"Okno nadrzędne (_parent)","langDirLTR":"Od lewej do prawej (LTR)","langDirRTL":"Od prawej do lewej (RTL)","styles":"Style","cssClasses":"Klasy arkusza stylów","width":"Szerokość","height":"Wysokość","align":"Wyrównaj","left":"Do lewej","right":"Do prawej","center":"Do środka","justify":"Wyjustuj","alignLeft":"Wyrównaj do lewej","alignRight":"Wyrównaj do prawej","alignCenter":"Wyśrodkuj","alignTop":"Do góry","alignMiddle":"Do środka","alignBottom":"Do dołu","alignNone":"Brak","invalidValue":"Nieprawidłowa wartość.","invalidHeight":"Wysokość musi być liczbą.","invalidWidth":"Szerokość musi być liczbą.","invalidLength":"Wartość podana dla pola \"%1\" musi być liczbą dodatnią bez jednostki lub z poprawną jednostką długości (%2).","invalidCssLength":"Wartość podana dla pola \"%1\" musi być liczbą dodatnią bez jednostki lub z poprawną jednostką długości zgodną z CSS (px, %, in, cm, mm, em, ex, pt lub pc).","invalidHtmlLength":"Wartość podana dla pola \"%1\" musi być liczbą dodatnią bez jednostki lub z poprawną jednostką długości zgodną z HTML (px lub %).","invalidInlineStyle":"Wartość podana dla stylu musi składać się z jednej lub większej liczby krotek w formacie \"nazwa : wartość\", rozdzielonych średnikami.","cssLengthTooltip":"Wpisz liczbę dla wartości w pikselach lub liczbę wraz z jednostką długości zgodną z CSS (px, %, in, cm, mm, em, ex, pt lub pc).","unavailable":"%1<span class=\"cke_accessibility\">, niedostępne</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"spacja","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Skrót klawiszowy","optionDefault":"Domyślny"},"about":{"copy":"Copyright © $1. 
Wszelkie prawa zastrzeżone.","dlgTitle":"Informacje o programie CKEditor 4","moreInfo":"Informacje na temat licencji można znaleźć na naszej stronie:"},"basicstyles":{"bold":"Pogrubienie","italic":"Kursywa","strike":"Przekreślenie","subscript":"Indeks dolny","superscript":"Indeks górny","underline":"Podkreślenie"},"bidi":{"ltr":"Kierunek tekstu od lewej strony do prawej","rtl":"Kierunek tekstu od prawej strony do lewej"},"blockquote":{"toolbar":"Cytat"},"notification":{"closed":"Powiadomienie zostało zamknięte."},"toolbar":{"toolbarCollapse":"Zwiń pasek narzędzi","toolbarExpand":"Rozwiń pasek narzędzi","toolbarGroups":{"document":"Dokument","clipboard":"Schowek/Wstecz","editing":"Edycja","forms":"Formularze","basicstyles":"Style podstawowe","paragraph":"Akapit","links":"Hiperłącza","insert":"Wstawianie","styles":"Style","colors":"Kolory","tools":"Narzędzia"},"toolbars":"Paski narzędzi edytora"},"clipboard":{"copy":"Kopiuj","copyError":"Ustawienia bezpieczeństwa Twojej przeglądarki nie pozwalają na automatyczne kopiowanie tekstu. Użyj skrótu klawiszowego Ctrl/Cmd+C.","cut":"Wytnij","cutError":"Ustawienia bezpieczeństwa Twojej przeglądarki nie pozwalają na automatyczne wycinanie tekstu. Użyj skrótu klawiszowego Ctrl/Cmd+X.","paste":"Wklej","pasteNotification":"Naciśnij %1 by wkleić tekst. Twoja przeglądarka nie pozwala na wklejanie za pomocą przycisku paska narzędzi lub opcji menu kontekstowego.","pasteArea":"Miejsce do wklejenia treści","pasteMsg":"Wklej treść do obszaru poniżej i naciśnij OK."},"colorbutton":{"auto":"Automatycznie","bgColorTitle":"Kolor tła","colors":{"000":"Czarny","800000":"Kasztanowy","8B4513":"Czekoladowy","2F4F4F":"Ciemnografitowy","008080":"Morski","000080":"Granatowy","4B0082":"Indygo","696969":"Ciemnoszary","B22222":"Czerwień żelazowa","A52A2A":"Brązowy","DAA520":"Ciemnozłoty","006400":"Ciemnozielony","40E0D0":"Turkusowy","0000CD":"Ciemnoniebieski","800080":"Purpurowy","808080":"Szary","F00":"Czerwony","FF8C00":"Ciemnopomarańczowy","FFD700":"Złoty","008000":"Zielony","0FF":"Cyjan","00F":"Niebieski","EE82EE":"Fioletowy","A9A9A9":"Przygaszony szary","FFA07A":"Łososiowy","FFA500":"Pomarańczowy","FFFF00":"Żółty","00FF00":"Limonkowy","AFEEEE":"Bladoturkusowy","ADD8E6":"Jasnoniebieski","DDA0DD":"Śliwkowy","D3D3D3":"Jasnoszary","FFF0F5":"Jasnolawendowy","FAEBD7":"Kremowobiały","FFFFE0":"Jasnożółty","F0FFF0":"Bladozielony","F0FFFF":"Jasnolazurowy","F0F8FF":"Jasnobłękitny","E6E6FA":"Lawendowy","FFF":"Biały","1ABC9C":"Cyjan","2ECC71":"Szmaragdowy","3498DB":"Jasnoniebieski","9B59B6":"Ametystowy","4E5F70":"Szaroniebieski","F1C40F":"Żółty","16A085":"Ciemny cyjan","27AE60":"Ciemnoszmaragdowy","2980B9":"Ciemnoniebieski","8E44AD":"Ciemnofioletowy","2C3E50":"Nienasycony niebieski","F39C12":"Pomarańczowy","E67E22":"Marchewkowy","E74C3C":"Bladoczerwony","ECF0F1":"Jasnosrebrny","95A5A6":"Szarocyjanowy","DDD":"Jasnoszary","D35400":"Dyniowy","C0392B":"Ciemnoczerwony","BDC3C7":"Srebrny","7F8C8D":"Szarawy cyjan","999":"Ciemnoszary"},"more":"Więcej kolorów...","panelTitle":"Kolory","textColorTitle":"Kolor tekstu"},"colordialog":{"clear":"Wyczyść","highlight":"Zaznacz","options":"Opcje koloru","selected":"Wybrany","title":"Wybierz kolor"},"templates":{"button":"Szablony","emptyListMsg":"(Brak zdefiniowanych szablonów)","insertOption":"Zastąp obecną zawartość","options":"Opcje szablonów","selectPromptMsg":"Wybierz szablon do otwarcia w edytorze<br>(obecna zawartość okna edytora zostanie utracona):","title":"Szablony zawartości"},"contextmenu":{"options":"Opcje menu 
kontekstowego"},"copyformatting":{"label":"Kopiuj formatowanie","notification":{"copied":"Formatowanie skopiowane","applied":"Formatowanie zastosowane","canceled":"Formatowanie przerwane","failed":"Formatowanie nie powiodło się. Nie możesz zastosować stylów bez uprzedniego ich skopiowania."}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Opis obiektu docelowego","cssClassInputLabel":"Klasy arkusza stylów","edit":"Edytuj pojemnik Div","inlineStyleInputLabel":"Style liniowe","langDirLTRLabel":"Od lewej do prawej (LTR)","langDirLabel":"Kierunek tekstu","langDirRTLLabel":"Od prawej do lewej (RTL)","languageCodeInputLabel":"Kod języka","remove":"Usuń pojemnik Div","styleSelectLabel":"Styl","title":"Utwórz pojemnik Div","toolbar":"Utwórz pojemnik Div"},"elementspath":{"eleLabel":"Ścieżka elementów","eleTitle":"element %1"},"filetools":{"loadError":"Błąd podczas odczytu pliku.","networkError":"W trakcie wysyłania pliku pojawił się błąd sieciowy.","httpError404":"Błąd HTTP w trakcie wysyłania pliku (404: Nie znaleziono pliku).","httpError403":"Błąd HTTP w trakcie wysyłania pliku (403: Zabroniony).","httpError":"Błąd HTTP w trakcie wysyłania pliku (status błędu: %1).","noUrlError":"Nie zdefiniowano adresu URL do przesłania pliku.","responseError":"Niepoprawna odpowiedź serwera."},"find":{"find":"Znajdź","findOptions":"Opcje wyszukiwania","findWhat":"Znajdź:","matchCase":"Uwzględnij wielkość liter","matchCyclic":"Cykliczne dopasowanie","matchWord":"Całe słowa","notFoundMsg":"Nie znaleziono szukanego hasła.","replace":"Zamień","replaceAll":"Zamień wszystko","replaceSuccessMsg":"%1 wystąpień zastąpionych.","replaceWith":"Zastąp przez:","title":"Znajdź i zamień"},"fakeobjects":{"anchor":"Kotwica","flash":"Animacja Flash","hiddenfield":"Pole ukryte","iframe":"IFrame","unknown":"Nieznany obiekt"},"flash":{"access":"Dostęp skryptów","accessAlways":"Zawsze","accessNever":"Nigdy","accessSameDomain":"Ta sama domena","alignAbsBottom":"Do dołu","alignAbsMiddle":"Do środka w pionie","alignBaseline":"Do linii bazowej","alignTextTop":"Do góry tekstu","bgcolor":"Kolor tła","chkFull":"Zezwól na pełny ekran","chkLoop":"Pętla","chkMenu":"Włącz menu","chkPlay":"Autoodtwarzanie","flashvars":"Zmienne obiektu Flash","hSpace":"Odstęp poziomy","properties":"Właściwości obiektu Flash","propertiesTab":"Właściwości","quality":"Jakość","qualityAutoHigh":"Auto wysoka","qualityAutoLow":"Auto niska","qualityBest":"Najlepsza","qualityHigh":"Wysoka","qualityLow":"Niska","qualityMedium":"Średnia","scale":"Skaluj","scaleAll":"Pokaż wszystko","scaleFit":"Dokładne dopasowanie","scaleNoBorder":"Bez obramowania","title":"Właściwości obiektu Flash","vSpace":"Odstęp pionowy","validateHSpace":"Odstęp poziomy musi być liczbą.","validateSrc":"Podaj adres URL","validateVSpace":"Odstęp pionowy musi być liczbą.","windowMode":"Tryb okna","windowModeOpaque":"Nieprzezroczyste","windowModeTransparent":"Przezroczyste","windowModeWindow":"Okno"},"font":{"fontSize":{"label":"Rozmiar","voiceLabel":"Rozmiar czcionki","panelTitle":"Rozmiar"},"label":"Czcionka","panelTitle":"Czcionka","voiceLabel":"Czcionka"},"forms":{"button":{"title":"Właściwości przycisku","text":"Tekst (Wartość)","type":"Typ","typeBtn":"Przycisk","typeSbm":"Wyślij","typeRst":"Wyczyść"},"checkboxAndRadio":{"checkboxTitle":"Właściwości pola wyboru (checkbox)","radioTitle":"Właściwości przycisku opcji (radio)","value":"Wartość","selected":"Zaznaczone","required":"Wymagane"},"form":{"title":"Właściwości formularza","menu":"Właściwości 
formularza","action":"Akcja","method":"Metoda","encoding":"Kodowanie"},"hidden":{"title":"Właściwości pola ukrytego","name":"Nazwa","value":"Wartość"},"select":{"title":"Właściwości listy wyboru","selectInfo":"Informacje","opAvail":"Dostępne opcje","value":"Wartość","size":"Rozmiar","lines":"wierszy","chkMulti":"Wielokrotny wybór","required":"Wymagane","opText":"Tekst","opValue":"Wartość","btnAdd":"Dodaj","btnModify":"Zmień","btnUp":"Do góry","btnDown":"Do dołu","btnSetValue":"Ustaw jako zaznaczoną","btnDelete":"Usuń"},"textarea":{"title":"Właściwości obszaru tekstowego","cols":"Liczba kolumn","rows":"Liczba wierszy"},"textfield":{"title":"Właściwości pola tekstowego","name":"Nazwa","value":"Wartość","charWidth":"Szerokość w znakach","maxChars":"Szerokość maksymalna","required":"Wymagane","type":"Typ","typeText":"Tekst","typePass":"Hasło","typeEmail":"Email","typeSearch":"Szukaj","typeTel":"Numer telefonu","typeUrl":"Adres URL"}},"format":{"label":"Format","panelTitle":"Format","tag_address":"Adres","tag_div":"Normalny (DIV)","tag_h1":"Nagłówek 1","tag_h2":"Nagłówek 2","tag_h3":"Nagłówek 3","tag_h4":"Nagłówek 4","tag_h5":"Nagłówek 5","tag_h6":"Nagłówek 6","tag_p":"Normalny","tag_pre":"Tekst sformatowany"},"horizontalrule":{"toolbar":"Wstaw poziomą linię"},"iframe":{"border":"Pokaż obramowanie obiektu IFrame","noUrl":"Podaj adres URL elementu IFrame","scrolling":"Włącz paski przewijania","title":"Właściwości elementu IFrame","toolbar":"IFrame"},"image":{"alt":"Tekst zastępczy","border":"Obramowanie","btnUpload":"Wyślij","button2Img":"Czy chcesz przekonwertować zaznaczony przycisk graficzny do zwykłego obrazka?","hSpace":"Odstęp poziomy","img2Button":"Czy chcesz przekonwertować zaznaczony obrazek do przycisku graficznego?","infoTab":"Informacje o obrazku","linkTab":"Hiperłącze","lockRatio":"Zablokuj proporcje","menu":"Właściwości obrazka","resetSize":"Przywróć rozmiar","title":"Właściwości obrazka","titleButton":"Właściwości przycisku graficznego","upload":"Wyślij","urlMissing":"Podaj adres URL obrazka.","vSpace":"Odstęp pionowy","validateBorder":"Wartość obramowania musi być liczbą całkowitą.","validateHSpace":"Wartość odstępu poziomego musi być liczbą całkowitą.","validateVSpace":"Wartość odstępu pionowego musi być liczbą całkowitą."},"indent":{"indent":"Zwiększ wcięcie","outdent":"Zmniejsz wcięcie"},"smiley":{"options":"Opcje emotikonów","title":"Wstaw emotikona","toolbar":"Emotikony"},"language":{"button":"Ustaw język","remove":"Usuń język"},"link":{"acccessKey":"Klawisz dostępu","advanced":"Zaawansowane","advisoryContentType":"Typ MIME obiektu docelowego","advisoryTitle":"Opis obiektu docelowego","anchor":{"toolbar":"Wstaw/edytuj kotwicę","menu":"Właściwości kotwicy","title":"Właściwości kotwicy","name":"Nazwa kotwicy","errorName":"Podaj nazwę kotwicy.","remove":"Usuń kotwicę"},"anchorId":"Wg identyfikatora","anchorName":"Wg nazwy","charset":"Kodowanie znaków obiektu docelowego","cssClasses":"Nazwa klasy CSS","download":"Wymuś pobieranie","displayText":"Wyświetlany tekst","emailAddress":"Adres e-mail","emailBody":"Treść","emailSubject":"Temat","id":"Id","info":"Informacje ","langCode":"Kod języka","langDir":"Kierunek tekstu","langDirLTR":"Od lewej do prawej (LTR)","langDirRTL":"Od prawej do lewej (RTL)","menu":"Edytuj odnośnik","name":"Nazwa","noAnchors":"(W dokumencie nie zdefiniowano żadnych kotwic)","noEmail":"Podaj adres e-mail.","noUrl":"Podaj adres URL.","noTel":"Podaj numer telefonu.","other":"<inny>","phoneNumber":"Numer telefonu","popupDependent":"Okno zależne 
(Netscape)","popupFeatures":"Właściwości wyskakującego okna","popupFullScreen":"Pełny ekran (IE)","popupLeft":"Pozycja w poziomie","popupLocationBar":"Pasek adresu","popupMenuBar":"Pasek menu","popupResizable":"Skalowalny","popupScrollBars":"Paski przewijania","popupStatusBar":"Pasek statusu","popupToolbar":"Pasek narzędzi","popupTop":"Pozycja w pionie","rel":"Relacja","selectAnchor":"Wybierz kotwicę","styles":"Styl","tabIndex":"Indeks kolejności","target":"Obiekt docelowy","targetFrame":"<ramka>","targetFrameName":"Nazwa ramki docelowej","targetPopup":"<wyskakujące okno>","targetPopupName":"Nazwa wyskakującego okna","title":"Odnośnik","toAnchor":"Odnośnik wewnątrz strony (kotwica)","toEmail":"Adres e-mail","toUrl":"Adres URL","toPhone":"Telefon","toolbar":"Wstaw/edytuj odnośnik","type":"Typ odnośnika","unlink":"Usuń odnośnik","upload":"Wyślij"},"list":{"bulletedlist":"Lista wypunktowana","numberedlist":"Lista numerowana"},"liststyle":{"bulletedTitle":"Właściwości list wypunktowanych","circle":"Koło","decimal":"Liczby (1, 2, 3 itd.)","disc":"Okrąg","lowerAlpha":"Małe litery (a, b, c, d, e itd.)","lowerRoman":"Małe cyfry rzymskie (i, ii, iii, iv, v itd.)","none":"Brak","notset":"<nie ustawiono>","numberedTitle":"Właściwości list numerowanych","square":"Kwadrat","start":"Początek","type":"Typ punktora","upperAlpha":"Duże litery (A, B, C, D, E itd.)","upperRoman":"Duże cyfry rzymskie (I, II, III, IV, V itd.)","validateStartNumber":"Listę musi rozpoczynać liczba całkowita."},"magicline":{"title":"Wstaw nowy akapit"},"maximize":{"maximize":"Maksymalizuj","minimize":"Minimalizuj"},"newpage":{"toolbar":"Nowa strona"},"pagebreak":{"alt":"Wstaw podział strony","toolbar":"Wstaw podział strony"},"pastetext":{"button":"Wklej jako czysty tekst","pasteNotification":"Naciśnij %1 by wkleić tekst. Twoja przeglądarka nie obsługuje wklejania za pomocą przycisku paska narzędzi lub opcji menu kontekstowego.","title":"Wklej jako czysty tekst"},"pastefromword":{"confirmCleanup":"Tekst, który chcesz wkleić, prawdopodobnie pochodzi z programu Microsoft Word. 
Czy chcesz go wyczyścić przed wklejeniem?","error":"Wyczyszczenie wklejonych danych nie było możliwe z powodu wystąpienia błędu.","title":"Wklej z programu MS Word","toolbar":"Wklej z programu MS Word"},"preview":{"preview":"Podgląd"},"print":{"toolbar":"Drukuj"},"removeformat":{"toolbar":"Usuń formatowanie"},"save":{"toolbar":"Zapisz"},"selectall":{"toolbar":"Zaznacz wszystko"},"showblocks":{"toolbar":"Pokaż bloki"},"sourcearea":{"toolbar":"Źródło dokumentu"},"specialchar":{"options":"Opcje znaków specjalnych","title":"Wybierz znak specjalny","toolbar":"Wstaw znak specjalny"},"scayt":{"btn_about":"Informacje o SCAYT","btn_dictionaries":"Słowniki","btn_disable":"Wyłącz SCAYT","btn_enable":"Włącz SCAYT","btn_langs":"Języki","btn_options":"Opcje","text_title":"Sprawdź pisownię podczas pisania (SCAYT)"},"stylescombo":{"label":"Styl","panelTitle":"Style formatujące","panelTitle1":"Style blokowe","panelTitle2":"Style liniowe","panelTitle3":"Style obiektowe"},"table":{"border":"Grubość obramowania","caption":"Tytuł","cell":{"menu":"Komórka","insertBefore":"Wstaw komórkę z lewej","insertAfter":"Wstaw komórkę z prawej","deleteCell":"Usuń komórki","merge":"Połącz komórki","mergeRight":"Połącz z komórką z prawej","mergeDown":"Połącz z komórką poniżej","splitHorizontal":"Podziel komórkę poziomo","splitVertical":"Podziel komórkę pionowo","title":"Właściwości komórki","cellType":"Typ komórki","rowSpan":"Scalenie wierszy","colSpan":"Scalenie komórek","wordWrap":"Zawijanie słów","hAlign":"Wyrównanie poziome","vAlign":"Wyrównanie pionowe","alignBaseline":"Linia bazowa","bgColor":"Kolor tła","borderColor":"Kolor obramowania","data":"Dane","header":"Nagłówek","yes":"Tak","no":"Nie","invalidWidth":"Szerokość komórki musi być liczbą.","invalidHeight":"Wysokość komórki musi być liczbą.","invalidRowSpan":"Scalenie wierszy musi być liczbą całkowitą.","invalidColSpan":"Scalenie komórek musi być liczbą całkowitą.","chooseColor":"Wybierz"},"cellPad":"Dopełnienie komórek","cellSpace":"Odstęp pomiędzy komórkami","column":{"menu":"Kolumna","insertBefore":"Wstaw kolumnę z lewej","insertAfter":"Wstaw kolumnę z prawej","deleteColumn":"Usuń kolumny"},"columns":"Liczba kolumn","deleteTable":"Usuń tabelę","headers":"Nagłówki","headersBoth":"Oba","headersColumn":"Pierwsza kolumna","headersNone":"Brak","headersRow":"Pierwszy wiersz","heightUnit":"height unit","invalidBorder":"Wartość obramowania musi być liczbą.","invalidCellPadding":"Dopełnienie komórek musi być liczbą dodatnią.","invalidCellSpacing":"Odstęp pomiędzy komórkami musi być liczbą dodatnią.","invalidCols":"Liczba kolumn musi być większa niż 0.","invalidHeight":"Wysokość tabeli musi być liczbą.","invalidRows":"Liczba wierszy musi być większa niż 0.","invalidWidth":"Szerokość tabeli musi być liczbą.","menu":"Właściwości tabeli","row":{"menu":"Wiersz","insertBefore":"Wstaw wiersz powyżej","insertAfter":"Wstaw wiersz poniżej","deleteRow":"Usuń wiersze"},"rows":"Liczba wierszy","summary":"Podsumowanie","title":"Właściwości tabeli","toolbar":"Tabela","widthPc":"%","widthPx":"piksele","widthUnit":"jednostka szerokości"},"undo":{"redo":"Ponów","undo":"Cofnij"},"widget":{"move":"Kliknij i przeciągnij, by przenieść.","label":"Widget %1"},"uploadwidget":{"abort":"Wysyłanie przerwane przez użytkownika.","doneOne":"Plik został pomyślnie wysłany.","doneMany":"Pomyślnie wysłane pliki: %1.","uploadOne":"Wysyłanie pliku ({percentage}%)...","uploadMany":"Wysyłanie plików, gotowe {current} z {max} ({percentage}%)..."},"wsc":{"btnIgnore":"Ignoruj","btnIgnoreAll":"Ignoruj 
wszystkie","btnReplace":"Zmień","btnReplaceAll":"Zmień wszystkie","btnUndo":"Cofnij","changeTo":"Zmień na","errorLoading":"Błąd wczytywania hosta aplikacji usługi: %s.","ieSpellDownload":"Słownik nie jest zainstalowany. Czy chcesz go pobrać?","manyChanges":"Sprawdzanie zakończone: zmieniono %l słów","noChanges":"Sprawdzanie zakończone: nie zmieniono żadnego słowa","noMispell":"Sprawdzanie zakończone: nie znaleziono błędów","noSuggestions":"- Brak sugestii -","notAvailable":"Przepraszamy, ale usługa jest obecnie niedostępna.","notInDic":"Słowa nie ma w słowniku","oneChange":"Sprawdzanie zakończone: zmieniono jedno słowo","progress":"Trwa sprawdzanie...","title":"Sprawdź pisownię","toolbar":"Sprawdź pisownię"}}; | PypiClean |
/githubkit-0.10.7-py3-none-any.whl/githubkit/rest/actions.py | from datetime import datetime
from typing import TYPE_CHECKING, Dict, List, Union, Literal, Optional, overload
from pydantic import BaseModel, parse_obj_as
from githubkit.utils import UNSET, Missing, exclude_unset
from .types import (
SelectedActionsType,
ReviewCustomGatesStateRequiredType,
OrgsOrgActionsVariablesPostBodyType,
OrgsOrgActionsPermissionsPutBodyType,
ReviewCustomGatesCommentRequiredType,
ActionsWorkflowAccessToRepositoryType,
ActionsSetDefaultWorkflowPermissionsType,
OrgsOrgActionsVariablesNamePatchBodyType,
OrgsOrgActionsSecretsSecretNamePutBodyType,
ReposOwnerRepoActionsVariablesPostBodyType,
ReposOwnerRepoActionsPermissionsPutBodyType,
OrgsOrgActionsRunnersRunnerIdLabelsPutBodyType,
OrgsOrgActionsRunnersRunnerIdLabelsPostBodyType,
ReposOwnerRepoActionsJobsJobIdRerunPostBodyType,
ReposOwnerRepoActionsRunsRunIdRerunPostBodyType,
ReposOwnerRepoActionsVariablesNamePatchBodyType,
OrgsOrgActionsPermissionsRepositoriesPutBodyType,
ReposOwnerRepoActionsSecretsSecretNamePutBodyType,
OrgsOrgActionsRunnersGenerateJitconfigPostBodyType,
OrgsOrgActionsVariablesNameRepositoriesPutBodyType,
ReposOwnerRepoActionsOidcCustomizationSubPutBodyType,
ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBodyType,
OrgsOrgActionsSecretsSecretNameRepositoriesPutBodyType,
ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBodyType,
ReposOwnerRepoActionsRunnersGenerateJitconfigPostBodyType,
ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBodyType,
ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBodyType,
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyType,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBodyType,
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyPropInputsType,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBodyType,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBodyType,
)
from .models import (
Job,
Runner,
Artifact,
Workflow,
BasicError,
Deployment,
EmptyObject,
WorkflowRun,
ActionsSecret,
WorkflowUsage,
ActionsVariable,
SelectedActions,
ActionsCacheList,
ActionsPublicKey,
WorkflowRunUsage,
OidcCustomSubRepo,
PendingDeployment,
RunnerApplication,
AuthenticationToken,
EnvironmentApprovals,
ValidationErrorSimple,
OrganizationActionsSecret,
OrganizationActionsVariable,
ActionsRepositoryPermissions,
ActionsCacheUsageByRepository,
ActionsCacheUsageOrgEnterprise,
ActionsOrganizationPermissions,
ReviewCustomGatesStateRequired,
OrgsOrgActionsVariablesPostBody,
OrgsOrgActionsPermissionsPutBody,
ReviewCustomGatesCommentRequired,
ActionsWorkflowAccessToRepository,
OrgsOrgActionsRunnersGetResponse200,
OrgsOrgActionsSecretsGetResponse200,
ActionsGetDefaultWorkflowPermissions,
ActionsSetDefaultWorkflowPermissions,
OrgsOrgActionsVariablesNamePatchBody,
OrgsOrgActionsVariablesGetResponse200,
OrgsOrgActionsSecretsSecretNamePutBody,
ReposOwnerRepoActionsVariablesPostBody,
ReposOwnerRepoActionsPermissionsPutBody,
ReposOwnerRepoActionsRunsGetResponse200,
OrgsOrgActionsRunnersRunnerIdLabelsPutBody,
ReposOwnerRepoActionsRunnersGetResponse200,
ReposOwnerRepoActionsSecretsGetResponse200,
OrgsOrgActionsRunnersRunnerIdLabelsPostBody,
ReposOwnerRepoActionsJobsJobIdRerunPostBody,
ReposOwnerRepoActionsRunsRunIdRerunPostBody,
ReposOwnerRepoActionsVariablesNamePatchBody,
OrgsOrgActionsPermissionsRepositoriesPutBody,
ReposOwnerRepoActionsArtifactsGetResponse200,
ReposOwnerRepoActionsVariablesGetResponse200,
ReposOwnerRepoActionsWorkflowsGetResponse200,
ReposOwnerRepoActionsSecretsSecretNamePutBody,
OrgsOrgActionsRunnersGenerateJitconfigPostBody,
OrgsOrgActionsVariablesNameRepositoriesPutBody,
ReposOwnerRepoActionsOidcCustomizationSubPutBody,
ReposOwnerRepoActionsRunsRunIdJobsGetResponse200,
OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBody,
OrgsOrgActionsCacheUsageByRepositoryGetResponse200,
OrgsOrgActionsSecretsSecretNameRepositoriesPutBody,
ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBody,
OrgsOrgActionsPermissionsRepositoriesGetResponse200,
OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200,
OrgsOrgActionsRunnersGenerateJitconfigPostResponse201,
OrgsOrgActionsVariablesNameRepositoriesGetResponse200,
ReposOwnerRepoActionsRunnersGenerateJitconfigPostBody,
ReposOwnerRepoActionsRunsRunIdArtifactsGetResponse200,
ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBody,
ReposOwnerRepoActionsOrganizationSecretsGetResponse200,
ReposOwnerRepoActionsOrganizationVariablesGetResponse200,
ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBody,
OrgsOrgActionsSecretsSecretNameRepositoriesGetResponse200,
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBody,
ReposOwnerRepoActionsWorkflowsWorkflowIdRunsGetResponse200,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBody,
ReposOwnerRepoActionsRunsRunIdAttemptsAttemptNumberJobsGetResponse200,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsGetResponse200,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBody,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesGetResponse200,
RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBody,
)
if TYPE_CHECKING:
from githubkit import GitHubCore
from githubkit.response import Response
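# REST client for the GitHub Actions API surface (organization permissions,
# self-hosted runners, secrets, variables, workflows and workflow runs).
# Every endpoint below is pinned to API version "2022-11-28" via the
# X-GitHub-Api-Version header and comes in two flavours: a synchronous method
# built on GitHubCore.request and an `async_`-prefixed twin built on
# GitHubCore.arequest.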
class ActionsClient:
_REST_API_VERSION = "2022-11-28"
def __init__(self, github: "GitHubCore"):
self._github = github
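    # A minimal usage sketch (it assumes the usual githubkit conventions that a
    # configured `GitHub` instance exposes this client as `gh.rest.actions` and
    # that `Response.parsed_data` holds the parsed model; neither is defined in
    # this module):
    #
    #     from githubkit import GitHub
    #
    #     gh = GitHub("<personal-access-token>")
    #     client = gh.rest.actions
    #     resp = client.get_actions_cache_usage_for_org("my-org")
    #     usage = resp.parsed_data  # -> ActionsCacheUsageOrgEnterprise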
def get_actions_cache_usage_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheUsageOrgEnterprise]":
url = f"/orgs/{org}/actions/cache/usage"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsCacheUsageOrgEnterprise,
)
async def async_get_actions_cache_usage_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheUsageOrgEnterprise]":
url = f"/orgs/{org}/actions/cache/usage"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsCacheUsageOrgEnterprise,
)
def get_actions_cache_usage_by_repo_for_org(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsCacheUsageByRepositoryGetResponse200]":
url = f"/orgs/{org}/actions/cache/usage-by-repository"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsCacheUsageByRepositoryGetResponse200,
)
async def async_get_actions_cache_usage_by_repo_for_org(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsCacheUsageByRepositoryGetResponse200]":
url = f"/orgs/{org}/actions/cache/usage-by-repository"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsCacheUsageByRepositoryGetResponse200,
)
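    # The list endpoints in this module take the usual GitHub pagination query
    # parameters: `per_page` (defaults vary per endpoint, 30 here) and a
    # 1-based `page`. A hedged sketch for walking every page, assuming `client`
    # is an instance of this class and that the response model carries a
    # `repository_cache_usages` list per the GitHub REST schema for this
    # endpoint (not shown in this file):
    #
    #     usages = []
    #     page = 1
    #     while True:
    #         batch = client.get_actions_cache_usage_by_repo_for_org(
    #             "my-org", per_page=100, page=page
    #         ).parsed_data.repository_cache_usages
    #         if not batch:
    #             break
    #         usages.extend(batch)
    #         page += 1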
def get_github_actions_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsOrganizationPermissions]":
url = f"/orgs/{org}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsOrganizationPermissions,
)
async def async_get_github_actions_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsOrganizationPermissions]":
url = f"/orgs/{org}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsOrganizationPermissions,
)
@overload
def set_github_actions_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsPermissionsPutBodyType,
) -> "Response":
...
@overload
def set_github_actions_permissions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enabled_repositories: Literal["all", "none", "selected"],
allowed_actions: Missing[Literal["all", "local_only", "selected"]] = UNSET,
) -> "Response":
...
def set_github_actions_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsPermissionsPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsPermissionsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_github_actions_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsPermissionsPutBodyType,
) -> "Response":
...
@overload
async def async_set_github_actions_permissions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enabled_repositories: Literal["all", "none", "selected"],
allowed_actions: Missing[Literal["all", "local_only", "selected"]] = UNSET,
) -> "Response":
...
async def async_set_github_actions_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsPermissionsPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsPermissionsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
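    # Calling convention for the write endpoints in this module: the request
    # body can be supplied either as a prebuilt typed dict via `data=` or as
    # plain keyword arguments; the overloads above make the two forms mutually
    # exclusive, and both are validated through parse_obj_as before sending.
    # For example, these two calls are equivalent:
    #
    #     client.set_github_actions_permissions_organization(
    #         "my-org", data={"enabled_repositories": "selected"}
    #     )
    #     client.set_github_actions_permissions_organization(
    #         "my-org", enabled_repositories="selected"
    #     )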
def list_selected_repositories_enabled_github_actions_organization(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsPermissionsRepositoriesGetResponse200]":
url = f"/orgs/{org}/actions/permissions/repositories"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsPermissionsRepositoriesGetResponse200,
)
async def async_list_selected_repositories_enabled_github_actions_organization(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsPermissionsRepositoriesGetResponse200]":
url = f"/orgs/{org}/actions/permissions/repositories"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsPermissionsRepositoriesGetResponse200,
)
@overload
def set_selected_repositories_enabled_github_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsPermissionsRepositoriesPutBodyType,
) -> "Response":
...
@overload
def set_selected_repositories_enabled_github_actions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
selected_repository_ids: List[int],
) -> "Response":
...
def set_selected_repositories_enabled_github_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsPermissionsRepositoriesPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/repositories"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsPermissionsRepositoriesPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_selected_repositories_enabled_github_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsPermissionsRepositoriesPutBodyType,
) -> "Response":
...
@overload
async def async_set_selected_repositories_enabled_github_actions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
selected_repository_ids: List[int],
) -> "Response":
...
async def async_set_selected_repositories_enabled_github_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsPermissionsRepositoriesPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/repositories"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsPermissionsRepositoriesPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def enable_selected_repository_github_actions_organization(
self,
org: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"PUT",
url,
headers=exclude_unset(headers),
)
async def async_enable_selected_repository_github_actions_organization(
self,
org: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"PUT",
url,
headers=exclude_unset(headers),
)
def disable_selected_repository_github_actions_organization(
self,
org: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_disable_selected_repository_github_actions_organization(
self,
org: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
def get_allowed_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[SelectedActions]":
url = f"/orgs/{org}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=SelectedActions,
)
async def async_get_allowed_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[SelectedActions]":
url = f"/orgs/{org}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=SelectedActions,
)
@overload
def set_allowed_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
) -> "Response":
...
@overload
def set_allowed_actions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
github_owned_allowed: Missing[bool] = UNSET,
verified_allowed: Missing[bool] = UNSET,
patterns_allowed: Missing[List[str]] = UNSET,
) -> "Response":
...
def set_allowed_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(SelectedActions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_allowed_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
) -> "Response":
...
@overload
async def async_set_allowed_actions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
github_owned_allowed: Missing[bool] = UNSET,
verified_allowed: Missing[bool] = UNSET,
patterns_allowed: Missing[List[str]] = UNSET,
) -> "Response":
...
async def async_set_allowed_actions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(SelectedActions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def get_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsGetDefaultWorkflowPermissions]":
url = f"/orgs/{org}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsGetDefaultWorkflowPermissions,
)
async def async_get_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsGetDefaultWorkflowPermissions]":
url = f"/orgs/{org}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsGetDefaultWorkflowPermissions,
)
@overload
def set_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsSetDefaultWorkflowPermissionsType] = UNSET,
) -> "Response":
...
@overload
def set_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
default_workflow_permissions: Missing[Literal["read", "write"]] = UNSET,
can_approve_pull_request_reviews: Missing[bool] = UNSET,
) -> "Response":
...
def set_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsSetDefaultWorkflowPermissionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ActionsSetDefaultWorkflowPermissions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsSetDefaultWorkflowPermissionsType] = UNSET,
) -> "Response":
...
@overload
async def async_set_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
default_workflow_permissions: Missing[Literal["read", "write"]] = UNSET,
can_approve_pull_request_reviews: Missing[bool] = UNSET,
) -> "Response":
...
async def async_set_github_actions_default_workflow_permissions_organization(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsSetDefaultWorkflowPermissionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ActionsSetDefaultWorkflowPermissions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def list_self_hosted_runners_for_org(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersGetResponse200]":
url = f"/orgs/{org}/actions/runners"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersGetResponse200,
)
async def async_list_self_hosted_runners_for_org(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersGetResponse200]":
url = f"/orgs/{org}/actions/runners"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersGetResponse200,
)
def list_runner_applications_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[RunnerApplication]]":
url = f"/orgs/{org}/actions/runners/downloads"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[RunnerApplication],
)
async def async_list_runner_applications_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[RunnerApplication]]":
url = f"/orgs/{org}/actions/runners/downloads"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[RunnerApplication],
)
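    # Runner provisioning helpers follow: generate_runner_jitconfig_for_org
    # posts to /orgs/{org}/actions/runners/generate-jitconfig (name,
    # runner_group_id and labels are required; work_folder defaults to
    # "_work"), while the registration-token / remove-token endpoints further
    # below return AuthenticationToken values for registering and removing
    # self-hosted runners.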
@overload
def generate_runner_jitconfig_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsRunnersGenerateJitconfigPostBodyType,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
@overload
def generate_runner_jitconfig_for_org(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
runner_group_id: int,
labels: List[str],
work_folder: Missing[str] = "_work",
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
def generate_runner_jitconfig_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsRunnersGenerateJitconfigPostBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
url = f"/orgs/{org}/actions/runners/generate-jitconfig"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsRunnersGenerateJitconfigPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersGenerateJitconfigPostResponse201,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
async def async_generate_runner_jitconfig_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsRunnersGenerateJitconfigPostBodyType,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
@overload
async def async_generate_runner_jitconfig_for_org(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
runner_group_id: int,
labels: List[str],
work_folder: Missing[str] = "_work",
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
async def async_generate_runner_jitconfig_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsRunnersGenerateJitconfigPostBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
url = f"/orgs/{org}/actions/runners/generate-jitconfig"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsRunnersGenerateJitconfigPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersGenerateJitconfigPostResponse201,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
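    # The `error_models` mapping passed above tells the underlying request how
    # to decode error bodies by status code for this endpoint: 404 responses
    # are parsed as BasicError and 422 responses as ValidationErrorSimple. The
    # same pattern recurs on the label endpoints below.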
def create_registration_token_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/orgs/{org}/actions/runners/registration-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
async def async_create_registration_token_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/orgs/{org}/actions/runners/registration-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
def create_remove_token_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/orgs/{org}/actions/runners/remove-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
async def async_create_remove_token_for_org(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/orgs/{org}/actions/runners/remove-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
def get_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Runner]":
url = f"/orgs/{org}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=Runner,
)
async def async_get_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Runner]":
url = f"/orgs/{org}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=Runner,
)
def delete_self_hosted_runner_from_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_self_hosted_runner_from_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
def list_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
},
)
async def async_list_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
},
)
@overload
def set_custom_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsRunnersRunnerIdLabelsPutBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
def set_custom_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
def set_custom_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsRunnersRunnerIdLabelsPutBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsRunnersRunnerIdLabelsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
async def async_set_custom_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsRunnersRunnerIdLabelsPutBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
async def async_set_custom_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
async def async_set_custom_labels_for_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsRunnersRunnerIdLabelsPutBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsRunnersRunnerIdLabelsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
def add_custom_labels_to_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsRunnersRunnerIdLabelsPostBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
def add_custom_labels_to_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
def add_custom_labels_to_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsRunnersRunnerIdLabelsPostBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsRunnersRunnerIdLabelsPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
async def async_add_custom_labels_to_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsRunnersRunnerIdLabelsPostBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
async def async_add_custom_labels_to_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
async def async_add_custom_labels_to_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsRunnersRunnerIdLabelsPostBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsRunnersRunnerIdLabelsPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
def remove_all_custom_labels_from_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200,
error_models={
"404": BasicError,
},
)
async def async_remove_all_custom_labels_from_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200,
error_models={
"404": BasicError,
},
)
def remove_custom_label_from_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
async def async_remove_custom_label_from_self_hosted_runner_for_org(
self,
org: str,
runner_id: int,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/orgs/{org}/actions/runners/{runner_id}/labels/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
def list_org_secrets(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsSecretsGetResponse200]":
url = f"/orgs/{org}/actions/secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsSecretsGetResponse200,
)
async def async_list_org_secrets(
self,
org: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsSecretsGetResponse200]":
url = f"/orgs/{org}/actions/secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsSecretsGetResponse200,
)
def get_org_public_key(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsPublicKey]":
url = f"/orgs/{org}/actions/secrets/public-key"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsPublicKey,
)
async def async_get_org_public_key(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsPublicKey]":
url = f"/orgs/{org}/actions/secrets/public-key"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsPublicKey,
)
def get_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrganizationActionsSecret]":
url = f"/orgs/{org}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrganizationActionsSecret,
)
async def async_get_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrganizationActionsSecret]":
url = f"/orgs/{org}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrganizationActionsSecret,
)
@overload
def create_or_update_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsSecretsSecretNamePutBodyType,
) -> "Response[EmptyObject]":
...
@overload
def create_or_update_org_secret(
self,
org: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
encrypted_value: Missing[str] = UNSET,
key_id: Missing[str] = UNSET,
visibility: Literal["all", "private", "selected"],
selected_repository_ids: Missing[List[int]] = UNSET,
) -> "Response[EmptyObject]":
...
def create_or_update_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsSecretsSecretNamePutBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/orgs/{org}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsSecretsSecretNamePutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_create_or_update_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsSecretsSecretNamePutBodyType,
) -> "Response[EmptyObject]":
...
@overload
async def async_create_or_update_org_secret(
self,
org: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
encrypted_value: Missing[str] = UNSET,
key_id: Missing[str] = UNSET,
visibility: Literal["all", "private", "selected"],
selected_repository_ids: Missing[List[int]] = UNSET,
) -> "Response[EmptyObject]":
...
async def async_create_or_update_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsSecretsSecretNamePutBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/orgs/{org}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsSecretsSecretNamePutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
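    # A hedged sketch of preparing `encrypted_value` for the two methods above.
    # GitHub expects the secret to be encrypted with the organization public
    # key (see get_org_public_key) using a libsodium sealed box; this example
    # assumes PyNaCl is installed, which is not a dependency of this module,
    # and that `client` is an instance of this class:
    #
    #     from base64 import b64encode
    #     from nacl import encoding, public
    #
    #     key = client.get_org_public_key("my-org").parsed_data
    #     sealed = public.SealedBox(
    #         public.PublicKey(key.key.encode(), encoding.Base64Encoder())
    #     ).encrypt(b"s3cr3t-value")
    #     client.create_or_update_org_secret(
    #         "my-org",
    #         "MY_SECRET",
    #         encrypted_value=b64encode(sealed).decode(),
    #         key_id=key.key_id,
    #         visibility="all",
    #     )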
def delete_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
def list_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
page: Missing[int] = 1,
per_page: Missing[int] = 30,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsSecretsSecretNameRepositoriesGetResponse200]":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories"
params = {
"page": page,
"per_page": per_page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsSecretsSecretNameRepositoriesGetResponse200,
)
async def async_list_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
page: Missing[int] = 1,
per_page: Missing[int] = 30,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsSecretsSecretNameRepositoriesGetResponse200]":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories"
params = {
"page": page,
"per_page": per_page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsSecretsSecretNameRepositoriesGetResponse200,
)
@overload
def set_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsSecretsSecretNameRepositoriesPutBodyType,
) -> "Response":
...
@overload
def set_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
selected_repository_ids: List[int],
) -> "Response":
...
def set_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsSecretsSecretNameRepositoriesPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsSecretsSecretNameRepositoriesPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsSecretsSecretNameRepositoriesPutBodyType,
) -> "Response":
...
@overload
async def async_set_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
selected_repository_ids: List[int],
) -> "Response":
...
async def async_set_selected_repos_for_org_secret(
self,
org: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsSecretsSecretNameRepositoriesPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsSecretsSecretNameRepositoriesPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def add_selected_repo_to_org_secret(
self,
org: str,
secret_name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"PUT",
url,
headers=exclude_unset(headers),
error_models={},
)
async def async_add_selected_repo_to_org_secret(
self,
org: str,
secret_name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"PUT",
url,
headers=exclude_unset(headers),
error_models={},
)
def remove_selected_repo_from_org_secret(
self,
org: str,
secret_name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
error_models={},
)
async def async_remove_selected_repo_from_org_secret(
self,
org: str,
secret_name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/secrets/{secret_name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
error_models={},
)
def list_org_variables(
self,
org: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsVariablesGetResponse200]":
url = f"/orgs/{org}/actions/variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsVariablesGetResponse200,
)
async def async_list_org_variables(
self,
org: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsVariablesGetResponse200]":
url = f"/orgs/{org}/actions/variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsVariablesGetResponse200,
)
@overload
def create_org_variable(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsVariablesPostBodyType,
) -> "Response[EmptyObject]":
...
@overload
def create_org_variable(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
value: str,
visibility: Literal["all", "private", "selected"],
selected_repository_ids: Missing[List[int]] = UNSET,
) -> "Response[EmptyObject]":
...
def create_org_variable(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsVariablesPostBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/orgs/{org}/actions/variables"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsVariablesPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_create_org_variable(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsVariablesPostBodyType,
) -> "Response[EmptyObject]":
...
@overload
async def async_create_org_variable(
self,
org: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
value: str,
visibility: Literal["all", "private", "selected"],
selected_repository_ids: Missing[List[int]] = UNSET,
) -> "Response[EmptyObject]":
...
async def async_create_org_variable(
self,
org: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsVariablesPostBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/orgs/{org}/actions/variables"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsVariablesPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
def get_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrganizationActionsVariable]":
url = f"/orgs/{org}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrganizationActionsVariable,
)
async def async_get_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrganizationActionsVariable]":
url = f"/orgs/{org}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrganizationActionsVariable,
)
def delete_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
@overload
def update_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsVariablesNamePatchBodyType,
) -> "Response":
...
@overload
def update_org_variable(
self,
org: str,
name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
value: Missing[str] = UNSET,
visibility: Missing[Literal["all", "private", "selected"]] = UNSET,
selected_repository_ids: Missing[List[int]] = UNSET,
) -> "Response":
...
def update_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsVariablesNamePatchBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsVariablesNamePatchBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PATCH",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_update_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsVariablesNamePatchBodyType,
) -> "Response":
...
@overload
async def async_update_org_variable(
self,
org: str,
name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
value: Missing[str] = UNSET,
visibility: Missing[Literal["all", "private", "selected"]] = UNSET,
selected_repository_ids: Missing[List[int]] = UNSET,
) -> "Response":
...
async def async_update_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsVariablesNamePatchBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsVariablesNamePatchBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PATCH",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def list_selected_repos_for_org_variable(
self,
org: str,
name: str,
page: Missing[int] = 1,
per_page: Missing[int] = 30,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsVariablesNameRepositoriesGetResponse200]":
url = f"/orgs/{org}/actions/variables/{name}/repositories"
params = {
"page": page,
"per_page": per_page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsVariablesNameRepositoriesGetResponse200,
error_models={},
)
async def async_list_selected_repos_for_org_variable(
self,
org: str,
name: str,
page: Missing[int] = 1,
per_page: Missing[int] = 30,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsVariablesNameRepositoriesGetResponse200]":
url = f"/orgs/{org}/actions/variables/{name}/repositories"
params = {
"page": page,
"per_page": per_page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsVariablesNameRepositoriesGetResponse200,
error_models={},
)
@overload
def set_selected_repos_for_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsVariablesNameRepositoriesPutBodyType,
) -> "Response":
...
@overload
def set_selected_repos_for_org_variable(
self,
org: str,
name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
selected_repository_ids: List[int],
) -> "Response":
...
def set_selected_repos_for_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsVariablesNameRepositoriesPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}/repositories"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsVariablesNameRepositoriesPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
error_models={},
)
@overload
async def async_set_selected_repos_for_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: OrgsOrgActionsVariablesNameRepositoriesPutBodyType,
) -> "Response":
...
@overload
async def async_set_selected_repos_for_org_variable(
self,
org: str,
name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
selected_repository_ids: List[int],
) -> "Response":
...
async def async_set_selected_repos_for_org_variable(
self,
org: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[OrgsOrgActionsVariablesNameRepositoriesPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}/repositories"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(OrgsOrgActionsVariablesNameRepositoriesPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
error_models={},
)
def add_selected_repo_to_org_variable(
self,
org: str,
name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"PUT",
url,
headers=exclude_unset(headers),
error_models={},
)
async def async_add_selected_repo_to_org_variable(
self,
org: str,
name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"PUT",
url,
headers=exclude_unset(headers),
error_models={},
)
def remove_selected_repo_from_org_variable(
self,
org: str,
name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
error_models={},
)
async def async_remove_selected_repo_from_org_variable(
self,
org: str,
name: str,
repository_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/orgs/{org}/actions/variables/{name}/repositories/{repository_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
error_models={},
)
def list_artifacts_for_repo(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
name: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsArtifactsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/artifacts"
params = {
"per_page": per_page,
"page": page,
"name": name,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsArtifactsGetResponse200,
)
async def async_list_artifacts_for_repo(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
name: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsArtifactsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/artifacts"
params = {
"per_page": per_page,
"page": page,
"name": name,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsArtifactsGetResponse200,
)
def get_artifact(
self,
owner: str,
repo: str,
artifact_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Artifact]":
url = f"/repos/{owner}/{repo}/actions/artifacts/{artifact_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=Artifact,
)
async def async_get_artifact(
self,
owner: str,
repo: str,
artifact_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Artifact]":
url = f"/repos/{owner}/{repo}/actions/artifacts/{artifact_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=Artifact,
)
def delete_artifact(
self,
owner: str,
repo: str,
artifact_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/artifacts/{artifact_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_artifact(
self,
owner: str,
repo: str,
artifact_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/artifacts/{artifact_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
def download_artifact(
self,
owner: str,
repo: str,
artifact_id: int,
archive_format: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/artifacts/{artifact_id}/{archive_format}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
error_models={
"410": BasicError,
},
)
async def async_download_artifact(
self,
owner: str,
repo: str,
artifact_id: int,
archive_format: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/artifacts/{artifact_id}/{archive_format}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
error_models={
"410": BasicError,
},
)
def get_actions_cache_usage(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheUsageByRepository]":
url = f"/repos/{owner}/{repo}/actions/cache/usage"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsCacheUsageByRepository,
)
async def async_get_actions_cache_usage(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheUsageByRepository]":
url = f"/repos/{owner}/{repo}/actions/cache/usage"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsCacheUsageByRepository,
)
def get_actions_cache_list(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
ref: Missing[str] = UNSET,
key: Missing[str] = UNSET,
sort: Missing[
Literal["created_at", "last_accessed_at", "size_in_bytes"]
] = "last_accessed_at",
direction: Missing[Literal["asc", "desc"]] = "desc",
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheList]":
url = f"/repos/{owner}/{repo}/actions/caches"
params = {
"per_page": per_page,
"page": page,
"ref": ref,
"key": key,
"sort": sort,
"direction": direction,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ActionsCacheList,
)
async def async_get_actions_cache_list(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
ref: Missing[str] = UNSET,
key: Missing[str] = UNSET,
sort: Missing[
Literal["created_at", "last_accessed_at", "size_in_bytes"]
] = "last_accessed_at",
direction: Missing[Literal["asc", "desc"]] = "desc",
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheList]":
url = f"/repos/{owner}/{repo}/actions/caches"
params = {
"per_page": per_page,
"page": page,
"ref": ref,
"key": key,
"sort": sort,
"direction": direction,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ActionsCacheList,
)
def delete_actions_cache_by_key(
self,
owner: str,
repo: str,
key: str,
ref: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheList]":
url = f"/repos/{owner}/{repo}/actions/caches"
params = {
"key": key,
"ref": ref,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ActionsCacheList,
)
async def async_delete_actions_cache_by_key(
self,
owner: str,
repo: str,
key: str,
ref: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsCacheList]":
url = f"/repos/{owner}/{repo}/actions/caches"
params = {
"key": key,
"ref": ref,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ActionsCacheList,
)
def delete_actions_cache_by_id(
self,
owner: str,
repo: str,
cache_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/caches/{cache_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_actions_cache_by_id(
self,
owner: str,
repo: str,
cache_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/caches/{cache_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
def get_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Job]":
url = f"/repos/{owner}/{repo}/actions/jobs/{job_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=Job,
)
async def async_get_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Job]":
url = f"/repos/{owner}/{repo}/actions/jobs/{job_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=Job,
)
def download_job_logs_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/jobs/{job_id}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
)
async def async_download_job_logs_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/jobs/{job_id}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
)
@overload
def re_run_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsJobsJobIdRerunPostBodyType, None]
] = UNSET,
) -> "Response[EmptyObject]":
...
@overload
def re_run_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enable_debug_logging: Missing[bool] = False,
) -> "Response[EmptyObject]":
...
def re_run_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsJobsJobIdRerunPostBodyType, None]
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/jobs/{job_id}/rerun"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReposOwnerRepoActionsJobsJobIdRerunPostBody, None], json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"403": BasicError,
},
)
@overload
async def async_re_run_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsJobsJobIdRerunPostBodyType, None]
] = UNSET,
) -> "Response[EmptyObject]":
...
@overload
async def async_re_run_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enable_debug_logging: Missing[bool] = False,
) -> "Response[EmptyObject]":
...
async def async_re_run_job_for_workflow_run(
self,
owner: str,
repo: str,
job_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsJobsJobIdRerunPostBodyType, None]
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/jobs/{job_id}/rerun"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReposOwnerRepoActionsJobsJobIdRerunPostBody, None], json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"403": BasicError,
},
)
def get_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OidcCustomSubRepo]":
url = f"/repos/{owner}/{repo}/actions/oidc/customization/sub"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=OidcCustomSubRepo,
error_models={
"400": BasicError,
"404": BasicError,
},
)
async def async_get_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OidcCustomSubRepo]":
url = f"/repos/{owner}/{repo}/actions/oidc/customization/sub"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=OidcCustomSubRepo,
error_models={
"400": BasicError,
"404": BasicError,
},
)
@overload
def set_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsOidcCustomizationSubPutBodyType,
) -> "Response[EmptyObject]":
...
@overload
def set_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
use_default: bool,
include_claim_keys: Missing[List[str]] = UNSET,
) -> "Response[EmptyObject]":
...
def set_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsOidcCustomizationSubPutBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/oidc/customization/sub"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsOidcCustomizationSubPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"404": BasicError,
"400": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
async def async_set_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsOidcCustomizationSubPutBodyType,
) -> "Response[EmptyObject]":
...
@overload
async def async_set_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
use_default: bool,
include_claim_keys: Missing[List[str]] = UNSET,
) -> "Response[EmptyObject]":
...
async def async_set_custom_oidc_sub_claim_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsOidcCustomizationSubPutBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/oidc/customization/sub"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsOidcCustomizationSubPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"404": BasicError,
"400": BasicError,
"422": ValidationErrorSimple,
},
)
def list_repo_organization_secrets(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsOrganizationSecretsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/organization-secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsOrganizationSecretsGetResponse200,
)
async def async_list_repo_organization_secrets(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsOrganizationSecretsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/organization-secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsOrganizationSecretsGetResponse200,
)
def list_repo_organization_variables(
self,
owner: str,
repo: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsOrganizationVariablesGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/organization-variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsOrganizationVariablesGetResponse200,
)
async def async_list_repo_organization_variables(
self,
owner: str,
repo: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsOrganizationVariablesGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/organization-variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsOrganizationVariablesGetResponse200,
)
def get_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsRepositoryPermissions]":
url = f"/repos/{owner}/{repo}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsRepositoryPermissions,
)
async def async_get_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsRepositoryPermissions]":
url = f"/repos/{owner}/{repo}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsRepositoryPermissions,
)
@overload
def set_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsPermissionsPutBodyType,
) -> "Response":
...
@overload
def set_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enabled: bool,
allowed_actions: Missing[Literal["all", "local_only", "selected"]] = UNSET,
) -> "Response":
...
def set_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsPermissionsPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsPermissionsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsPermissionsPutBodyType,
) -> "Response":
...
@overload
async def async_set_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enabled: bool,
allowed_actions: Missing[Literal["all", "local_only", "selected"]] = UNSET,
) -> "Response":
...
async def async_set_github_actions_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsPermissionsPutBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsPermissionsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def get_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsWorkflowAccessToRepository]":
url = f"/repos/{owner}/{repo}/actions/permissions/access"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsWorkflowAccessToRepository,
)
async def async_get_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsWorkflowAccessToRepository]":
url = f"/repos/{owner}/{repo}/actions/permissions/access"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsWorkflowAccessToRepository,
)
@overload
def set_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ActionsWorkflowAccessToRepositoryType,
) -> "Response":
...
@overload
def set_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
access_level: Literal["none", "user", "organization"],
) -> "Response":
...
def set_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsWorkflowAccessToRepositoryType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions/access"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ActionsWorkflowAccessToRepository, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ActionsWorkflowAccessToRepositoryType,
) -> "Response":
...
@overload
async def async_set_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
access_level: Literal["none", "user", "organization"],
) -> "Response":
...
async def async_set_workflow_access_to_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsWorkflowAccessToRepositoryType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions/access"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ActionsWorkflowAccessToRepository, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def get_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[SelectedActions]":
url = f"/repos/{owner}/{repo}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=SelectedActions,
)
async def async_get_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[SelectedActions]":
url = f"/repos/{owner}/{repo}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=SelectedActions,
)
@overload
def set_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
) -> "Response":
...
@overload
def set_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
github_owned_allowed: Missing[bool] = UNSET,
verified_allowed: Missing[bool] = UNSET,
patterns_allowed: Missing[List[str]] = UNSET,
) -> "Response":
...
def set_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(SelectedActions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_set_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
) -> "Response":
...
@overload
async def async_set_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
github_owned_allowed: Missing[bool] = UNSET,
verified_allowed: Missing[bool] = UNSET,
patterns_allowed: Missing[List[str]] = UNSET,
) -> "Response":
...
async def async_set_allowed_actions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[SelectedActionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions/selected-actions"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(SelectedActions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
def get_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsGetDefaultWorkflowPermissions]":
url = f"/repos/{owner}/{repo}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsGetDefaultWorkflowPermissions,
)
async def async_get_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsGetDefaultWorkflowPermissions]":
url = f"/repos/{owner}/{repo}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsGetDefaultWorkflowPermissions,
)
@overload
def set_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ActionsSetDefaultWorkflowPermissionsType,
) -> "Response":
...
@overload
def set_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
default_workflow_permissions: Missing[Literal["read", "write"]] = UNSET,
can_approve_pull_request_reviews: Missing[bool] = UNSET,
) -> "Response":
...
def set_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsSetDefaultWorkflowPermissionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ActionsSetDefaultWorkflowPermissions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
error_models={},
)
@overload
async def async_set_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ActionsSetDefaultWorkflowPermissionsType,
) -> "Response":
...
@overload
async def async_set_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
default_workflow_permissions: Missing[Literal["read", "write"]] = UNSET,
can_approve_pull_request_reviews: Missing[bool] = UNSET,
) -> "Response":
...
async def async_set_github_actions_default_workflow_permissions_repository(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ActionsSetDefaultWorkflowPermissionsType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/permissions/workflow"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ActionsSetDefaultWorkflowPermissions, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
error_models={},
)
def list_self_hosted_runners_for_repo(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunnersGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunnersGetResponse200,
)
async def async_list_self_hosted_runners_for_repo(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunnersGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunnersGetResponse200,
)
def list_runner_applications_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[RunnerApplication]]":
url = f"/repos/{owner}/{repo}/actions/runners/downloads"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[RunnerApplication],
)
async def async_list_runner_applications_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[RunnerApplication]]":
url = f"/repos/{owner}/{repo}/actions/runners/downloads"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[RunnerApplication],
)
@overload
def generate_runner_jitconfig_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunnersGenerateJitconfigPostBodyType,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
@overload
def generate_runner_jitconfig_for_repo(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
runner_group_id: int,
labels: List[str],
work_folder: Missing[str] = "_work",
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
def generate_runner_jitconfig_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
ReposOwnerRepoActionsRunnersGenerateJitconfigPostBodyType
] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
url = f"/repos/{owner}/{repo}/actions/runners/generate-jitconfig"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsRunnersGenerateJitconfigPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersGenerateJitconfigPostResponse201,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
async def async_generate_runner_jitconfig_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunnersGenerateJitconfigPostBodyType,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
@overload
async def async_generate_runner_jitconfig_for_repo(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
runner_group_id: int,
labels: List[str],
work_folder: Missing[str] = "_work",
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
...
async def async_generate_runner_jitconfig_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
ReposOwnerRepoActionsRunnersGenerateJitconfigPostBodyType
] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersGenerateJitconfigPostResponse201]":
url = f"/repos/{owner}/{repo}/actions/runners/generate-jitconfig"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsRunnersGenerateJitconfigPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersGenerateJitconfigPostResponse201,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
def create_registration_token_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/repos/{owner}/{repo}/actions/runners/registration-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
async def async_create_registration_token_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/repos/{owner}/{repo}/actions/runners/registration-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
def create_remove_token_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/repos/{owner}/{repo}/actions/runners/remove-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
async def async_create_remove_token_for_repo(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[AuthenticationToken]":
url = f"/repos/{owner}/{repo}/actions/runners/remove-token"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"POST",
url,
headers=exclude_unset(headers),
response_model=AuthenticationToken,
)
def get_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Runner]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=Runner,
)
async def async_get_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Runner]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=Runner,
)
def delete_self_hosted_runner_from_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_self_hosted_runner_from_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
def list_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
},
)
async def async_list_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
},
)
@overload
def set_custom_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
def set_custom_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
def set_custom_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
async def async_set_custom_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
async def async_set_custom_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
async def async_set_custom_labels_for_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsRunnersRunnerIdLabelsPutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
def add_custom_labels_to_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
def add_custom_labels_to_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
def add_custom_labels_to_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
@overload
async def async_add_custom_labels_to_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBodyType,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
@overload
async def async_add_custom_labels_to_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
labels: List[str],
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
...
async def async_add_custom_labels_to_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBodyType] = UNSET,
**kwargs,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsRunnersRunnerIdLabelsPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
def remove_all_custom_labels_from_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200,
error_models={
"404": BasicError,
},
)
async def async_remove_all_custom_labels_from_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsDeleteResponse200,
error_models={
"404": BasicError,
},
)
def remove_custom_label_from_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
async def async_remove_custom_label_from_self_hosted_runner_for_repo(
self,
owner: str,
repo: str,
runner_id: int,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runners/{runner_id}/labels/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
response_model=OrgsOrgActionsRunnersRunnerIdLabelsGetResponse200,
error_models={
"404": BasicError,
"422": ValidationErrorSimple,
},
)
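    # Usage sketch (illustrative, not generated code): the repository-level
    # runner label endpoints above, assuming an authenticated githubkit
    # ``GitHub`` client that exposes this resource as ``gh.rest.actions``;
    # owner, repo and runner id values are placeholders.
    #
    #     from githubkit import GitHub
    #
    #     gh = GitHub("<token>")
    #     gh.rest.actions.add_custom_labels_to_self_hosted_runner_for_repo(
    #         "octo-org", "octo-repo", 42, labels=["gpu", "x64"]
    #     )
    #     gh.rest.actions.remove_custom_label_from_self_hosted_runner_for_repo(
    #         "octo-org", "octo-repo", 42, "gpu"
    #     )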
def list_workflow_runs_for_repo(
self,
owner: str,
repo: str,
actor: Missing[str] = UNSET,
branch: Missing[str] = UNSET,
event: Missing[str] = UNSET,
status: Missing[
Literal[
"completed",
"action_required",
"cancelled",
"failure",
"neutral",
"skipped",
"stale",
"success",
"timed_out",
"in_progress",
"queued",
"requested",
"waiting",
"pending",
]
] = UNSET,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
created: Missing[datetime] = UNSET,
exclude_pull_requests: Missing[bool] = False,
check_suite_id: Missing[int] = UNSET,
head_sha: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs"
params = {
"actor": actor,
"branch": branch,
"event": event,
"status": status,
"per_page": per_page,
"page": page,
"created": created,
"exclude_pull_requests": exclude_pull_requests,
"check_suite_id": check_suite_id,
"head_sha": head_sha,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsGetResponse200,
)
async def async_list_workflow_runs_for_repo(
self,
owner: str,
repo: str,
actor: Missing[str] = UNSET,
branch: Missing[str] = UNSET,
event: Missing[str] = UNSET,
status: Missing[
Literal[
"completed",
"action_required",
"cancelled",
"failure",
"neutral",
"skipped",
"stale",
"success",
"timed_out",
"in_progress",
"queued",
"requested",
"waiting",
"pending",
]
] = UNSET,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
created: Missing[datetime] = UNSET,
exclude_pull_requests: Missing[bool] = False,
check_suite_id: Missing[int] = UNSET,
head_sha: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs"
params = {
"actor": actor,
"branch": branch,
"event": event,
"status": status,
"per_page": per_page,
"page": page,
"created": created,
"exclude_pull_requests": exclude_pull_requests,
"check_suite_id": check_suite_id,
"head_sha": head_sha,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsGetResponse200,
)
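    # Hedged sketch: filtering the repository-wide run listing with the query
    # parameters accepted above (branch, status, per_page, ...), reusing the
    # ``gh`` client from the earlier sketch; field names follow the generated
    # response model.
    #
    #     runs = gh.rest.actions.list_workflow_runs_for_repo(
    #         "octo-org", "octo-repo", branch="main", status="success", per_page=50
    #     ).parsed_data
    #     print(runs.total_count)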
def get_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
exclude_pull_requests: Missing[bool] = False,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowRun]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}"
params = {
"exclude_pull_requests": exclude_pull_requests,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=WorkflowRun,
)
async def async_get_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
exclude_pull_requests: Missing[bool] = False,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowRun]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}"
params = {
"exclude_pull_requests": exclude_pull_requests,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=WorkflowRun,
)
def delete_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
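    # Hedged sketch: fetch a single run, then delete it once it has completed
    # (same assumed ``gh`` client; the run id is a placeholder).
    #
    #     run = gh.rest.actions.get_workflow_run("octo-org", "octo-repo", 30433642).parsed_data
    #     if run.status == "completed":
    #         gh.rest.actions.delete_workflow_run("octo-org", "octo-repo", run.id)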
def get_reviews_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[EnvironmentApprovals]]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/approvals"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[EnvironmentApprovals],
)
async def async_get_reviews_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[EnvironmentApprovals]]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/approvals"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[EnvironmentApprovals],
)
def approve_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/approve"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"POST",
url,
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"404": BasicError,
"403": BasicError,
},
)
async def async_approve_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/approve"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"POST",
url,
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"404": BasicError,
"403": BasicError,
},
)
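    # Hedged sketch: inspect the environment approvals recorded for a run and
    # approve a run from a fork pull request that is waiting for sign-off.
    #
    #     approvals = gh.rest.actions.get_reviews_for_run(
    #         "octo-org", "octo-repo", 30433642
    #     ).parsed_data
    #     gh.rest.actions.approve_workflow_run("octo-org", "octo-repo", 30433642)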
def list_workflow_run_artifacts(
self,
owner: str,
repo: str,
run_id: int,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsRunIdArtifactsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/artifacts"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsRunIdArtifactsGetResponse200,
)
async def async_list_workflow_run_artifacts(
self,
owner: str,
repo: str,
run_id: int,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsRunIdArtifactsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/artifacts"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsRunIdArtifactsGetResponse200,
)
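    # Hedged sketch: page through the artifacts a run produced; attribute names
    # are taken from GitHub's artifact schema via the generated response model.
    #
    #     page = gh.rest.actions.list_workflow_run_artifacts(
    #         "octo-org", "octo-repo", 30433642, per_page=100
    #     ).parsed_data
    #     for artifact in page.artifacts:
    #         print(artifact.name, artifact.size_in_bytes)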
def get_workflow_run_attempt(
self,
owner: str,
repo: str,
run_id: int,
attempt_number: int,
exclude_pull_requests: Missing[bool] = False,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowRun]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/attempts/{attempt_number}"
params = {
"exclude_pull_requests": exclude_pull_requests,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=WorkflowRun,
)
async def async_get_workflow_run_attempt(
self,
owner: str,
repo: str,
run_id: int,
attempt_number: int,
exclude_pull_requests: Missing[bool] = False,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowRun]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/attempts/{attempt_number}"
params = {
"exclude_pull_requests": exclude_pull_requests,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=WorkflowRun,
)
def list_jobs_for_workflow_run_attempt(
self,
owner: str,
repo: str,
run_id: int,
attempt_number: int,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsRunIdAttemptsAttemptNumberJobsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/attempts/{attempt_number}/jobs"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsRunIdAttemptsAttemptNumberJobsGetResponse200,
error_models={
"404": BasicError,
},
)
async def async_list_jobs_for_workflow_run_attempt(
self,
owner: str,
repo: str,
run_id: int,
attempt_number: int,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsRunIdAttemptsAttemptNumberJobsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/attempts/{attempt_number}/jobs"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsRunIdAttemptsAttemptNumberJobsGetResponse200,
error_models={
"404": BasicError,
},
)
def download_workflow_run_attempt_logs(
self,
owner: str,
repo: str,
run_id: int,
attempt_number: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/attempts/{attempt_number}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
)
async def async_download_workflow_run_attempt_logs(
self,
owner: str,
repo: str,
run_id: int,
attempt_number: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/attempts/{attempt_number}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
)
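    # Hedged sketch: drill into one attempt of a run — its metadata, its jobs and
    # its log archive (the logs endpoint answers with a redirect to a short-lived
    # archive URL rather than JSON).
    #
    #     attempt = gh.rest.actions.get_workflow_run_attempt(
    #         "octo-org", "octo-repo", 30433642, 2
    #     ).parsed_data
    #     jobs = gh.rest.actions.list_jobs_for_workflow_run_attempt(
    #         "octo-org", "octo-repo", 30433642, 2
    #     ).parsed_data.jobs
    #     gh.rest.actions.download_workflow_run_attempt_logs("octo-org", "octo-repo", 30433642, 2)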
def cancel_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/cancel"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"POST",
url,
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"409": BasicError,
},
)
async def async_cancel_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/cancel"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"POST",
url,
headers=exclude_unset(headers),
response_model=EmptyObject,
error_models={
"409": BasicError,
},
)
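    # Hedged sketch: cancel an in-progress run; a run that can no longer be
    # cancelled surfaces through the 409 error model registered above.
    #
    #     gh.rest.actions.cancel_workflow_run("octo-org", "octo-repo", 30433642)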
@overload
def review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Union[
ReviewCustomGatesCommentRequiredType, ReviewCustomGatesStateRequiredType
],
) -> "Response":
...
@overload
def review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
environment_name: str,
comment: str,
) -> "Response":
...
@overload
def review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
environment_name: str,
state: Literal["approved", "rejected"],
comment: Missing[str] = UNSET,
) -> "Response":
...
def review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[
ReviewCustomGatesCommentRequiredType, ReviewCustomGatesStateRequiredType
]
] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/deployment_protection_rule"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReviewCustomGatesCommentRequired, ReviewCustomGatesStateRequired],
json,
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Union[
ReviewCustomGatesCommentRequiredType, ReviewCustomGatesStateRequiredType
],
) -> "Response":
...
@overload
async def async_review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
environment_name: str,
comment: str,
) -> "Response":
...
@overload
async def async_review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
environment_name: str,
state: Literal["approved", "rejected"],
comment: Missing[str] = UNSET,
) -> "Response":
...
async def async_review_custom_gates_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[
ReviewCustomGatesCommentRequiredType, ReviewCustomGatesStateRequiredType
]
] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/deployment_protection_rule"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReviewCustomGatesCommentRequired, ReviewCustomGatesStateRequired],
json,
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
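    # Hedged sketch: approve a run held by a custom deployment protection rule,
    # using the ``state``-required keyword overload above (environment name and
    # comment are placeholders).
    #
    #     gh.rest.actions.review_custom_gates_for_run(
    #         "octo-org", "octo-repo", 30433642,
    #         environment_name="production",
    #         state="approved",
    #         comment="gate checks passed",
    #     )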
def list_jobs_for_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
filter_: Missing[Literal["latest", "all"]] = "latest",
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsRunIdJobsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/jobs"
params = {
"filter": filter_,
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsRunIdJobsGetResponse200,
)
async def async_list_jobs_for_workflow_run(
self,
owner: str,
repo: str,
run_id: int,
filter_: Missing[Literal["latest", "all"]] = "latest",
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsRunsRunIdJobsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/jobs"
params = {
"filter": filter_,
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsRunsRunIdJobsGetResponse200,
)
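    # Hedged sketch: list the jobs of a run; ``filter_`` maps to the API's
    # ``filter`` query parameter ("latest" attempt only, or "all" attempts).
    #
    #     jobs = gh.rest.actions.list_jobs_for_workflow_run(
    #         "octo-org", "octo-repo", 30433642, filter_="all"
    #     ).parsed_data.jobs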
def download_workflow_run_logs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
)
async def async_download_workflow_run_logs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
)
def delete_workflow_run_logs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
error_models={
"403": BasicError,
"500": BasicError,
},
)
async def async_delete_workflow_run_logs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/logs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
error_models={
"403": BasicError,
"500": BasicError,
},
)
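    # Hedged sketch: download the full log archive for a run (served as a
    # redirect to a short-lived URL) and delete the logs afterwards.
    #
    #     gh.rest.actions.download_workflow_run_logs("octo-org", "octo-repo", 30433642)
    #     gh.rest.actions.delete_workflow_run_logs("octo-org", "octo-repo", 30433642)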
def get_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[PendingDeployment]]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/pending_deployments"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[PendingDeployment],
)
async def async_get_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[List[PendingDeployment]]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/pending_deployments"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=List[PendingDeployment],
)
@overload
def review_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBodyType,
) -> "Response[List[Deployment]]":
...
@overload
def review_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
environment_ids: List[int],
state: Literal["approved", "rejected"],
comment: str,
) -> "Response[List[Deployment]]":
...
def review_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBodyType
] = UNSET,
**kwargs,
) -> "Response[List[Deployment]]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/pending_deployments"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBody, json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=List[Deployment],
)
@overload
async def async_review_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBodyType,
) -> "Response[List[Deployment]]":
...
@overload
async def async_review_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
environment_ids: List[int],
state: Literal["approved", "rejected"],
comment: str,
) -> "Response[List[Deployment]]":
...
async def async_review_pending_deployments_for_run(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBodyType
] = UNSET,
**kwargs,
) -> "Response[List[Deployment]]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/pending_deployments"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
ReposOwnerRepoActionsRunsRunIdPendingDeploymentsPostBody, json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=List[Deployment],
)
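    # Hedged sketch: approve every deployment waiting on environment protection
    # rules in one call. The ``environment.id`` attribute follows the generated
    # PendingDeployment model and is assumed to be populated here.
    #
    #     pending = gh.rest.actions.get_pending_deployments_for_run(
    #         "octo-org", "octo-repo", 30433642
    #     ).parsed_data
    #     gh.rest.actions.review_pending_deployments_for_run(
    #         "octo-org", "octo-repo", 30433642,
    #         environment_ids=[p.environment.id for p in pending],
    #         state="approved",
    #         comment="ship it",
    #     )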
@overload
def re_run_workflow(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunPostBodyType, None]
] = UNSET,
) -> "Response[EmptyObject]":
...
@overload
def re_run_workflow(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enable_debug_logging: Missing[bool] = False,
) -> "Response[EmptyObject]":
...
def re_run_workflow(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunPostBodyType, None]
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/rerun"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReposOwnerRepoActionsRunsRunIdRerunPostBody, None], json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_re_run_workflow(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunPostBodyType, None]
] = UNSET,
) -> "Response[EmptyObject]":
...
@overload
async def async_re_run_workflow(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enable_debug_logging: Missing[bool] = False,
) -> "Response[EmptyObject]":
...
async def async_re_run_workflow(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunPostBodyType, None]
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/rerun"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReposOwnerRepoActionsRunsRunIdRerunPostBody, None], json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
def re_run_workflow_failed_jobs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBodyType, None]
] = UNSET,
) -> "Response[EmptyObject]":
...
@overload
def re_run_workflow_failed_jobs(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enable_debug_logging: Missing[bool] = False,
) -> "Response[EmptyObject]":
...
def re_run_workflow_failed_jobs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBodyType, None]
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/rerun-failed-jobs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBody, None], json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_re_run_workflow_failed_jobs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBodyType, None]
] = UNSET,
) -> "Response[EmptyObject]":
...
@overload
async def async_re_run_workflow_failed_jobs(
self,
owner: str,
repo: str,
run_id: int,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
enable_debug_logging: Missing[bool] = False,
) -> "Response[EmptyObject]":
...
async def async_re_run_workflow_failed_jobs(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
Union[ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBodyType, None]
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/rerun-failed-jobs"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
Union[ReposOwnerRepoActionsRunsRunIdRerunFailedJobsPostBody, None], json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
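    # Hedged sketch: re-run a workflow in full, or only its failed jobs, with
    # debug logging enabled via the keyword overloads above.
    #
    #     gh.rest.actions.re_run_workflow(
    #         "octo-org", "octo-repo", 30433642, enable_debug_logging=True
    #     )
    #     gh.rest.actions.re_run_workflow_failed_jobs(
    #         "octo-org", "octo-repo", 30433642, enable_debug_logging=True
    #     )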
def get_workflow_run_usage(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowRunUsage]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/timing"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=WorkflowRunUsage,
)
async def async_get_workflow_run_usage(
self,
owner: str,
repo: str,
run_id: int,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowRunUsage]":
url = f"/repos/{owner}/{repo}/actions/runs/{run_id}/timing"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=WorkflowRunUsage,
)
def list_repo_secrets(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsSecretsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsSecretsGetResponse200,
)
async def async_list_repo_secrets(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsSecretsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsSecretsGetResponse200,
)
def get_repo_public_key(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsPublicKey]":
url = f"/repos/{owner}/{repo}/actions/secrets/public-key"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsPublicKey,
)
async def async_get_repo_public_key(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsPublicKey]":
url = f"/repos/{owner}/{repo}/actions/secrets/public-key"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsPublicKey,
)
def get_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsSecret]":
url = f"/repos/{owner}/{repo}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsSecret,
)
async def async_get_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsSecret]":
url = f"/repos/{owner}/{repo}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsSecret,
)
@overload
def create_or_update_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsSecretsSecretNamePutBodyType,
) -> "Response[EmptyObject]":
...
@overload
def create_or_update_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
encrypted_value: Missing[str] = UNSET,
key_id: Missing[str] = UNSET,
) -> "Response[EmptyObject]":
...
def create_or_update_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsSecretsSecretNamePutBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsSecretsSecretNamePutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_create_or_update_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsSecretsSecretNamePutBodyType,
) -> "Response[EmptyObject]":
...
@overload
async def async_create_or_update_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
encrypted_value: Missing[str] = UNSET,
key_id: Missing[str] = UNSET,
) -> "Response[EmptyObject]":
...
async def async_create_or_update_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsSecretsSecretNamePutBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsSecretsSecretNamePutBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
def delete_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_repo_secret(
self,
owner: str,
repo: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
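    # Hedged sketch: create a repository secret. GitHub expects the value to be
    # encrypted client-side with the repository public key (libsodium sealed
    # box); this example assumes PyNaCl is installed.
    #
    #     from base64 import b64encode
    #     from nacl import encoding, public
    #
    #     key = gh.rest.actions.get_repo_public_key("octo-org", "octo-repo").parsed_data
    #     box = public.SealedBox(public.PublicKey(key.key.encode(), encoding.Base64Encoder()))
    #     gh.rest.actions.create_or_update_repo_secret(
    #         "octo-org", "octo-repo", "DEPLOY_TOKEN",
    #         encrypted_value=b64encode(box.encrypt(b"s3cr3t")).decode(),
    #         key_id=key.key_id,
    #     )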
def list_repo_variables(
self,
owner: str,
repo: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsVariablesGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsVariablesGetResponse200,
)
async def async_list_repo_variables(
self,
owner: str,
repo: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsVariablesGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsVariablesGetResponse200,
)
@overload
def create_repo_variable(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsVariablesPostBodyType,
) -> "Response[EmptyObject]":
...
@overload
def create_repo_variable(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
value: str,
) -> "Response[EmptyObject]":
...
def create_repo_variable(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsVariablesPostBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/variables"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsVariablesPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_create_repo_variable(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsVariablesPostBodyType,
) -> "Response[EmptyObject]":
...
@overload
async def async_create_repo_variable(
self,
owner: str,
repo: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
value: str,
) -> "Response[EmptyObject]":
...
async def async_create_repo_variable(
self,
owner: str,
repo: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsVariablesPostBodyType] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repos/{owner}/{repo}/actions/variables"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsVariablesPostBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
def get_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsVariable]":
url = f"/repos/{owner}/{repo}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsVariable,
)
async def async_get_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsVariable]":
url = f"/repos/{owner}/{repo}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsVariable,
)
def delete_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
@overload
def update_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsVariablesNamePatchBodyType,
) -> "Response":
...
@overload
def update_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
value: Missing[str] = UNSET,
) -> "Response":
...
def update_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsVariablesNamePatchBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsVariablesNamePatchBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PATCH",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_update_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsVariablesNamePatchBodyType,
) -> "Response":
...
@overload
async def async_update_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
value: Missing[str] = UNSET,
) -> "Response":
...
async def async_update_repo_variable(
self,
owner: str,
repo: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[ReposOwnerRepoActionsVariablesNamePatchBodyType] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(ReposOwnerRepoActionsVariablesNamePatchBody, json)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PATCH",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
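    # Hedged sketch: repository variables hold plain-text configuration, so no
    # client-side encryption is involved (contrast with the secret endpoints).
    #
    #     gh.rest.actions.create_repo_variable(
    #         "octo-org", "octo-repo", name="REGION", value="eu-west-1"
    #     )
    #     gh.rest.actions.update_repo_variable("octo-org", "octo-repo", "REGION", value="us-east-1")
    #     gh.rest.actions.delete_repo_variable("octo-org", "octo-repo", "REGION")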
def list_repo_workflows(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsWorkflowsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/workflows"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsWorkflowsGetResponse200,
)
async def async_list_repo_workflows(
self,
owner: str,
repo: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsWorkflowsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/workflows"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsWorkflowsGetResponse200,
)
def get_workflow(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Workflow]":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=Workflow,
)
async def async_get_workflow(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[Workflow]":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=Workflow,
)
def disable_workflow(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/disable"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"PUT",
url,
headers=exclude_unset(headers),
)
async def async_disable_workflow(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/disable"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"PUT",
url,
headers=exclude_unset(headers),
)
@overload
def create_workflow_dispatch(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyType,
) -> "Response":
...
@overload
def create_workflow_dispatch(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
ref: str,
inputs: Missing[
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyPropInputsType
] = UNSET,
) -> "Response":
...
def create_workflow_dispatch(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyType
] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/dispatches"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBody, json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_create_workflow_dispatch(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
data: ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyType,
) -> "Response":
...
@overload
async def async_create_workflow_dispatch(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
ref: str,
inputs: Missing[
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyPropInputsType
] = UNSET,
) -> "Response":
...
async def async_create_workflow_dispatch(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBodyType
] = UNSET,
**kwargs,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/dispatches"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
ReposOwnerRepoActionsWorkflowsWorkflowIdDispatchesPostBody, json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
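    # Hedged sketch: trigger a ``workflow_dispatch`` event. ``workflow_id`` may be
    # the numeric id or the workflow file name, and ``inputs`` must match the
    # inputs declared in the workflow file (placeholders here).
    #
    #     gh.rest.actions.create_workflow_dispatch(
    #         "octo-org", "octo-repo", "deploy.yml",
    #         ref="main",
    #         inputs={"environment": "staging"},
    #     )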
def enable_workflow(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/enable"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"PUT",
url,
headers=exclude_unset(headers),
)
async def async_enable_workflow(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/enable"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"PUT",
url,
headers=exclude_unset(headers),
)
def list_workflow_runs(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
actor: Missing[str] = UNSET,
branch: Missing[str] = UNSET,
event: Missing[str] = UNSET,
status: Missing[
Literal[
"completed",
"action_required",
"cancelled",
"failure",
"neutral",
"skipped",
"stale",
"success",
"timed_out",
"in_progress",
"queued",
"requested",
"waiting",
"pending",
]
] = UNSET,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
created: Missing[datetime] = UNSET,
exclude_pull_requests: Missing[bool] = False,
check_suite_id: Missing[int] = UNSET,
head_sha: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsWorkflowsWorkflowIdRunsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/runs"
params = {
"actor": actor,
"branch": branch,
"event": event,
"status": status,
"per_page": per_page,
"page": page,
"created": created,
"exclude_pull_requests": exclude_pull_requests,
"check_suite_id": check_suite_id,
"head_sha": head_sha,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsWorkflowsWorkflowIdRunsGetResponse200,
)
async def async_list_workflow_runs(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
actor: Missing[str] = UNSET,
branch: Missing[str] = UNSET,
event: Missing[str] = UNSET,
status: Missing[
Literal[
"completed",
"action_required",
"cancelled",
"failure",
"neutral",
"skipped",
"stale",
"success",
"timed_out",
"in_progress",
"queued",
"requested",
"waiting",
"pending",
]
] = UNSET,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
created: Missing[datetime] = UNSET,
exclude_pull_requests: Missing[bool] = False,
check_suite_id: Missing[int] = UNSET,
head_sha: Missing[str] = UNSET,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ReposOwnerRepoActionsWorkflowsWorkflowIdRunsGetResponse200]":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/runs"
params = {
"actor": actor,
"branch": branch,
"event": event,
"status": status,
"per_page": per_page,
"page": page,
"created": created,
"exclude_pull_requests": exclude_pull_requests,
"check_suite_id": check_suite_id,
"head_sha": head_sha,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=ReposOwnerRepoActionsWorkflowsWorkflowIdRunsGetResponse200,
)
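    # Hedged sketch: the per-workflow run listing takes the same filters as the
    # repository-wide variant above, scoped to one workflow id or file name.
    #
    #     failures = gh.rest.actions.list_workflow_runs(
    #         "octo-org", "octo-repo", "deploy.yml", event="push", status="failure"
    #     ).parsed_data.workflow_runs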
def get_workflow_usage(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowUsage]":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/timing"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=WorkflowUsage,
)
async def async_get_workflow_usage(
self,
owner: str,
repo: str,
workflow_id: Union[int, str],
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[WorkflowUsage]":
url = f"/repos/{owner}/{repo}/actions/workflows/{workflow_id}/timing"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=WorkflowUsage,
)
def list_environment_secrets(
self,
repository_id: int,
environment_name: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsGetResponse200]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsGetResponse200,
)
async def async_list_environment_secrets(
self,
repository_id: int,
environment_name: str,
per_page: Missing[int] = 30,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsGetResponse200]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsGetResponse200,
)
def get_environment_public_key(
self,
repository_id: int,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsPublicKey]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/public-key"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsPublicKey,
)
async def async_get_environment_public_key(
self,
repository_id: int,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsPublicKey]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/public-key"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsPublicKey,
)
def get_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsSecret]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsSecret,
)
async def async_get_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsSecret]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsSecret,
)
@overload
def create_or_update_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBodyType,
) -> "Response[EmptyObject]":
...
@overload
def create_or_update_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
encrypted_value: str,
key_id: str,
) -> "Response[EmptyObject]":
...
def create_or_update_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBodyType
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBody,
json,
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_create_or_update_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBodyType,
) -> "Response[EmptyObject]":
...
@overload
async def async_create_or_update_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
encrypted_value: str,
key_id: str,
) -> "Response[EmptyObject]":
...
async def async_create_or_update_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBodyType
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
RepositoriesRepositoryIdEnvironmentsEnvironmentNameSecretsSecretNamePutBody,
json,
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PUT",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
def delete_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_environment_secret(
self,
repository_id: int,
environment_name: str,
secret_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
def list_environment_variables(
self,
repository_id: int,
environment_name: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesGetResponse200]":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesGetResponse200,
)
async def async_list_environment_variables(
self,
repository_id: int,
environment_name: str,
per_page: Missing[int] = 10,
page: Missing[int] = 1,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesGetResponse200]":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables"
params = {
"per_page": per_page,
"page": page,
}
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
params=exclude_unset(params),
headers=exclude_unset(headers),
response_model=RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesGetResponse200,
)
@overload
def create_environment_variable(
self,
repository_id: int,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBodyType,
) -> "Response[EmptyObject]":
...
@overload
def create_environment_variable(
self,
repository_id: int,
environment_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
value: str,
) -> "Response[EmptyObject]":
...
def create_environment_variable(
self,
repository_id: int,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBodyType
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBody, json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
@overload
async def async_create_environment_variable(
self,
repository_id: int,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBodyType,
) -> "Response[EmptyObject]":
...
@overload
async def async_create_environment_variable(
self,
repository_id: int,
environment_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
name: str,
value: str,
) -> "Response[EmptyObject]":
...
async def async_create_environment_variable(
self,
repository_id: int,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBodyType
] = UNSET,
**kwargs,
) -> "Response[EmptyObject]":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesPostBody, json
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"POST",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
response_model=EmptyObject,
)
def get_environment_variable(
self,
repository_id: int,
environment_name: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsVariable]":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsVariable,
)
async def async_get_environment_variable(
self,
repository_id: int,
environment_name: str,
name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response[ActionsVariable]":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"GET",
url,
headers=exclude_unset(headers),
response_model=ActionsVariable,
)
def delete_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return self._github.request(
"DELETE",
url,
headers=exclude_unset(headers),
)
async def async_delete_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
) -> "Response":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
return await self._github.arequest(
"DELETE",
url,
headers=exclude_unset(headers),
)
@overload
def update_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBodyType,
) -> "Response":
...
@overload
def update_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
value: Missing[str] = UNSET,
) -> "Response":
...
def update_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBodyType
] = UNSET,
**kwargs,
) -> "Response":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBody,
json,
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return self._github.request(
"PATCH",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
)
@overload
async def async_update_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBodyType,
) -> "Response":
...
@overload
async def async_update_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
data: Literal[UNSET] = UNSET,
headers: Optional[Dict[str, str]] = None,
value: Missing[str] = UNSET,
) -> "Response":
...
async def async_update_environment_variable(
self,
repository_id: int,
name: str,
environment_name: str,
*,
headers: Optional[Dict[str, str]] = None,
data: Missing[
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBodyType
] = UNSET,
**kwargs,
) -> "Response":
url = f"/repositories/{repository_id}/environments/{environment_name}/variables/{name}"
headers = {"X-GitHub-Api-Version": self._REST_API_VERSION, **(headers or {})}
if not kwargs:
kwargs = UNSET
json = kwargs if data is UNSET else data
json = parse_obj_as(
RepositoriesRepositoryIdEnvironmentsEnvironmentNameVariablesNamePatchBody,
json,
)
json = json.dict(by_alias=True) if isinstance(json, BaseModel) else json
return await self._github.arequest(
"PATCH",
url,
json=exclude_unset(json),
headers=exclude_unset(headers),
) | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/prism/plugins/keep-markup/prism-keep-markup.js | (function () {
if (typeof self === 'undefined' || !self.Prism || !self.document || !document.createRange) {
return;
}
Prism.plugins.KeepMarkup = true;
Prism.hooks.add('before-highlight', function (env) {
if (!env.element.children.length) {
return;
}
var pos = 0;
var data = [];
var f = function (elt, baseNode) {
var o = {};
if (!baseNode) {
// Clone the original tag to keep all attributes
o.clone = elt.cloneNode(false);
o.posOpen = pos;
data.push(o);
}
for (var i = 0, l = elt.childNodes.length; i < l; i++) {
var child = elt.childNodes[i];
if (child.nodeType === 1) { // element
f(child);
} else if(child.nodeType === 3) { // text
pos += child.data.length;
}
}
if (!baseNode) {
o.posClose = pos;
}
};
f(env.element, true);
if (data && data.length) {
// data is an array of all existing tags
env.keepMarkup = data;
}
});
Prism.hooks.add('after-highlight', function (env) {
if(env.keepMarkup && env.keepMarkup.length) {
var walk = function (elt, nodeState) {
for (var i = 0, l = elt.childNodes.length; i < l; i++) {
var child = elt.childNodes[i];
if (child.nodeType === 1) { // element
if (!walk(child, nodeState)) {
return false;
}
} else if (child.nodeType === 3) { // text
if(!nodeState.nodeStart && nodeState.pos + child.data.length > nodeState.node.posOpen) {
// We found the start position
nodeState.nodeStart = child;
nodeState.nodeStartPos = nodeState.node.posOpen - nodeState.pos;
}
if(nodeState.nodeStart && nodeState.pos + child.data.length >= nodeState.node.posClose) {
// We found the end position
nodeState.nodeEnd = child;
nodeState.nodeEndPos = nodeState.node.posClose - nodeState.pos;
}
nodeState.pos += child.data.length;
}
if (nodeState.nodeStart && nodeState.nodeEnd) {
// Select the range and wrap it with the clone
var range = document.createRange();
range.setStart(nodeState.nodeStart, nodeState.nodeStartPos);
range.setEnd(nodeState.nodeEnd, nodeState.nodeEndPos);
nodeState.node.clone.appendChild(range.extractContents());
range.insertNode(nodeState.node.clone);
range.detach();
// Process is over
return false;
}
}
return true;
};
// For each tag, we walk the DOM to reinsert it
env.keepMarkup.forEach(function (node) {
walk(env.element, {
node: node,
pos: 0
});
});
// Store new highlightedCode for later hooks calls
env.highlightedCode = env.element.innerHTML;
}
});
}()); | PypiClean |
/Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/cache_proxy.py | import amara
from amara.thirdparty import httplib2
import akara
from akara.services import simple_service
from akara import response
from akara import logger
from akara.util import normalize_http_header_name
import calendar
import email
import email.Utils
import time
MAXLEN = akara.module_config().get('maxlen')
if None in MAXLEN:
DEFAULT_MAXLEN = MAXLEN[None]
del MAXLEN[None]
else:
DEFAULT_MAXLEN = 3600
OVERRIDE_STALE = akara.module_config().get('override_stale',0)
CACHE_PROXY_SERVICE_ID = 'http://purl.org/xml3k/akara/services/demo/cache-proxy'
MAXAGE_HEADER = lambda age: ('Cache-Control','max-age={0}'.format(age))
#FIXME: recycle after N uses
H = httplib2.Http()
def get_max_age(url):
    for k in MAXLEN:
        #XXX url normalize?
        if url.startswith(k):
            return MAXLEN[k]
    return DEFAULT_MAXLEN
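# Illustrative sketch (assumed configuration, not part of the original module):
# with a 'maxlen' mapping of URL prefixes to cache lifetimes such as
#
#     MAXLEN = {'http://example.org/feeds/': 86400}
#
# get_max_age('http://example.org/feeds/atom.xml') returns 86400, while a URL
# matching no prefix falls back to DEFAULT_MAXLEN.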
def is_fresh(resp):
"""
Returns a tuple, the first element a boolean whether the response can be
considered (for our purposes) fresh or not, and the second the freshness
lifetime of the response.
Much of this is reworked from httplib2._entry_disposition. We can't reuse it
directly since it assumes responses are stale unless otherwise marked as
fresh, and we want to do the opposite.
"""
fresh = True
freshness_lifetime = 0
cc_response = httplib2._parse_cache_control(resp)
if 'no-cache' in cc_response or 'private' in cc_response:
fresh = False
elif 'date' in resp:
date = calendar.timegm(email.Utils.parsedate_tz(resp['date']))
now = time.time()
current_age = max(0, now - date - 5) # Give us 5 seconds to get this far
if 'max-age' in cc_response:
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif 'expires' in resp:
expires = email.Utils.parsedate_tz(resp['expires'])
if expires == None:
freshness_lifetime = 0
else:
freshness_lifetime = calendar.timegm(expires) - date
else:
freshness_lifetime = 0
if freshness_lifetime < current_age:
logger.debug('lifetime = {0}, age = {1}, so marking explicitly stale'.format(freshness_lifetime,current_age))
fresh = False
return fresh, freshness_lifetime
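# Illustrative sketch (assumed header values, not part of the original module):
# given an httplib2-style response with lowercase header keys such as
#
#     resp = {'date': 'Mon, 23 Jan 2012 12:00:00 GMT',
#             'cache-control': 'max-age=300'}
#
# is_fresh(resp) derives a freshness lifetime of 300 seconds and compares it
# with the response's current age: it returns (True, 300) while the response
# is younger than that, and (False, 300) once it is older.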
@simple_service('GET', CACHE_PROXY_SERVICE_ID, 'akara.cache-proxy')
def akara_cache_proxy(url=None):
'''
Sample request:
curl -I "http://localhost:8880/akara.cache-proxy?url=http://poemtree.com/poems/UsefulAdvice.htm"
'''
logger.debug('remote URL {0}: '.format(repr(url)))
if not url:
raise ValueError('url query parameter required')
resp, content = H.request(url)
if OVERRIDE_STALE:
response.add_header(*MAXAGE_HEADER(get_max_age(url)))
else:
(fresh, lifetime) = is_fresh(resp)
if fresh:
response.add_header(*MAXAGE_HEADER( max(get_max_age(url),lifetime) ))
else:
response.add_header(*MAXAGE_HEADER(0))
logger.debug('remote response headers {0}: '.format(repr(resp)))
#Oof. What about 'transfer-encoding' and other such headers
for k in resp:
if k not in ('server','status', 'transfer-encoding', 'content-length','cache-control','expires','date'):
response.add_header(normalize_http_header_name(k), resp[k])
#response.add_header(k, resp[k])
#FIXME: This might distort return encoding, which would of course throw off content length & encoding. Workaround for now is removal of e.g. transfer-encoding (above)
return content | PypiClean |
/Flask-Espresso-0.2.0.tar.gz/Flask-Espresso-0.2.0/flask_espresso/espresso.py | from __future__ import absolute_import
from __future__ import unicode_literals
import flask_espresso.coffeescript
import slimit
import execjs
import flask
import zlib
class Espresso(object):
"""
Central controller class that can be used to configure how Flask-Espresso
behaves. Each application that wants to use Flask-Espresso has to create,
or run :meth:`init_app` on, an instance of this class after the
configuration whas initialized.
    There are two usage modes which work very similarly. One is binding the
    instance to a specific Flask application::
app = Flask(__name__)
e = Espresso(app)
The other possibility is to create the object once and configure the
application later to support it::
        e = Espresso()
def create_app():
app = Flask(__name__)
e.init_app(app)
return app
:param app: A Flask application.
:param compiler: An alternate Coffeescript compiler to use.
"""
cache = {} # A class level dict acting as a cache.
def __init__(self, app=None, compiler=None):
self.app = app
self._compiler = compiler
if app is not None:
self.init_app(app)
def init_app(self, app):
"""
Set up this instance for use with ``app``, if no app was passed to the
constructor.
:param app: A Flask application.
"""
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['espresso'] = self
app.config.setdefault('ESPRESSO_DEFAULT_COMPILER', self._compiler)
# hot patching the spidermonkey hard-coded encoding.
app.config.setdefault('ESPRESSO_SPIDERMONKEY_ENCODING', 'utf8')
execjs._runtimes['SpiderMonkey']._encoding = app.config['ESPRESSO_SPIDERMONKEY_ENCODING']
def clear_cache(self):
"""
"""
Espresso.cache.clear()
def compute_key(value):
"""
Computes a key for a ``value``.
"""
# The CRC32 checksum is used because of the low security risk. If you
# intend to compile CS from the outside world or a large number of
# files, you should consider patching this method to use a stronger
# hashing algorithm.
return zlib.crc32(bytes(value.encode('utf-8')))
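# Illustrative note (not part of the original module): the cache key is simply
# a CRC32 checksum of the rendered CoffeeScript source, so identical sources
# map to the same cache entry:
#
#     >>> compute_key('alert "hi"') == compute_key('alert "hi"')
#     True
#
# As the comment above warns, CRC32 has no collision resistance; hashlib.sha256
# would be a safer drop-in when compiling untrusted sources.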
def from_cache(key, minified):
"""
"""
return Espresso.cache.get((key, minified), None)
def to_cache(key, minified, value):
"""
"""
Espresso.cache[key, minified] = value
def espresso(cs, force=False, cache=True, minify=False):
"""
    Returns a real response object that is an instance of
    :class:`flask.Response`, carrying the compiled (and optionally minified)
    JavaScript for the rendered CoffeeScript template ``cs``.
"""
cs = flask.render_template(cs)
key = compute_key(cs)
resp = None
if not force:
resp = from_cache(key, minify)
if resp is None:
resp = flask_espresso.coffeescript.compile_cs(cs)
if minify:
resp = slimit.minify(resp, mangle=True, mangle_toplevel=True)
        if cache:  # the caching only happens if the
            to_cache(key, minify, resp)  # file is compiled.
return flask.Response(resp, mimetype='application/javascript') | PypiClean |
/MegEngine-1.13.1-cp37-cp37m-macosx_10_14_x86_64.whl/megengine/xla/rules/nn.py | from functools import partial
from typing import Sequence, Union
import numpy as np
from ...core._imperative_rt import ops as mops
from .. import ir_utils
from ..lib.mlir import ir
from ..lib.mlir.dialects import hlo
from .elemwise import exp
from .hlotensor import HLOTensor
from .indexing import index_with_slices
from .reduction import _get_max_identity, _get_sum_identity
from .tensor import fill, pad, reshape
from .utils import register_lower_rule
@register_lower_rule(mops.Convolution)
def convolution_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
assert isinstance(ctx.op, mops.Convolution)
assert len(args) == 2, "convolution requires 2 arguments"
assert len(ctx.vars_in) == 2, "convolution requires 2 input variables"
assert len(ctx.vars_out) == 1, "convolution requires 1 output variable"
opr = ctx.op
inp, weight = args[0], args[1]
if opr.format == mops.AdaptivePooling.Format.NCHW:
inp_spec, weight_spec, out_spec = (0, 1, 2, 3), (0, 1, 2, 3), (0, 1, 2, 3)
dnums = hlo.ConvDimensionNumbers.get(
input_batch_dimension=inp_spec[0],
input_feature_dimension=inp_spec[1],
input_spatial_dimensions=list(inp_spec[2:]),
kernel_output_feature_dimension=weight_spec[0],
kernel_input_feature_dimension=weight_spec[1],
kernel_spatial_dimensions=list(weight_spec[2:]),
output_batch_dimension=out_spec[0],
output_feature_dimension=out_spec[1],
output_spatial_dimensions=list(out_spec[2:]),
)
ic = inp.shape[1] # NCHW
oc = weight.shape[0] # OIHW or O11HW for dwconv
else:
assert False, "only nchw supported"
num_spatial_dims = len(weight_spec) - 2
window_reversal = ir_utils.dense_bool_elements([False] * num_spatial_dims)
if opr.sparse == mops.BatchConvBias.Sparse.DENSE:
feature_group_count, batch_group_count = 1, 1
else:
assert len(weight.shape) == 5, "mge dpconv weight dim is 5"
feature_group_count, batch_group_count = weight.shape[0], 1
if opr.format == mops.AdaptivePooling.Format.NCHW:
xla_weight_shape = xla_weight_shape = [
weight.shape[0] * weight.shape[1],
weight.shape[2],
weight.shape[3],
weight.shape[4],
]
weight = reshape(weight, xla_weight_shape)
feature_group_count = ir_utils.i64_attr(feature_group_count)
batch_group_count = ir_utils.i64_attr(batch_group_count)
window_strides = (opr.stride_h, opr.stride_w)
window_strides = ir_utils.dense_int_elements(window_strides)
padding = ((opr.pad_h, opr.pad_h), (opr.pad_w, opr.pad_w))
padding = ir_utils.dense_int_elements(padding)
    assert opr.dilate_h == 1 and opr.dilate_w == 1, "dilated conv is not supported yet"
inp_dilation = (opr.dilate_h, opr.dilate_w)
weight_dilation = (opr.dilate_h, opr.dilate_w)
inp_dilation = ir_utils.dense_int_elements(inp_dilation)
weight_dilation = ir_utils.dense_int_elements(weight_dilation)
window_reversal = ir_utils.dense_bool_elements([False] * num_spatial_dims)
precision = ir_utils.precision_attr(inp.dtype, weight.dtype)
return HLOTensor(
hlo.ConvolutionOp(
ir_utils.mge_varinfo_to_ir_type(ctx.vars_out[0]),
inp.tensor,
weight.tensor,
dimension_numbers=dnums,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
window_strides=window_strides,
padding=padding,
lhs_dilation=inp_dilation,
rhs_dilation=weight_dilation,
window_reversal=window_reversal,
precision_config=precision,
).result,
ctx.vars_out[0].shape,
ctx.vars_out[0].dtype,
)
def _dilate_shape(shape, dilation):
"""Utility function for computing the shape resulting from a dilation."""
if not np.all(np.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
def dilate_dim(d, dilation):
return 0 if d == 0 else 1 + dilation * (d - 1)
return tuple(map(dilate_dim, shape, dilation))
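# Illustrative example (shapes assumed): a dilation factor d turns an extent of
# n into 1 + d * (n - 1), and leading dimensions without an explicit factor are
# left untouched:
#
#     >>> _dilate_shape((4, 4), (2, 2))
#     (7, 7)
#     >>> _dilate_shape((1, 1, 4, 4), (2, 2))
#     (1, 1, 7, 7)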
def _conv_general_vjp_lhs_padding(
in_shape,
window_dimensions,
window_strides,
out_shape,
padding,
lhs_dilation,
rhs_dilation,
):
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
pad_before = np.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
pad_after = (
np.add(lhs_dilated_shape, rhs_dilated_shape)
- 1
- out_dilated_shape
- pad_before
)
return list(zip(pad_before, pad_after))
def _conv_general_vjp_rhs_padding(
in_shape,
window_dimensions,
window_strides,
out_shape,
padding,
lhs_dilation,
rhs_dilation,
):
def diff_shape(s1, s2):
return tuple(map(lambda a, b: a - b, s1, s2))
if len(in_shape) == 0: # 0D conv
return []
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
pads_lo = tuple(map(lambda p: p[0], padding))
pads_from_lhs = diff_shape(out_dilated_shape, lhs_dilated_shape)
pads_from_rhs = diff_shape(
diff_shape(rhs_dilated_shape, pads_lo), (1,) * len(pads_lo)
)
pads_hi = tuple(map(lambda *s: sum(s), pads_from_lhs, pads_from_rhs))
return list(zip(pads_lo, pads_hi))
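# Editorial note (hedged): the two helpers above follow the usual convolution
# VJP padding recipes (similar code exists in jax.lax). The "lhs" variant gives
# the padding needed to recover the input gradient by convolving dout with the
# reversed filter, while the "rhs" variant gives the padding needed to recover
# the filter gradient by convolving the input with dout acting as the kernel.
# For example, a 1-d SAME-style case with in_shape=(5,), window=(3,),
# stride=(1,), out_shape=(5,) and padding=((1, 1),) keeps its padding: the lhs
# helper evaluates to [(1, 1)].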
@register_lower_rule("ConvolutionBackwardDataV2", mops.ConvolutionBackwardData)
def conv_backward_data_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
assert (
ctx.param["dilate_h"] == 1 and ctx.param["dilate_w"] == 1
), "dilate_conv is not support now"
if len(args) == 3:
weight, dout, inp = args[0], args[1], args[2]
else:
weight, dout, inp = args[0], args[1], None
if ctx.param["format"] == mops.AdaptivePooling.Format.NCHW:
dnums = ((0, 1, 2, 3), (0, 1, 2, 3), (0, 1, 2, 3))
inp_spec, weight_spec, out_spec = dnums
inp_hw, weight_hw, out_hw = map(lambda s: s[2:], dnums)
inp_dilation = (1, 1)
weight_dilation = (ctx.param["dilate_h"], ctx.param["dilate_w"])
window_strides = (ctx.param["stride_h"], ctx.param["stride_w"])
ph, pw = ctx.param["pad_h"], ctx.param["pad_w"]
padding = ((ph, ph), (pw, pw))
weight_shape = weight.shape
inp_shape = inp.shape if inp else ctx.vars_out[0].shape
ic = inp_shape[1] # NCHW
oc = weight.shape[0] # OIHW or O11HW for dwconv
t_weight_spec = (weight_spec[1], weight_spec[0]) + weight_spec[2:]
dnums = hlo.ConvDimensionNumbers.get(
input_batch_dimension=out_spec[0],
input_feature_dimension=out_spec[1],
input_spatial_dimensions=list(out_spec[2:]),
kernel_output_feature_dimension=t_weight_spec[0],
kernel_input_feature_dimension=t_weight_spec[1],
kernel_spatial_dimensions=list(t_weight_spec[2:]),
output_batch_dimension=inp_spec[0],
output_feature_dimension=inp_spec[1],
output_spatial_dimensions=list(inp_spec[2:]),
)
if ctx.param["sparse"] == mops.BatchConvBias.Sparse.DENSE:
feature_group_count, batch_group_count = 1, 1
else:
weight_shape = weight.shape
assert len(weight_shape) == 5, "mge dpconv weight dim is 5"
feature_group_count, batch_group_count = weight.shape[0], 1
weight_shape = [
weight.shape[1],
weight.shape[0] * weight.shape[2],
weight.shape[3],
weight.shape[4],
]
weight = weight.transpose((1, 0, 2, 3, 4))
weight = weight.reshape(weight_shape)
weight_shape = [
weight_shape[1],
weight_shape[0],
weight_shape[2],
weight_shape[3],
]
padding = _conv_general_vjp_lhs_padding(
np.take(inp_shape, inp_hw),
np.take(weight_shape, weight_hw),
window_strides,
np.take(dout.shape, out_hw),
padding,
inp_dilation,
weight_dilation,
)
rev_filter = HLOTensor(
hlo.ReverseOp(weight.tensor, ir_utils.dense_int_elements(weight_hw)).result
)
window_reversal = ir_utils.dense_bool_elements([False] * (len(weight_spec) - 2))
precision = ir_utils.precision_attr(rev_filter.dtype, dout.dtype)
return HLOTensor(
hlo.ConvolutionOp(
ir_utils.mge_varinfo_to_ir_type(ctx.vars_out[0]),
dout.tensor,
rev_filter.tensor,
dimension_numbers=dnums,
feature_group_count=ir_utils.i64_attr(feature_group_count),
batch_group_count=ir_utils.i64_attr(batch_group_count),
window_strides=ir_utils.dense_int_elements(inp_dilation),
padding=ir_utils.dense_int_elements(padding),
lhs_dilation=ir_utils.dense_int_elements(window_strides),
rhs_dilation=ir_utils.dense_int_elements(weight_dilation),
window_reversal=window_reversal,
precision_config=precision,
).result
)
else:
assert False, "only nchw supported"
@register_lower_rule("ConvolutionBackwardFilterV2")
def conv_backward_filter_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
assert (
ctx.param["dilate_h"] == 1 and ctx.param["dilate_w"] == 1
), "dilate_conv is not support now"
assert len(args) == 3 and len(ctx.vars_out) == 1 and len(ctx.vars_in) == 3
inp, dout, weight = args[0], args[1], args[2]
if ctx.param["format"] == mops.AdaptivePooling.Format.NCHW:
dnums = ((0, 1, 2, 3), (0, 1, 2, 3), (0, 1, 2, 3))
_, weight_spec, _ = dnums
inp_hw, weight_hw, out_hw = map(lambda s: s[2:], dnums)
inp_trans, weight_trans, out_trans = map(lambda s: (s[1], s[0]) + s[2:], dnums)
inp_dilation = (1, 1)
weight_dilation = (ctx.param["dilate_h"], ctx.param["dilate_w"])
window_strides = (ctx.param["stride_h"], ctx.param["stride_w"])
ph, pw = ctx.param["pad_h"], ctx.param["pad_w"]
padding = ((ph, ph), (pw, pw))
weight_shape = weight.shape
inp_shape = inp.shape
ic = inp.shape[1] # NCHW
oc = weight.shape[0] # OIHW or O11HW for dwconv
if ctx.param["sparse"] == mops.BatchConvBias.Sparse.DENSE:
feature_group_count, batch_group_count = 1, 1
else:
weight_shape = weight.shape
assert len(weight_shape) == 5, "mge dpconv weight dim is 5"
feature_group_count, batch_group_count = weight.shape[0], 1
weight_shape = [
weight_shape[2],
weight_shape[0] * weight_shape[1],
weight_shape[3],
weight_shape[4],
]
if batch_group_count > 1:
feature_group_count = batch_group_count
batch_group_count = 1
elif feature_group_count > 1:
batch_group_count = feature_group_count
feature_group_count = 1
padding = _conv_general_vjp_rhs_padding(
np.take(inp_shape, inp_hw),
np.take(weight_shape, weight_hw),
window_strides,
np.take(dout.shape, out_hw),
padding,
inp_dilation,
weight_dilation,
)
dnums = hlo.ConvDimensionNumbers.get(
input_batch_dimension=inp_trans[0],
input_feature_dimension=inp_trans[1],
input_spatial_dimensions=list(inp_trans[2:]),
kernel_output_feature_dimension=out_trans[0],
kernel_input_feature_dimension=out_trans[1],
kernel_spatial_dimensions=list(out_trans[2:]),
output_batch_dimension=weight_trans[0],
output_feature_dimension=weight_trans[1],
output_spatial_dimensions=list(weight_trans[2:]),
)
if batch_group_count > 1:
oup = ir.RankedTensorType.get(
[weight_shape[1], weight_shape[0]] + weight_shape[2:],
ir_utils.mge_dtype_to_ir_type(ctx.vars_out[0].dtype),
)
else:
oup = ir_utils.mge_varinfo_to_ir_type(ctx.vars_out[0])
window_reversal = ir_utils.dense_bool_elements([False] * (len(weight_spec) - 2))
precision = ir_utils.precision_attr(inp.dtype, dout.dtype)
rst = HLOTensor(
hlo.ConvolutionOp(
oup,
inp.tensor,
dout.tensor,
dimension_numbers=dnums,
feature_group_count=ir_utils.i64_attr(feature_group_count),
batch_group_count=ir_utils.i64_attr(batch_group_count),
window_strides=ir_utils.dense_int_elements(weight_dilation),
padding=ir_utils.dense_int_elements(padding),
lhs_dilation=ir_utils.dense_int_elements(inp_dilation),
rhs_dilation=ir_utils.dense_int_elements(window_strides),
window_reversal=window_reversal,
precision_config=precision,
).result
)
if batch_group_count > 1:
rst = rst.reshape(ctx.vars_out[0].shape)
return rst
else:
assert False, "only nchw supported"
def _pooling(
reducer,
unit_factory,
inp,
stride,
kernel,
padding,
base_dilation=None,
kernel_dilation=None,
oshape=None,
):
"""
if pooling on H and W,
stride: len(stride) need to be equal to len(inp.shape)
for NCHW, should be (1, 1, stride_h, stride_w)
for NHWC, should be (1, stride_h, stride_w, 1)
kernel: similar to stride, len(kernel) also need to be equal to len(inp.shape)
padding: similar
for NCHW, should be ((0, 0), (0, 0), (pad_h, pad_h), (pad_w, pad_w)) or (0, 0, pad_h, pad_w)
for NHWC, should be ((0, 0), (pad_h, pad_h), (pad_w, pad_w), (0, 0)) or (0, pad_h, pad_w, 0)
"""
ishape, idtype = inp.shape, inp.dtype
assert oshape is not None, "pooling shape infer is not supported"
assert len(ishape) == len(oshape), f"shape error: {ishape} {oshape}"
def check_param(param, info):
assert len(ishape) == len(
param
), f"pooling: illegal {info} {param} for {ishape}"
base_dilation = base_dilation if base_dilation is not None else (1, 1, 1, 1)
kernel_dilation = kernel_dilation if kernel_dilation is not None else (1, 1, 1, 1)
padding = [(p, p) if isinstance(p, int) else p for p in padding]
check_param(stride, "stride")
check_param(kernel, "kernel")
check_param(padding, "padding")
check_param(base_dilation, "base_dilation")
check_param(kernel_dilation, "kernel_dilation")
rw = hlo.ReduceWindowOp(
ir_utils.make_ir_type_according_meta_tuple(oshape, idtype),
[inp.tensor],
ir_utils.ir_constant_tuple(unit_factory(idtype)),
ir_utils.dense_int_elements(kernel),
window_strides=ir_utils.dense_int_elements(stride),
base_dilations=ir_utils.dense_int_elements(base_dilation),
window_dilations=ir_utils.dense_int_elements(kernel_dilation),
padding=ir.DenseIntElementsAttr.get(
np.asarray(padding, np.int64), shape=(len(padding), 2)
),
)
scalar_type = ir_utils.make_ir_type_according_meta(tuple(), idtype)
reducer_region = rw.regions[0].blocks.append(scalar_type, scalar_type)
with ir.InsertionPoint(reducer_region):
hlo.ReturnOp(reducer(*reducer_region.arguments))
return HLOTensor(rw.result)
maxpooling = partial(_pooling, hlo.MaxOp, _get_max_identity)
sumpooling = partial(_pooling, hlo.AddOp, _get_sum_identity)
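# Illustrative sketch (shapes assumed): for an NCHW input of shape (1, 3, 8, 8),
# a 2x2 max pool with stride 2 and no padding is expressed with one
# stride/kernel/padding entry per input dimension, e.g.
#
#     maxpooling(inp, stride=(1, 1, 2, 2), kernel=(1, 1, 2, 2),
#                padding=(0, 0, 0, 0), oshape=(1, 3, 4, 4))
#
# where inp is an HLOTensor; oshape must be given explicitly because shape
# inference is not implemented in _pooling.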
def avgpooling(
inp,
stride,
kernel,
padding,
count_include_pad,
base_dilation=None,
kernel_dilation=None,
oshape=None,
):
sum_pool = sumpooling(
inp, stride, kernel, padding, base_dilation, kernel_dilation, oshape=oshape
)
if count_include_pad:
ret = sum_pool / float(np.prod(kernel))
else:
# for inp[a,b,c,d], kernel[1,1,2,2], oshape[a,b,e,f]
# div_ishape=[1,1,c,d], div_oshape=[1,1,e,f]
div_ishape = [i if k != 1 else 1 for (k, i) in zip(kernel, inp.shape)]
div_oshape = [o if k != 1 else 1 for (k, o) in zip(kernel, oshape)]
divider = fill(1.0, div_ishape, inp.dtype)
divider = sumpooling(divider, stride, kernel, padding, oshape=div_oshape)
ret = sum_pool / divider
return ret
def _get_adaptive_pool_param(ishape, oshape, tensor_format):
assert len(ishape) == 4 and len(oshape) == 4, "only 2-d pooling supported"
if not isinstance(tensor_format, str):
tensor_format = str(tensor_format)
ishape_hw, oshape_hw = None, None
if tensor_format in str(mops.AdaptivePooling.Format.NCHW):
ishape_hw, oshape_hw = ishape[2:4], oshape[2:4]
elif tensor_format in str(mops.AdaptivePooling.Format.NHWC):
ishape_hw, oshape_hw = ishape[1:3], oshape[1:3]
else:
assert False, f"adaptive pooling only nchw or nhwc, get {tensor_format}"
stride_hw = [(isize // osize) for isize, osize in zip(ishape_hw, oshape_hw)]
kernel_hw = [
(isize - (osize - 1) * stride)
for isize, osize, stride in zip(ishape_hw, oshape_hw, stride_hw)
]
stride, kernel = None, None
if tensor_format in str(mops.AdaptivePooling.Format.NCHW):
stride = (1, 1, *stride_hw)
kernel = (1, 1, *kernel_hw)
elif tensor_format in str(mops.AdaptivePooling.Format.NHWC):
stride = (1, *stride_hw, 1)
kernel = (1, *kernel_hw, 1)
else:
assert False, f"adaptive pooling only nchw or nhwc, get {tensor_format}"
padding = (0, 0, 0, 0)
return kernel, stride, padding
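# Illustrative example (shapes assumed, not from the original module): an
# adaptive 2x2 output from an NCHW (1, 3, 8, 8) input reduces to an ordinary
# pool with stride 4 and kernel 4 along each spatial dimension:
#
#     >>> _get_adaptive_pool_param((1, 3, 8, 8), (1, 3, 2, 2), "NCHW")
#     ((1, 1, 4, 4), (1, 1, 4, 4), (0, 0, 0, 0))
#
# returned in (kernel, stride, padding) order.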
def _select_and_scatter(
inp, source, init_value, kernel, stride, padding, selector, scatter
):
oshape, odtype = inp.shape, inp.dtype
scalar_type = ir_utils.make_ir_type_according_meta(tuple(), odtype)
op = hlo.SelectAndScatterOp(
ir_utils.make_ir_type_according_meta(oshape, odtype),
inp.tensor,
source.tensor,
HLOTensor(init_value).tensor,
window_dimensions=ir_utils.dense_int_elements(kernel),
window_strides=ir_utils.dense_int_elements(stride),
padding=ir.DenseIntElementsAttr.get(
np.asarray(padding, np.int64), shape=(len(padding), 2)
),
)
select_block = op.select.blocks.append(scalar_type, scalar_type)
with ir.InsertionPoint(select_block):
blockargs = [HLOTensor(blockarg) for blockarg in select_block.arguments]
hlo.ReturnOp([selector(*blockargs).tensor])
scatter_block = op.scatter.blocks.append(scalar_type, scalar_type)
with ir.InsertionPoint(scatter_block):
blockargs = [HLOTensor(blockarg) for blockarg in scatter_block.arguments]
hlo.ReturnOp([scatter(*blockargs).tensor])
return HLOTensor(op.result)
def maxpooling_grad(
x,
dy,
kernel,
stride,
padding,
base_dilation=None,
kernel_dilation=None,
expand_padding=True,
):
assert base_dilation is None and kernel_dilation is None
assert expand_padding == True
padding = [(p, p) if isinstance(p, int) else p for p in padding]
dxdtype, dxshape = x.dtype, x.shape
assert dxdtype == "float32" or dxdtype == "float16"
org_padding, new_padding = padding, padding
if expand_padding:
pads = [(lo, hi, 0) for (lo, hi) in padding]
padded_x = pad(x, _get_max_identity(dxdtype), pads)
new_padding = [(0, 0) for _ in padding]
selector = lambda x, y: x >= y
scatter = lambda x, y: x + y
out = _select_and_scatter(
padded_x, dy, 0.0, kernel, stride, new_padding, selector, scatter
)
if expand_padding:
start_indices = [lo for (lo, hi) in org_padding]
stop_indices = [lo + d for ((lo, hi), d) in zip(org_padding, dxshape)]
slices = [
slice(start, stop, 1) for start, stop in zip(start_indices, stop_indices)
]
out = index_with_slices(out, slices)
return out
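# Editorial note (hedged): maxpooling_grad routes dy back to the argmax
# positions of the forward pass: it pads x with the max identity (-inf for
# floats), runs select-and-scatter with a ">=" selector and "+" scatter, and
# finally slices the padding off again so the result matches x's shape.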
def avgpooling_grad(
x,
dy,
kernel,
stride,
padding,
base_dilation=None,
kernel_dilation=None,
count_include_pad=True,
):
padding = [(p, p) if isinstance(p, int) else p for p in padding]
base_dilation = base_dilation if base_dilation is not None else (1, 1, 1, 1)
kernel_dilation = kernel_dilation if kernel_dilation is not None else (1, 1, 1, 1)
if count_include_pad:
dy = dy / float(np.prod(kernel))
else:
div_ishape = [i if k != 1 else 1 for (k, i) in zip(kernel, x.shape)]
div_oshape = [o if k != 1 else 1 for (k, o) in zip(kernel, dy.shape)]
divider = fill(1.0, div_ishape, dy.dtype)
divider = sumpooling(divider, stride, kernel, padding, oshape=div_oshape)
dy = dy / divider
pads = _conv_general_vjp_lhs_padding(
x.shape, kernel, stride, dy.shape, padding, base_dilation, kernel_dilation
)
padding_dy_config = [(lo, hi, st - 1) for (lo, hi), st in zip(pads, stride)]
padded_dy = pad(dy, _get_sum_identity(dy.dtype), padding_dy_config)
ret = sumpooling(
padded_dy,
stride=base_dilation,
kernel=kernel,
padding=[(0, 0)] * len(x.shape),
base_dilation=(1, 1, 1, 1),
kernel_dilation=kernel_dilation,
oshape=x.shape,
)
return ret
@register_lower_rule(mops.AdaptivePooling)
def adaptive_pooling_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
assert len(ctx.vars_in) == 2 and len(args) == 2 and len(ctx.vars_out) == 1
assert ctx.op.shape == ctx.vars_in[1].bound_data.tolist() and len(ctx.op.shape) == 2
ishape, oshape = ctx.vars_in[0].shape, ctx.vars_out[0].shape
kernel, stride, padding = _get_adaptive_pool_param(ishape, oshape, ctx.op.format)
if ctx.op.mode == mops.AdaptivePooling.Mode.AVERAGE:
return avgpooling(
args[0], stride, kernel, padding, count_include_pad=True, oshape=oshape
)
elif ctx.op.mode == mops.AdaptivePooling.Mode.AVERAGE_COUNT_EXCLUDE_PADDING:
return avgpooling(
args[0], stride, kernel, padding, count_include_pad=False, oshape=oshape
)
else:
assert (
ctx.op.mode == mops.AdaptivePooling.Mode.MAX
), f"unknown adaptive pooling mode {ctx.op.mode}"
return maxpooling(args[0], stride, kernel, padding, oshape=oshape)
@register_lower_rule("AdaptivePoolingBackwardV1")
def adaptive_pooling_grad_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
# for forward: y = adaptive_pool(x, tshape)
# for backward: dx = adaptive_pool_grad(x, tshape, y, dy)
assert len(args) == 4 and len(ctx.vars_in) == 4 and len(ctx.vars_out) == 1
var_x, _, var_y, _ = ctx.vars_in
x, dy = args[0], args[3]
tensor_format, pool_mode = ctx.param["format"], ctx.param["mode"]
kernel, stride, padding = _get_adaptive_pool_param(
var_x.shape, var_y.shape, tensor_format
)
if pool_mode in str(mops.AdaptivePooling.Mode.AVERAGE):
return avgpooling_grad(x, dy, kernel, stride, padding, count_include_pad=True)
elif pool_mode in str(mops.AdaptivePooling.Mode.AVERAGE_COUNT_EXCLUDE_PADDING):
return avgpooling_grad(x, dy, kernel, stride, padding, count_include_pad=False)
else:
assert pool_mode in str(
mops.AdaptivePooling.Mode.MAX
), f"unknown adaptive pooling mode {pool_mode}"
return maxpooling_grad(x, dy, kernel, stride, padding)
def _get_pool_param(kernel_hw, stride_hw, padding_hw, tensor_format):
assert len(kernel_hw) == 2 and len(stride_hw) == 2 and len(padding_hw) == 2
# for backward, the tensor format is str
if not isinstance(tensor_format, str):
tensor_format = str(tensor_format)
stride, kernel, padding = None, None, None
if tensor_format in str(mops.AdaptivePooling.Format.NCHW):
stride = (1, 1, *stride_hw)
kernel = (1, 1, *kernel_hw)
padding = (0, 0, *padding_hw)
elif tensor_format in str(mops.AdaptivePooling.Format.NHWC):
stride = (1, *stride_hw, 1)
kernel = (1, *kernel_hw, 1)
padding = (0, *padding_hw, 0)
else:
assert False, f"adaptive pooling only nchw or nhwc, get {tensor_format}"
return kernel, stride, padding
@register_lower_rule(mops.Pooling)
def pooling_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
assert len(args) == 1, f"pooling should have only 1 input, but give {len(args)}"
assert len(ctx.vars_in) == 1 and len(ctx.vars_out) == 1
assert (
args[0].ndim == 4
), f"pooling only support 4d tensor, but give {args[0].shape}"
opr = ctx.op
kernel, stride, padding = _get_pool_param(
(opr.window_h, opr.window_w),
(opr.stride_h, opr.stride_w),
(opr.pad_h, opr.pad_w),
opr.format,
)
oshape, _ = ctx.vars_out[0].shape, ctx.vars_out[0].dtype
if opr.mode == mops.AdaptivePooling.Mode.AVERAGE:
return avgpooling(
args[0], stride, kernel, padding, count_include_pad=True, oshape=oshape
)
elif opr.mode == mops.AdaptivePooling.Mode.AVERAGE_COUNT_EXCLUDE_PADDING:
return avgpooling(
args[0], stride, kernel, padding, count_include_pad=False, oshape=oshape
)
else:
assert (
opr.mode == mops.AdaptivePooling.Mode.MAX
), f"unknown adaptive pooling mode {opr.mode}"
return maxpooling(args[0], stride, kernel, padding, oshape=oshape)
@register_lower_rule("PoolingBackwardV1")
def pooling_backward_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
# for forward: y = pool(x)
# for backward: dx = pool_grad(x, y, dy)
assert len(args) == 3 and len(ctx.vars_in) == 3 and len(ctx.vars_out) == 1
tensor_format, pool_mode = ctx.param["format"], ctx.param["mode"]
kernel, stride, padding = _get_pool_param(
(ctx.param["window_h"], ctx.param["window_w"]),
(ctx.param["stride_h"], ctx.param["stride_w"]),
(ctx.param["pad_h"], ctx.param["pad_w"]),
tensor_format,
)
x, dy = args[0], args[2]
if pool_mode in str(mops.AdaptivePooling.Mode.AVERAGE):
return avgpooling_grad(x, dy, kernel, stride, padding, count_include_pad=True)
elif pool_mode in str(mops.AdaptivePooling.Mode.AVERAGE_COUNT_EXCLUDE_PADDING):
return avgpooling_grad(x, dy, kernel, stride, padding, count_include_pad=False)
else:
assert pool_mode in str(
mops.AdaptivePooling.Mode.MAX
), f"unknown adaptive pooling mode {pool_mode}"
return maxpooling_grad(x, dy, kernel, stride, padding)
def softmax(x: HLOTensor, axis: int = -1):
assert isinstance(axis, int), f"axis should be int, but get {axis}({type(axis)})"
x_exp = exp(x)
x_exp_sum = x_exp.sum(axis=axis, keepdims=True)
y = x_exp / x_exp_sum
return y
def softmax_grad(y: HLOTensor, dy: HLOTensor, axis: int = -1):
assert isinstance(axis, int), f"axis should be int, but get {axis}({type(axis)})"
ydy = y * dy
ydy_sum = ydy.sum(axis=axis, keepdims=True)
dx = ydy - y * ydy_sum
return dx
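# Editorial note: softmax_grad implements the standard softmax Jacobian-vector
# product along `axis`,
#     dx_i = y_i * (dy_i - sum_j y_j * dy_j),
# which the expression y * dy - y * sum(y * dy, keepdims=True) above computes;
# keepdims=True lets the reduction broadcast back over `axis`.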
@register_lower_rule(mops.Softmax)
def softmax_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
assert (
len(args) == 1 and len(ctx.vars_in) == 1 and len(ctx.vars_out) == 1
), f"{len(args)}, {len(ctx.vars_in)}, {len(ctx.vars_out)}"
return softmax(args[0], ctx.op.axis)
@register_lower_rule("SoftmaxBackward")
def softmax_backward_lower(ctx, *args: Union[HLOTensor, Sequence[HLOTensor]]):
assert (
len(args) == 2 and len(ctx.vars_in) == 2 and len(ctx.vars_out) == 1
), f"{len(args)}, {len(ctx.vars_in)}, {len(ctx.vars_out)}"
return softmax_grad(args[0], args[1], ctx.param["axis"]) | PypiClean |
/FFGo-1.12.7-py3-none-any.whl/ffgo/gui/configwindow.py | import os
import platform
import sys
import tkinter as tk
import tkinter.filedialog as fd
from tkinter.messagebox import showinfo, showerror
from tkinter import ttk
from .. import misc
from .tooltip import ToolTip
from ..constants import *
def setupTranslationHelper(config):
global pgettext
translationHelper = misc.TranslationHelper(config)
pgettext = translationHelper.pgettext
class ValidatingWidget:
"""Class holding widget-metadata to ease input validation.
This class allows a number of widgets to have their input validated
using the same code, with error reporting when the input is invalid.
"""
def __init__(self, widget, paneWidget, validateFunc, invalidFunc):
for attr in ("widget", "paneWidget", "validateFunc", "invalidFunc"):
setattr(self, attr, locals()[attr])
class ConfigWindow:
def __init__(self, master, config, text):
self.master = master
self.config = config
self.text = text
setupTranslationHelper(config)
# List of ValidatingWidget instances for “standard” input validation.
self.validatingWidgets = []
self.apt_data_source = tk.StringVar()
self.auto_update_apt = tk.StringVar()
self.FG_bin = tk.StringVar()
self.FG_root = tk.StringVar()
self.FG_scenery = tk.StringVar()
self.FG_aircraft = tk.StringVar()
self.FG_download_dir = tk.StringVar()
self.FG_working_dir = tk.StringVar()
self.MagneticField_bin = tk.StringVar()
self.language = tk.StringVar()
self.baseFontSize = tk.StringVar()
self.rememberMainWinPos = tk.IntVar()
self.autoscrollFGOutput = tk.IntVar()
self.fakeParkposOption = tk.IntVar()
if self.config.apt_data_source.get():
self.apt_data_source.set(_('Scenery'))
else:
self.apt_data_source.set(_('Old default'))
if self.config.auto_update_apt.get():
self.auto_update_apt.set(_('Automatic'))
else:
self.auto_update_apt.set(_('Manual'))
self.FG_bin.set(self.config.FG_bin.get())
self.FG_root.set(self.config.FG_root.get())
self.FG_scenery.set(self.config.FG_scenery.get())
self.FG_aircraft.set(self.config.FG_aircraft.get())
self.FG_download_dir.set(self.config.FG_download_dir.get())
self.FG_working_dir.set(self.config.FG_working_dir.get())
self.MagneticField_bin.set(self.config.MagneticField_bin.get())
if self.config.language.get():
self.language.set(self.config.language.get())
else:
self.language.set('-')
self.baseFontSize.set(self.config.baseFontSize.get())
self.rememberMainWinPos.set(self.config.saveWindowPosition.get())
self.autoscrollFGOutput.set(self.config.autoscrollFGOutput.get())
self.fakeParkposOption.set(self.config.fakeParkposOption.get())
for name in ("aircraftStatsShowPeriod", "aircraftStatsExpiryPeriod",
"airportStatsShowPeriod", "airportStatsExpiryPeriod"):
setattr(self, name, tk.IntVar())
tkVar = getattr(self, name)
tkVar.set(getattr(self.config, name).get())
self.reset_flag = False
self.initToolTipMessages()
# -----------------------------------------------------------------------------
self.top = tk.Toplevel(self.master)
self.top.grid_rowconfigure(0, weight=100)
self.top.grid_columnconfigure(0, weight=100)
self.top.grab_set() # Focus input on that window.
self.top.title(_('Preferences'))
self.top.transient(self.master)
self.main = ttk.Frame(self.top, padding=("12p", "12p", "12p", 0))
self.main.grid(row=0, column=0, sticky="nsew")
self.top.grid_rowconfigure(0, weight=100)
self.top.grid_columnconfigure(0, weight=100)
self.noteBook = ttk.Notebook(self.main)
self.noteBook.grid(row=0, column=0, sticky="nsew")
self.main.grid_rowconfigure(0, weight=100)
self.main.grid_columnconfigure(0, weight=100)
# Padding inside each pane of the notebook
self.paddingInsideNotebookPanes = "12p"
self.frameFG = self.widgetFG(self.noteBook)
self.noteBook.add(self.frameFG, text=_('FlightGear settings'))
self.frameStats = self.widgetStats(self.noteBook)
self.noteBook.add(self.frameStats, text=_('Statistics'))
self.frameMisc = self.widgetMisc(self.noteBook)
self.noteBook.add(self.frameMisc, text=_('Miscellaneous'))
# ----- Buttons ---------------------------------------------------------------
self.frame_Buttons = ttk.Frame(self.main, padding=(0, "16p", 0, "16p"))
self.frame_Buttons.grid(row=1, column=0, sticky="nse")
self.main.grid_rowconfigure(1, weight=100)
saveButton = ttk.Button(self.frame_Buttons, text=_('Save settings'),
command=self.saveAndQuit, padding="4p")
saveButton.grid(row=0, column=0)
self.frame_Buttons.grid_rowconfigure(0, weight=100)
self.frame_Buttons.grid_columnconfigure(0, pad="18p")
closeButton = ttk.Button(self.frame_Buttons, text=_('Cancel'),
command=self.quit, padding="4p")
closeButton.grid(row=0, column=1)
self.top.protocol("WM_DELETE_WINDOW", closeButton.invoke)
self.top.bind('<Escape>', lambda event, b=closeButton: b.invoke())
def findFG_bin(self):
self.chooseExecutable(self.FG_bin)
def findMagneticField_bin(self):
self.chooseExecutable(self.MagneticField_bin)
def chooseExecutable(self, cfgVar):
try:
p = fd.askopenfilename(parent=self.top,
initialdir=self.getInitialDir(cfgVar.get()),
title=_('Path to executable file:'))
if p:
cfgVar.set(p)
except tk.TclError:
pass
def findFG_root(self):
try:
p = fd.askdirectory(parent=self.top,
initialdir=self.getInitialDir(
self.FG_root.get()),
title='FG_ROOT:')
if p:
self.FG_root.set(p)
except tk.TclError:
return
def findFG_scenery(self):
try:
p = fd.askdirectory(parent=self.top,
initialdir=self.getInitialDir(
self.FG_scenery.get()),
title='FG_SCENERY:')
if p:
self.FG_scenery.set(p)
except tk.TclError:
return
def findFG_aircraft(self):
try:
p = fd.askdirectory(parent=self.top,
initialdir=self.getInitialDir(
self.FG_aircraft.get()),
title=_('Additional aircraft path:'))
if p:
self.FG_aircraft.set(p)
except tk.TclError:
return
def findFgDownloadDir(self):
try:
p = fd.askdirectory(parent=self.top,
initialdir=self.getInitialDir(
self.FG_download_dir.get()),
title=_('Download directory (optional):'))
if p:
self.FG_download_dir.set(p)
except tk.TclError:
return
def findFgWorkingDir(self):
try:
p = fd.askdirectory(parent=self.top,
initialdir=self.getInitialDir(
self.FG_working_dir.get()),
title=_('Working directory (optional):'))
if p:
self.FG_working_dir.set(p)
except tk.TclError:
return
def getInitialDir(self, path):
if os.path.isdir(path):
return path
elif os.path.isfile(path) or os.path.islink(path):
return os.path.split(path)[0]
elif os.path.isdir(HOME_DIR):
return HOME_DIR
elif platform.system() == "Windows":
if os.path.isdir("C:\\"):
return "C:\\"
else:
return os.getenv("USERPROFILE", os.getcwd())
elif os.path.isdir("/"):
return "/"
else:
return os.getcwd()
def getLanguages(self):
"""Walk through a locale directory and return list of
supported languages based on directory names."""
res = []
for d in misc.resourcelistDir("data/locale"):
if misc.resourceIsDir("data/locale/" + d):
res.append(d)
res.sort()
res = ['-'] + res
return res
def initToolTipMessages(self):
self.tooltip_bin = _("""\
Name or path to the FlightGear executable ('{fgfs}'), or to
'run_fgfs.sh' in case you are using the 'download_and_compile.sh' script.
Note: this corresponds to FG_BIN in the configuration file.""").format(
fgfs=FG_EXECUTABLE)
self.tooltip_root = _("""\
Path to FlightGear's main data directory, containing the “base package”.
On Linux, this directory is likely to be something like
/usr/share/games/flightgear if FlightGear was installed using your
distribution package manager. This will be passed to '{fgfs}' (the
FlightGear executable) as the value for the --fg-root option. You may
consult <http://wiki.flightgear.org/$FG_ROOT> for details.""").format(
fgfs=FG_EXECUTABLE)
self.tooltip_scenery = _("""\
Path(s) to scenery directories.
You can specify more than one path (separated by {separator!r}), ordered
from highest to lowest priority. You may want to include your TerraSync
directory (if any) in this list in order to specify its priority
relatively to any custom scenery directories you may have installed.
This setting will be passed to '{fgfs}' (the FlightGear executable) as
the value for the --fg-scenery option. It is documented at
<http://wiki.flightgear.org/$FG_SCENERY>.
Note:
The default TerraSync directory in FlightGear 2016.1.1 is:
- $FG_HOME/TerraSync on non-Windows systems;
- something such as
C:\\Users\\<username>\\Documents\\FlightGear\\TerraSync
on Windows.
You may consult <http://wiki.flightgear.org/TerraSync> and
<http://wiki.flightgear.org/$FG_HOME> for more information.""").format(
prg=PROGNAME, separator=os.pathsep, fgfs=FG_EXECUTABLE)
self.tooltip_aircraft = _("""\
Path(s) to additional aircraft directories.
Multiple directories separated by {separator!r} may be specified.
The $FG_ROOT/{defaultAircraftDir} directory is always used; thus, there
is no need to list it here. Leave this field empty unless you are using
additional aircraft directories.""").format(
separator=os.pathsep, defaultAircraftDir=DEFAULT_AIRCRAFT_DIR)
self.tooltip_download_dir = _("""\
Optional parameter specifying FlightGear's download directory.
FlightGear uses this directory to store things it automatically
downloads, such as TerraSync scenery and aircraft (the latter: when
using FlightGear's built-in launcher).
Leave this field empty if you want to use FlightGear's default download
directory.""")
self.tooltip_working_dir = _("""\
Optional parameter specifying FlightGear's working directory.
That is the directory FlightGear will be run from. It can affect the
default location of some files created by FlightGear (screenshots...).
If left blank, the working directory is the user's home directory.""")
self.tooltip_langMenu = _("""\
Language used in {prg}. If no language is selected, {prg} will use the
system language.""").format(prg=PROGNAME)
self.tooltip_aptMenu = _("""\
Select the primary data source where {prg} will be looking for
information about parking positions. There are two options:
Scenery - Parking data will be read from
$FG_SCENERY/Airports/[I]/[C]/[A]/[ICAO].groundnet.xml. {prg} will
use the first match if FG_SCENERY contains several paths.
Old default - Parking data will be read from
$FG_ROOT/AI/Airports/[ICAO]/parking.xml.
(for example, for the EDDF airport, [ICAO] should be replaced with
EDDF, [I] with E, [C] with D, [A] with D and [O] with F:
[I]/[C]/[A]/[ICAO].groundnet.xml → E/D/D/EDDF.groundnet.xml)
Note:
In both cases, if no parking position is found, {prg} will look into
the apt.dat files present inside scenery paths under NavData/apt.
With FlightGear 2.6 and later, it is advised to choose "Scenery"; it
is now the default in {prg}.
For more information, you may consult:
http://wiki.flightgear.org/About_Scenery/Airports""").format(prg=PROGNAME)
self.tooltip_autoAptMenu = _("""\
Automatic - {prg} will try to keep track of changes to the apt.dat
files, and will automatically rebuild its own airport database
({aptDigest}) when this happens.
Manual - The “Rebuild Airport Database” button must be used every time
the ordered list of apt.dat files is changed, or any of these
files.""").format(prg=PROGNAME, aptDigest=APT)
self.tooltip_rebuildApt = _("""\
Rebuild the airport database from the apt.dat files present inside
scenery paths under NavData/apt. This must be done every time the
ordered list of these files is changed, or any of their contents. If you
have left “Airport database update” to its default setting of Automatic,
you don't have to worry about that: the rebuild will be done
automatically every time it is needed.""")
self.tooltip_fontSize = _("""\
Set the base font size in the range from {0} to {1}. Zero is a special
value corresponding to a platform-dependent default size.""").format(
MIN_BASE_FONT_SIZE, MAX_BASE_FONT_SIZE)
self.tooltip_MagneticFieldBin = _("""\
Name or path to GeographicLib's MagneticField executable. If left
blank, '{MagneticField}' will be searched in your PATH.""").format(
MagneticField=misc.executableFileName("MagneticField"))
self.tooltip_rememberMainWinPos = _("""\
When saving the configuration, don't store the main window size only,
but also its position (i.e., the offsets from the screen borders).
When this option is unchecked, only the main window size is stored.""")
self.tooltip_autoscrollFGOutput = _(
"Automatically scroll the FlightGear Output Window to the end "
"every time new text is received from FlightGear's stdout or "
"stderr stream.")
self.tooltip_fakeParkposOption = _(
"Translate the --parkpos option into a sequence of --lat, --lon "
"and --heading options. This is useful when --parkpos is broken "
"in FlightGear; otherwise, it is probably better to leave this "
"option disabled.")
def quit(self):
"""Quit without saving."""
# Destroying more widgets would probably be better for memory...
for w in (self.top, self.main, self.noteBook, self.frameFG,
self.frameMisc):
w.destroy()
def resetBaseFontSize(self):
self.baseFontSize.set(int(float(DEFAULT_BASE_FONT_SIZE)))
def saveAndQuit(self):
if not self.validateStandardWidgets():
return
if self.apt_data_source.get() == _('Scenery'):
self.config.apt_data_source.set(1)
else:
self.config.apt_data_source.set(0)
if self.auto_update_apt.get() == _('Automatic'):
self.config.auto_update_apt.set(1)
else:
self.config.auto_update_apt.set(0)
self.config.FG_bin.set(self.FG_bin.get())
self.config.FG_root.set(self.FG_root.get())
self.config.FG_scenery.set(self.FG_scenery.get())
self.config.FG_aircraft.set(self.FG_aircraft.get())
self.config.FG_download_dir.set(self.FG_download_dir.get())
self.config.FG_working_dir.set(self.FG_working_dir.get())
self.config.MagneticField_bin.set(self.MagneticField_bin.get())
if self.language.get() == '-':
self.config.language.set('')
else:
self.config.language.set(self.language.get())
self.saveBaseFontSize()
self.config.saveWindowPosition.set(self.rememberMainWinPos.get())
self.config.autoscrollFGOutput.set(self.autoscrollFGOutput.get())
self.config.fakeParkposOption.set(self.fakeParkposOption.get())
for name in ("aircraftStatsShowPeriod", "aircraftStatsExpiryPeriod",
"airportStatsShowPeriod", "airportStatsExpiryPeriod"):
tkConfigVar = getattr(self.config, name)
tkConfigVar.set(getattr(self, name).get())
self.config.write(text=self.text)
self.reset_flag = True
self.quit()
def saveBaseFontSize(self):
value = self.validateBaseFontSize()
if int(self.config.baseFontSize.get()) != int(value):
message = _('Some changes may need a restart to be effective')
detail = _("It may be necessary to restart {prg} in order to "
"see the full effects of changing the font size.") \
.format(prg=PROGNAME)
showinfo(_('{prg}').format(prg=PROGNAME), message, detail=detail,
parent=self.top)
self.config.baseFontSize.set(value)
self.config.setupFonts() # Apply the change
def validateBaseFontSize(self):
v = self.getBaseFontSize()
min_size = int(float(MIN_BASE_FONT_SIZE))
max_size = int(float(MAX_BASE_FONT_SIZE))
if v != 0 and v < min_size:
size = min_size
elif v > max_size:
size = max_size
else:
size = v
return str(size)
def getBaseFontSize(self):
try:
v = int(float(self.baseFontSize.get()))
except ValueError:
v = int(float(DEFAULT_BASE_FONT_SIZE))
return v
def widgetFG(self, parent):
"""FlightGear settings widget."""
frame_FG = ttk.Frame(parent, padding=self.paddingInsideNotebookPanes)
frame_FG.grid_columnconfigure(0, weight=100)
def addSettingsLine(rowNum, isLast, container, labelText, tkVar,
tooltipText, buttonCallback):
verticalSpaceBetweenRows = "12p"
label = ttk.Label(container, text=labelText)
label.grid(row=3*rowNum, column=0, columnspan=2, sticky="w")
container.grid_rowconfigure(3*rowNum, weight=100)
ToolTip(label, tooltipText)
entry = ttk.Entry(container, width=50, textvariable=tkVar)
entry.grid(row=3*rowNum+1, column=0, sticky="ew")
container.grid_rowconfigure(3*rowNum+1, weight=100)
ToolTip(entry, tooltipText)
button = ttk.Button(container, text=_('Find'),
command=buttonCallback)
button.grid(row=3*rowNum+1, column=1, padx="12p")
if not isLast:
spacer = ttk.Frame(container)
spacer.grid(row=3*rowNum+2, column=0, sticky="nsew")
container.grid_rowconfigure(
3*rowNum+2, minsize=verticalSpaceBetweenRows, weight=100)
t = ((_("FlightGear executable:"),
self.FG_bin, self.tooltip_bin, self.findFG_bin),
('FG_ROOT:', self.FG_root,
self.tooltip_root, self.findFG_root),
('FG_SCENERY:', self.FG_scenery,
self.tooltip_scenery, self.findFG_scenery),
(_('Additional aircraft path(s):'), self.FG_aircraft,
self.tooltip_aircraft, self.findFG_aircraft),
(_('Download directory (optional):'), self.FG_download_dir,
self.tooltip_download_dir, self.findFgDownloadDir),
(_('Working directory (optional):'), self.FG_working_dir,
self.tooltip_working_dir, self.findFgWorkingDir))
lt = len(t)
for i, (labelText, tkVar, tooltipText, buttonCallback) in enumerate(t):
addSettingsLine(i, i == lt-1, frame_FG, labelText, tkVar,
tooltipText, buttonCallback)
return frame_FG
def widgetStats(self, parent):
"""Widget used for the “Statistics” pane of the Notebook."""
outerFrame = ttk.Frame(parent, padding=self.paddingInsideNotebookPanes)
outerFrame.grid_columnconfigure(0, weight=0) # default: non-stretchable
outerFrame.grid_columnconfigure(1, weight=100)
# Common width for all 4 Spinbox instances that are going to be created
spinboxWd = 6
nonNegativeIntValidateCmd = self.master.register(
self._nonNegativeIntValidateFunc)
statsPeriodInvalidCmd = self.master.register(
self._statsPeriodInvalidFunc)
def createLine(rowNum, isLast, container, tkVar, labelText,
tooltipText):
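            # Each statistics line uses two grid rows in 'container':
            #   row 2*rowNum   : label (column 0) and Spinbox (column 1),
            #   row 2*rowNum+1 : vertical spacer (omitted for the last line).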
verticalSpaceBetweenRows = "12p"
label = ttk.Label(container, text=labelText)
label.grid(row=2*rowNum, column=0, sticky="w")
container.grid_rowconfigure(2*rowNum, weight=0) # not stretchable
ToolTip(label, tooltipText, autowrap=True)
spinbox = tk.Spinbox(
container, from_=0, to=sys.maxsize, increment=1,
repeatinterval=20, textvariable=tkVar,
width=spinboxWd, validate="focusout",
validatecommand=(nonNegativeIntValidateCmd, "%P"),
invalidcommand=(statsPeriodInvalidCmd, "%W", "%P"))
# Used to run the validation code manually in some cases, such as
# when the user clicks on the “Save” button (otherwise, the
# validate command isn't called).
self.validatingWidgets.append(
# 'container': pane of self.noteBook that must be selected to
# allow the user to see the widget with invalid contents
ValidatingWidget(spinbox, container,
self._nonNegativeIntValidateFunc,
self._statsPeriodInvalidFunc))
spinbox.grid(row=2*rowNum, column=1, sticky="w")
ToolTip(spinbox, tooltipText, autowrap=True)
if not isLast: # insert a non-stretchable spacer
spacer = ttk.Frame(container)
spacer.grid(row=2*rowNum+1, column=0, sticky="nsew")
container.grid_rowconfigure(
2*rowNum+1, minsize=verticalSpaceBetweenRows, weight=0)
t = ((self.aircraftStatsShowPeriod,
_("Aircraft statistics show period: "),
_("The “use count” for each aircraft is the number of days "
"it was used during the last n days, where n is the number "
"entered here.")),
(self.aircraftStatsExpiryPeriod,
_("Aircraft statistics expiry period: "),
_("{prg} automatically forgets about dates you used a "
"given aircraft when they get older than this number of "
"days.").format(prg=PROGNAME)),
(self.airportStatsShowPeriod,
_("Airports statistics show period: "),
_("The “visit count” for each airport is the number of days "
"it was visited during the last n days, where n is the "
"number entered here.")),
(self.airportStatsExpiryPeriod,
_("Airports statistics expiry period: "),
_("{prg} automatically forgets about dates you visited a "
"given airport when they get older than this number of "
"days.").format(prg=PROGNAME)))
lt = len(t)
for i, (tkVar, labelText, tooltipText) in enumerate(t):
createLine(i, i == lt-1, outerFrame, tkVar, labelText, tooltipText)
return outerFrame
def _nonNegativeIntValidateFunc(self, text):
"""Validate a string that should contain a non-negative integer."""
try:
n = int(text)
except ValueError:
return False
return (n >= 0)
def _statsPeriodInvalidFunc(self, widgetPath, text):
"""
Callback function used when an invalid number of days has been input."""
widget = self.master.nametowidget(widgetPath)
# Get the Tkinter “window name” of the current pane
currentPaneWPath = self.noteBook.select()
# If the validation failure was triggered by the user switching to
# another pane, get back to the pane where there is invalid input.
if (self.master.nametowidget(currentPaneWPath) is not self.frameStats):
self.noteBook.select(self.frameStats)
message = _('Invalid number of days')
detail = _("A non-negative integer is required.")
showerror(_('{prg}').format(prg=PROGNAME), message, detail=detail,
parent=self.top)
widget.focus_set() # give focus to the widget with invalid input
def widgetMisc(self, parent):
"""Miscellaneous settings widget."""
outerFrame = ttk.Frame(parent, padding=self.paddingInsideNotebookPanes)
verticalSpaceBetweenRows = "6p"
horizSpaceBetweenLabelAndControl = "6p"
horizSeparationForUnrelatedThings = "15p"
def addHorizSpacer(container, rowNum, colNum,
minWidth=horizSpaceBetweenLabelAndControl,
weight=0):
"""Add a horizontal spacer."""
hSpacer = ttk.Frame(container)
hSpacer.grid(row=rowNum, column=colNum, sticky="ew")
container.grid_columnconfigure(colNum, minsize=minWidth,
weight=weight)
def addVertSpacer(container, rowNum, colNum=0,
minHeight=verticalSpaceBetweenRows, weight=100):
"""Add a vertical spacer."""
spacer = ttk.Frame(container)
spacer.grid(row=rowNum, column=colNum, sticky="ns")
container.grid_rowconfigure(
rowNum, minsize=minHeight, weight=weight)
        # Logical row number in outerFrame. For each “logical” row, there are
        # two “physical” rows in outerFrame's grid, one with widgets followed
# by one only containing a spacer frame (except there is no spacer
# after the last row).
rowNum = 0
frame1 = ttk.Frame(outerFrame)
frame1.grid(row=2*rowNum, column=0, sticky="ew")
outerFrame.grid_rowconfigure(2*rowNum, weight=0) # non-stretchable
outerFrame.grid_columnconfigure(0, weight=100) # stretchable
# Language menu
rowNum += 1
frame1Left = ttk.Frame(frame1)
frame1Left.grid(row=0, column=0, sticky="ew")
frame1.grid_columnconfigure(0, weight=100) # stretchable
langLabel = ttk.Label(frame1Left, text=_('Language:'))
ToolTip(langLabel, self.tooltip_langMenu)
langLabel.grid(row=0, column=0, sticky="w")
addHorizSpacer(frame1Left, 0, 1)
languages = self.getLanguages()
langMenu = ttk.OptionMenu(frame1Left, self.language,
self.language.get(), *languages)
ToolTip(langMenu, self.tooltip_langMenu)
langMenu.grid(row=0, column=2, sticky="w")
frame1Left.grid_columnconfigure(2, weight=100)
# Make sure there is a space between what is defined above and what is
# defined below, even if some translated string is very long.
addHorizSpacer(frame1Left, 0, 3,
minWidth=horizSeparationForUnrelatedThings, weight=100)
# Font size
frame1Right = ttk.Frame(frame1)
frame1Right.grid(row=0, column=1, sticky="e")
fontsizeLabel = ttk.Label(frame1Right, text=_('Font size:'))
ToolTip(fontsizeLabel, self.tooltip_fontSize)
fontsizeLabel.grid(row=0, column=0, sticky="w")
addHorizSpacer(frame1Right, 0, 1)
backupBaseFontSize = str(self.baseFontSize.get())
v = ((0,) + tuple(range(int(float(MIN_BASE_FONT_SIZE)),
int(float(MAX_BASE_FONT_SIZE)) + 1)))
fontsizeSpinbox = tk.Spinbox(frame1Right, values=v,
textvariable=self.baseFontSize,
width=4, justify='right')
# Workaround for a bug (or undocumented feature) of the Spinbox widget
# that overrides a textvariable value at its initialization if
# a values option is used. Tested in Python 2.7.3
self.baseFontSize.set(backupBaseFontSize)
ToolTip(fontsizeSpinbox, self.tooltip_fontSize)
fontsizeSpinbox.grid(row=0, column=2, sticky="w")
fontsizeResetButton = ttk.Button(
frame1Right, text=pgettext('font size', 'Default'),
command=self.resetBaseFontSize)
ToolTip(fontsizeResetButton, self.tooltip_fontSize)
fontsizeResetButton.grid(row=0, column=3, sticky="w", padx="12p")
addVertSpacer(outerFrame, 2*rowNum+1)
# Apt source menu
rowNum += 1
frame2 = ttk.Frame(outerFrame)
frame2.grid(row=2*rowNum, column=0, sticky="w")
aptLabel = ttk.Label(frame2, text=_('Airport data source:'))
ToolTip(aptLabel, self.tooltip_aptMenu)
aptLabel.grid(row=0, column=0, sticky="w")
addHorizSpacer(frame2, 0, 1)
aptMenu = ttk.OptionMenu(frame2, self.apt_data_source,
self.apt_data_source.get(),
_('Scenery'), _('Old default'))
ToolTip(aptMenu, self.tooltip_aptMenu)
aptMenu.grid(row=0, column=2, sticky="w")
addVertSpacer(outerFrame, 2*rowNum+1)
# “Airport database update” menu and “Rebuild airport database” button
rowNum += 1
frame3 = ttk.Frame(outerFrame)
frame3.grid(row=2*rowNum, column=0, sticky="ew")
autoAptLabel = ttk.Label(frame3,
text=_('Airport database update:') + " ")
ToolTip(autoAptLabel, self.tooltip_autoAptMenu)
autoAptLabel.grid(row=0, column=0, sticky="w")
addHorizSpacer(frame3, 0, 1)
autoAptMenu = ttk.OptionMenu(frame3, self.auto_update_apt,
self.auto_update_apt.get(),
_('Automatic'), _('Manual'))
ToolTip(autoAptMenu, self.tooltip_autoAptMenu)
autoAptMenu.grid(row=0, column=2, sticky="w")
frame3.grid_columnconfigure(2, weight=100) # stretchable
addHorizSpacer(frame3, 0, 3,
minWidth=horizSeparationForUnrelatedThings, weight=100)
rebuildAptDbButton = ttk.Button(frame3,
text=_('Rebuild airport database'),
command=self.config.makeAptDigest,
padding="6p")
ToolTip(rebuildAptDbButton, self.tooltip_rebuildApt)
rebuildAptDbButton.grid(row=0, column=4, sticky="e")
addVertSpacer(outerFrame, 2*rowNum+1)
# MagneticField executable
rowNum += 1
frame_MagField = ttk.Frame(outerFrame)
frame_MagField.grid(row=2*rowNum, column=0, sticky="ew")
magneticFieldBinLabel = ttk.Label(frame_MagField,
text=_("GeographicLib's MagneticField executable:"))
ToolTip(magneticFieldBinLabel, self.tooltip_MagneticFieldBin)
magneticFieldBinLabel.grid(row=0, column=0, sticky="w")
frame_MagFieldInner = ttk.Frame(frame_MagField)
frame_MagFieldInner.grid(row=1, column=0, sticky="ew")
magneticFieldBinEntry = ttk.Entry(frame_MagFieldInner, width=50,
textvariable=self.MagneticField_bin)
ToolTip(magneticFieldBinEntry, self.tooltip_MagneticFieldBin)
magneticFieldBinEntry.grid(row=0, column=0, sticky="ew")
magneticFieldBinFind = ttk.Button(frame_MagFieldInner, text=_('Find'),
command=self.findMagneticField_bin)
magneticFieldBinFind.grid(row=0, column=1, sticky="w", padx="12p")
addVertSpacer(outerFrame, 2*rowNum+1)
        # “Remember main window position” checkbox
rowNum += 1
frame_checkboxes = ttk.Frame(outerFrame)
frame_checkboxes.grid(row=2*rowNum, column=0, sticky="ew")
rememberMainWinPosCb = ttk.Checkbutton(
frame_checkboxes,
text=_('Remember the main window position'),
variable=self.rememberMainWinPos)
ToolTip(rememberMainWinPosCb, self.tooltip_rememberMainWinPos)
rememberMainWinPosCb.grid(row=0, column=0, sticky="w")
# “Automatically scroll the Output Window” checkbox
rowNum += 1
autoscrollFGOutputCb = ttk.Checkbutton(
frame_checkboxes,
text=_('Automatically scroll the Output Window'),
variable=self.autoscrollFGOutput)
ToolTip(autoscrollFGOutputCb, self.tooltip_autoscrollFGOutput,
autowrap=True)
autoscrollFGOutputCb.grid(row=1, column=0, sticky="w")
# “Fake the --parkpos option” checkbox
rowNum += 1
fakeParkposOptionCb = ttk.Checkbutton(
frame_checkboxes,
text=_('Fake the --parkpos option'),
variable=self.fakeParkposOption)
ToolTip(fakeParkposOptionCb, self.tooltip_fakeParkposOption,
autowrap=True)
fakeParkposOptionCb.grid(row=2, column=0, sticky="w")
return outerFrame
def validateStandardWidgets(self):
# Validate the contents of some widgets in case one of them still
# has the focus.
for validating in self.validatingWidgets:
val = validating.widget.get()
if not validating.validateFunc(val):
self.noteBook.select(validating.paneWidget)
validating.widget.focus_set()
validating.invalidFunc(str(validating.widget), val)
return False
return True | PypiClean |
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/domains/c.py
import re
from typing import (
Any, Callable, Dict, Generator, Iterator, List, Type, Tuple, Union
)
from typing import cast
from docutils import nodes
from docutils.nodes import Element, Node, TextElement, system_message
from sphinx import addnodes
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.roles import SphinxRole, XRefRole
from sphinx.util import logging
from sphinx.util.cfamily import (
NoOldIdError, ASTBaseBase, verify_description_mode, StringifyTransform,
BaseParser, DefinitionError, UnsupportedMultiCharacterCharLiteral,
identifier_re, anon_identifier_re, integer_literal_re, octal_literal_re,
hex_literal_re, binary_literal_re, integers_literal_suffix_re,
float_literal_re, float_literal_suffix_re,
char_literal_re
)
from sphinx.util.docfields import Field, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import make_refnode
logger = logging.getLogger(__name__)
# https://en.cppreference.com/w/c/keyword
_keywords = [
'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do', 'double',
'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'inline', 'int', 'long',
'register', 'restrict', 'return', 'short', 'signed', 'sizeof', 'static', 'struct',
'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while',
'_Alignas', 'alignas', '_Alignof', 'alignof', '_Atomic', '_Bool', 'bool',
'_Complex', 'complex', '_Generic', '_Imaginary', 'imaginary',
'_Noreturn', 'noreturn', '_Static_assert', 'static_assert',
'_Thread_local', 'thread_local',
]
# these are ordered by precedence, from lowest to highest
_expression_bin_ops = [
['||', 'or'],
['&&', 'and'],
['|', 'bitor'],
['^', 'xor'],
['&', 'bitand'],
['==', '!=', 'not_eq'],
['<=', '>=', '<', '>'],
['<<', '>>'],
['+', '-'],
['*', '/', '%'],
['.*', '->*']
]
_expression_unary_ops = ["++", "--", "*", "&", "+", "-", "!", "not", "~", "compl"]
_expression_assignment_ops = ["=", "*=", "/=", "%=", "+=", "-=",
">>=", "<<=", "&=", "and_eq", "^=", "xor_eq", "|=", "or_eq"]
_max_id = 1
_id_prefix = [None, 'c.', 'Cv2.']
# Ids are used in lookup keys which are used across pickled files,
# so when _max_id changes, make sure to update the ENV_VERSION.
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
class _DuplicateSymbolError(Exception):
def __init__(self, symbol: "Symbol", declaration: "ASTDeclaration") -> None:
assert symbol
assert declaration
self.symbol = symbol
self.declaration = declaration
def __str__(self) -> str:
return "Internal C duplicate symbol error:\n%s" % self.symbol.dump(0)
class ASTBase(ASTBaseBase):
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
# Names
################################################################################
class ASTIdentifier(ASTBaseBase):
def __init__(self, identifier: str) -> None:
assert identifier is not None
assert len(identifier) != 0
self.identifier = identifier
def is_anon(self) -> bool:
return self.identifier[0] == '@'
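    # Identifiers starting with '@' denote anonymous entities; the
    # describe/display methods below render them as "[anonymous]".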
    # This is where we finally distinguish between __str__ and the display string.
def __str__(self) -> str:
return self.identifier
def get_display_string(self) -> str:
return "[anonymous]" if self.is_anon() else self.identifier
def describe_signature(self, signode: TextElement, mode: str, env: "BuildEnvironment",
prefix: str, symbol: "Symbol") -> None:
# note: slightly different signature of describe_signature due to the prefix
verify_description_mode(mode)
if mode == 'markType':
targetText = prefix + self.identifier
pnode = addnodes.pending_xref('', refdomain='c',
reftype='identifier',
reftarget=targetText, modname=None,
classname=None)
# key = symbol.get_lookup_key()
# pnode['c:parent_key'] = key
if self.is_anon():
pnode += nodes.strong(text="[anonymous]")
else:
pnode += nodes.Text(self.identifier)
signode += pnode
elif mode == 'lastIsName':
if self.is_anon():
signode += nodes.strong(text="[anonymous]")
else:
signode += addnodes.desc_name(self.identifier, self.identifier)
elif mode == 'noneIsName':
if self.is_anon():
signode += nodes.strong(text="[anonymous]")
else:
signode += nodes.Text(self.identifier)
else:
raise Exception('Unknown description mode: %s' % mode)
class ASTNestedName(ASTBase):
def __init__(self, names: List[ASTIdentifier], rooted: bool) -> None:
assert len(names) > 0
self.names = names
self.rooted = rooted
@property
def name(self) -> "ASTNestedName":
return self
def get_id(self, version: int) -> str:
return '.'.join(str(n) for n in self.names)
def _stringify(self, transform: StringifyTransform) -> str:
res = '.'.join(transform(n) for n in self.names)
if self.rooted:
return '.' + res
else:
return res
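    # e.g. names ['a', 'b', 'c'] stringify to "a.b.c" (".a.b.c" when
    # rooted) and yield the id "a.b.c".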
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
# just print the name part, with template args, not template params
if mode == 'noneIsName':
signode += nodes.Text(str(self))
elif mode == 'param':
name = str(self)
signode += nodes.emphasis(name, name)
elif mode == 'markType' or mode == 'lastIsName' or mode == 'markName':
# Each element should be a pending xref targeting the complete
# prefix.
prefix = ''
first = True
names = self.names[:-1] if mode == 'lastIsName' else self.names
# If lastIsName, then wrap all of the prefix in a desc_addname,
# else append directly to signode.
# TODO: also for C?
# NOTE: Breathe previously relied on the prefix being in the desc_addname node,
# so it can remove it in inner declarations.
dest = signode
if mode == 'lastIsName':
dest = addnodes.desc_addname()
if self.rooted:
prefix += '.'
if mode == 'lastIsName' and len(names) == 0:
signode += nodes.Text('.')
else:
dest += nodes.Text('.')
for i in range(len(names)):
ident = names[i]
if not first:
dest += nodes.Text('.')
prefix += '.'
first = False
txt_ident = str(ident)
if txt_ident != '':
ident.describe_signature(dest, 'markType', env, prefix, symbol)
prefix += txt_ident
if mode == 'lastIsName':
if len(self.names) > 1:
dest += addnodes.desc_addname('.', '.')
signode += dest
self.names[-1].describe_signature(signode, mode, env, '', symbol)
else:
raise Exception('Unknown description mode: %s' % mode)
################################################################################
# Expressions
################################################################################
class ASTExpression(ASTBase):
pass
# Primary expressions
################################################################################
class ASTLiteral(ASTExpression):
pass
class ASTBooleanLiteral(ASTLiteral):
def __init__(self, value: bool) -> None:
self.value = value
def _stringify(self, transform: StringifyTransform) -> str:
if self.value:
return 'true'
else:
return 'false'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text(str(self)))
class ASTNumberLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTCharLiteral(ASTLiteral):
def __init__(self, prefix: str, data: str) -> None:
self.prefix = prefix # may be None when no prefix
self.data = data
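        # Decode escape sequences to obtain the numeric value of the
        # literal, e.g. 'a' -> 97 and '\n' -> 10; multi-character
        # literals are rejected below.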
decoded = data.encode().decode('unicode-escape')
if len(decoded) == 1:
self.value = ord(decoded)
else:
raise UnsupportedMultiCharacterCharLiteral(decoded)
def _stringify(self, transform: StringifyTransform) -> str:
if self.prefix is None:
return "'" + self.data + "'"
else:
return self.prefix + "'" + self.data + "'"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTStringLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTIdExpression(ASTExpression):
def __init__(self, name: ASTNestedName):
# note: this class is basically to cast a nested name as an expression
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def get_id(self, version: int) -> str:
return self.name.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.name.describe_signature(signode, mode, env, symbol)
class ASTParenExpr(ASTExpression):
def __init__(self, expr):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '(' + transform(self.expr) + ')'
def get_id(self, version: int) -> str:
return self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('(', '('))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')', ')'))
# Postfix expressions
################################################################################
class ASTPostfixOp(ASTBase):
pass
class ASTPostfixCallExpr(ASTPostfixOp):
def __init__(self, lst: Union["ASTParenExprList", "ASTBracedInitList"]) -> None:
self.lst = lst
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.lst)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.lst.describe_signature(signode, mode, env, symbol)
class ASTPostfixArray(ASTPostfixOp):
def __init__(self, expr: ASTExpression) -> None:
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '[' + transform(self.expr) + ']'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('['))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(']'))
class ASTPostfixInc(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '++'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('++'))
class ASTPostfixDec(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '--'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('--'))
class ASTPostfixMember(ASTPostfixOp):
def __init__(self, name):
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return '.' + transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('.'))
self.name.describe_signature(signode, 'noneIsName', env, symbol)
class ASTPostfixMemberOfPointer(ASTPostfixOp):
def __init__(self, name):
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return '->' + transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('->'))
self.name.describe_signature(signode, 'noneIsName', env, symbol)
class ASTPostfixExpr(ASTExpression):
def __init__(self, prefix: ASTExpression, postFixes: List[ASTPostfixOp]):
self.prefix = prefix
self.postFixes = postFixes
def _stringify(self, transform: StringifyTransform) -> str:
res = [transform(self.prefix)]
for p in self.postFixes:
res.append(transform(p))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.prefix.describe_signature(signode, mode, env, symbol)
for p in self.postFixes:
p.describe_signature(signode, mode, env, symbol)
# Unary expressions
################################################################################
class ASTUnaryOpExpr(ASTExpression):
def __init__(self, op: str, expr: ASTExpression):
self.op = op
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
if self.op[0] in 'cn':
return self.op + " " + transform(self.expr)
else:
return self.op + transform(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text(self.op))
if self.op[0] in 'cn':
signode.append(nodes.Text(" "))
self.expr.describe_signature(signode, mode, env, symbol)
class ASTSizeofType(ASTExpression):
def __init__(self, typ):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof(" + transform(self.typ) + ")"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('sizeof('))
self.typ.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTSizeofExpr(ASTExpression):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof " + transform(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('sizeof '))
self.expr.describe_signature(signode, mode, env, symbol)
class ASTAlignofExpr(ASTExpression):
def __init__(self, typ: "ASTType"):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "alignof(" + transform(self.typ) + ")"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('alignof('))
self.typ.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
# Other expressions
################################################################################
class ASTCastExpr(ASTExpression):
def __init__(self, typ: "ASTType", expr: ASTExpression):
self.typ = typ
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.typ))
res.append(')')
res.append(transform(self.expr))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('('))
self.typ.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
self.expr.describe_signature(signode, mode, env, symbol)
class ASTBinOpExpr(ASTBase):
def __init__(self, exprs: List[ASTExpression], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
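    # e.g. exprs [a, b, c] with ops ['+', '*'] stringifies to "a + b * c".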
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode.append(nodes.Text(' '))
signode.append(nodes.Text(self.ops[i - 1]))
signode.append(nodes.Text(' '))
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTAssignmentExpr(ASTExpression):
def __init__(self, exprs: List[ASTExpression], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode.append(nodes.Text(' '))
signode.append(nodes.Text(self.ops[i - 1]))
signode.append(nodes.Text(' '))
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTFallbackExpr(ASTExpression):
def __init__(self, expr: str):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return self.expr
def get_id(self, version: int) -> str:
return str(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.Text(self.expr)
################################################################################
# Types
################################################################################
class ASTTrailingTypeSpec(ASTBase):
pass
class ASTTrailingTypeSpecFundamental(ASTTrailingTypeSpec):
def __init__(self, name: str) -> None:
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return self.name
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.Text(str(self.name))
class ASTTrailingTypeSpecName(ASTTrailingTypeSpec):
def __init__(self, prefix: str, nestedName: ASTNestedName) -> None:
self.prefix = prefix
self.nestedName = nestedName
@property
def name(self) -> ASTNestedName:
return self.nestedName
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.prefix:
res.append(self.prefix)
res.append(' ')
res.append(transform(self.nestedName))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.prefix:
signode += addnodes.desc_annotation(self.prefix, self.prefix)
signode += nodes.Text(' ')
self.nestedName.describe_signature(signode, mode, env, symbol=symbol)
class ASTFunctionParameter(ASTBase):
def __init__(self, arg: "ASTTypeWithInit", ellipsis: bool = False) -> None:
self.arg = arg
self.ellipsis = ellipsis
def _stringify(self, transform: StringifyTransform) -> str:
if self.ellipsis:
return '...'
else:
return transform(self.arg)
def describe_signature(self, signode: Any, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.ellipsis:
signode += nodes.Text('...')
else:
self.arg.describe_signature(signode, mode, env, symbol=symbol)
class ASTParameters(ASTBase):
def __init__(self, args: List[ASTFunctionParameter]) -> None:
self.args = args
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.args
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append('(')
first = True
for a in self.args:
if not first:
res.append(', ')
first = False
res.append(str(a))
res.append(')')
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
if mode == 'lastIsName': # i.e., outer-function params
arg.describe_signature(param, 'param', env, symbol=symbol)
else:
arg.describe_signature(param, 'markType', env, symbol=symbol)
paramlist += param
signode += paramlist
class ASTDeclSpecsSimple(ASTBaseBase):
def __init__(self, storage: str, threadLocal: str, inline: bool,
restrict: bool, volatile: bool, const: bool, attrs: List[Any]) -> None:
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
self.restrict = restrict
self.volatile = volatile
self.const = const
self.attrs = attrs
def mergeWith(self, other: "ASTDeclSpecsSimple") -> "ASTDeclSpecsSimple":
if not other:
return self
        return ASTDeclSpecsSimple(self.storage or other.storage,
                                  self.threadLocal or other.threadLocal,
                                  self.inline or other.inline,
                                  self.restrict or other.restrict,
                                  self.volatile or other.volatile,
                                  self.const or other.const,
                                  self.attrs + other.attrs)
def _stringify(self, transform: StringifyTransform) -> str:
res = [] # type: List[str]
res.extend(transform(attr) for attr in self.attrs)
if self.storage:
res.append(self.storage)
if self.threadLocal:
res.append(self.threadLocal)
if self.inline:
res.append('inline')
if self.restrict:
res.append('restrict')
if self.volatile:
res.append('volatile')
if self.const:
res.append('const')
return ' '.join(res)
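    # e.g. storage='static' and const=True yield "static const"
    # (attributes, if any, come first).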
def describe_signature(self, modifiers: List[Node]) -> None:
def _add(modifiers: List[Node], text: str) -> None:
if len(modifiers) > 0:
modifiers.append(nodes.Text(' '))
modifiers.append(addnodes.desc_annotation(text, text))
for attr in self.attrs:
if len(modifiers) > 0:
modifiers.append(nodes.Text(' '))
            attr.describe_signature(modifiers)
if self.storage:
_add(modifiers, self.storage)
if self.threadLocal:
_add(modifiers, self.threadLocal)
if self.inline:
_add(modifiers, 'inline')
if self.restrict:
_add(modifiers, 'restrict')
if self.volatile:
_add(modifiers, 'volatile')
if self.const:
_add(modifiers, 'const')
class ASTDeclSpecs(ASTBase):
def __init__(self, outer: str,
leftSpecs: ASTDeclSpecsSimple,
rightSpecs: ASTDeclSpecsSimple,
trailing: ASTTrailingTypeSpec) -> None:
# leftSpecs and rightSpecs are used for output
        # allSpecs is used for id generation. TODO: remove?
self.outer = outer
self.leftSpecs = leftSpecs
self.rightSpecs = rightSpecs
self.allSpecs = self.leftSpecs.mergeWith(self.rightSpecs)
self.trailingTypeSpec = trailing
def _stringify(self, transform: StringifyTransform) -> str:
res = [] # type: List[str]
l = transform(self.leftSpecs)
if len(l) > 0:
res.append(l)
if self.trailingTypeSpec:
if len(res) > 0:
res.append(" ")
res.append(transform(self.trailingTypeSpec))
r = str(self.rightSpecs)
if len(r) > 0:
if len(res) > 0:
res.append(" ")
res.append(r)
return "".join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
modifiers = [] # type: List[Node]
def _add(modifiers: List[Node], text: str) -> None:
if len(modifiers) > 0:
modifiers.append(nodes.Text(' '))
modifiers.append(addnodes.desc_annotation(text, text))
self.leftSpecs.describe_signature(modifiers)
for m in modifiers:
signode += m
if self.trailingTypeSpec:
if len(modifiers) > 0:
signode += nodes.Text(' ')
self.trailingTypeSpec.describe_signature(signode, mode, env,
symbol=symbol)
modifiers = []
self.rightSpecs.describe_signature(modifiers)
if len(modifiers) > 0:
signode += nodes.Text(' ')
for m in modifiers:
signode += m
# Declarator
################################################################################
class ASTArray(ASTBase):
def __init__(self, static: bool, const: bool, volatile: bool, restrict: bool,
vla: bool, size: ASTExpression):
self.static = static
self.const = const
self.volatile = volatile
self.restrict = restrict
self.vla = vla
self.size = size
if vla:
assert size is None
if size is not None:
assert not vla
def _stringify(self, transform: StringifyTransform) -> str:
el = []
if self.static:
el.append('static')
if self.restrict:
el.append('restrict')
if self.volatile:
el.append('volatile')
if self.const:
el.append('const')
if self.vla:
return '[' + ' '.join(el) + '*]'
elif self.size:
el.append(transform(self.size))
return '[' + ' '.join(el) + ']'
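    # e.g. "[static const 10]" for "int a[static const 10]", and "[*]"
    # for a VLA of unspecified size.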
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode.append(nodes.Text("["))
addSpace = False
def _add(signode: TextElement, text: str) -> bool:
if addSpace:
signode += nodes.Text(' ')
signode += addnodes.desc_annotation(text, text)
return True
if self.static:
addSpace = _add(signode, 'static')
if self.restrict:
addSpace = _add(signode, 'restrict')
if self.volatile:
addSpace = _add(signode, 'volatile')
if self.const:
addSpace = _add(signode, 'const')
if self.vla:
signode.append(nodes.Text('*'))
elif self.size:
if addSpace:
signode += nodes.Text(' ')
self.size.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text("]"))
class ASTDeclarator(ASTBase):
@property
def name(self) -> ASTNestedName:
raise NotImplementedError(repr(self))
@property
def function_params(self) -> List[ASTFunctionParameter]:
raise NotImplementedError(repr(self))
def require_space_after_declSpecs(self) -> bool:
raise NotImplementedError(repr(self))
class ASTDeclaratorNameParam(ASTDeclarator):
def __init__(self, declId: ASTNestedName,
arrayOps: List[ASTArray], param: ASTParameters) -> None:
self.declId = declId
self.arrayOps = arrayOps
self.param = param
@property
def name(self) -> ASTNestedName:
return self.declId
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.param.function_params
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
for op in self.arrayOps:
res.append(transform(op))
if self.param:
res.append(transform(self.param))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
for op in self.arrayOps:
op.describe_signature(signode, mode, env, symbol)
if self.param:
self.param.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorNameBitField(ASTDeclarator):
def __init__(self, declId: ASTNestedName, size: ASTExpression):
self.declId = declId
self.size = size
@property
def name(self) -> ASTNestedName:
return self.declId
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
res.append(" : ")
res.append(transform(self.size))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
signode += nodes.Text(' : ', ' : ')
self.size.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorPtr(ASTDeclarator):
def __init__(self, next: ASTDeclarator, restrict: bool, volatile: bool, const: bool,
attrs: Any) -> None:
assert next
self.next = next
self.restrict = restrict
self.volatile = volatile
self.const = const
self.attrs = attrs
@property
def name(self) -> ASTNestedName:
return self.next.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.next.function_params
def require_space_after_declSpecs(self) -> bool:
return self.const or self.volatile or self.restrict or \
len(self.attrs) > 0 or \
self.next.require_space_after_declSpecs()
def _stringify(self, transform: StringifyTransform) -> str:
res = ['*']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and (self.restrict or self.volatile or self.const):
res.append(' ')
if self.restrict:
res.append('restrict')
if self.volatile:
if self.restrict:
res.append(' ')
res.append('volatile')
if self.const:
if self.restrict or self.volatile:
res.append(' ')
res.append('const')
if self.const or self.volatile or self.restrict or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
res.append(' ')
res.append(transform(self.next))
return ''.join(res)
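    # e.g. a const pointer to a declarator named 'p' stringifies to
    # "*const p".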
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += nodes.Text("*")
for a in self.attrs:
a.describe_signature(signode)
if len(self.attrs) > 0 and (self.restrict or self.volatile or self.const):
signode += nodes.Text(' ')
def _add_anno(signode: TextElement, text: str) -> None:
signode += addnodes.desc_annotation(text, text)
if self.restrict:
_add_anno(signode, 'restrict')
if self.volatile:
if self.restrict:
signode += nodes.Text(' ')
_add_anno(signode, 'volatile')
if self.const:
if self.restrict or self.volatile:
signode += nodes.Text(' ')
_add_anno(signode, 'const')
if self.const or self.volatile or self.restrict or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
signode += nodes.Text(' ')
self.next.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorParen(ASTDeclarator):
def __init__(self, inner: ASTDeclarator, next: ASTDeclarator) -> None:
assert inner
assert next
self.inner = inner
self.next = next
# TODO: we assume the name and params are in inner
@property
def name(self) -> ASTNestedName:
return self.inner.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.inner.function_params
def require_space_after_declSpecs(self) -> bool:
return True
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.inner))
res.append(')')
res.append(transform(self.next))
return ''.join(res)
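    # Used for declarators such as function pointers, e.g. "(*f)(int)":
    # 'inner' holds "*f" and 'next' holds the parameter list "(int)".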
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += nodes.Text('(')
self.inner.describe_signature(signode, mode, env, symbol)
signode += nodes.Text(')')
self.next.describe_signature(signode, "noneIsName", env, symbol)
# Initializer
################################################################################
class ASTParenExprList(ASTBase):
def __init__(self, exprs: List[ASTExpression]) -> None:
self.exprs = exprs
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
return '(%s)' % ', '.join(exprs)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode.append(nodes.Text('('))
first = True
for e in self.exprs:
if not first:
signode.append(nodes.Text(', '))
else:
first = False
e.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTBracedInitList(ASTBase):
def __init__(self, exprs: List[ASTExpression], trailingComma: bool) -> None:
self.exprs = exprs
self.trailingComma = trailingComma
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
trailingComma = ',' if self.trailingComma else ''
return '{%s%s}' % (', '.join(exprs), trailingComma)
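    # e.g. exprs [1, 2] with trailingComma=True stringifies to "{1, 2,}".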
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode.append(nodes.Text('{'))
first = True
for e in self.exprs:
if not first:
signode.append(nodes.Text(', '))
else:
first = False
e.describe_signature(signode, mode, env, symbol)
if self.trailingComma:
signode.append(nodes.Text(','))
signode.append(nodes.Text('}'))
class ASTInitializer(ASTBase):
def __init__(self, value: Union[ASTBracedInitList, ASTExpression],
hasAssign: bool = True) -> None:
self.value = value
self.hasAssign = hasAssign
def _stringify(self, transform: StringifyTransform) -> str:
val = transform(self.value)
if self.hasAssign:
return ' = ' + val
else:
return val
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.hasAssign:
signode.append(nodes.Text(' = '))
self.value.describe_signature(signode, 'markType', env, symbol)
class ASTType(ASTBase):
def __init__(self, declSpecs: ASTDeclSpecs, decl: ASTDeclarator) -> None:
assert declSpecs
assert decl
self.declSpecs = declSpecs
self.decl = decl
@property
def name(self) -> ASTNestedName:
return self.decl.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.decl.function_params
def _stringify(self, transform: StringifyTransform) -> str:
res = []
declSpecs = transform(self.declSpecs)
res.append(declSpecs)
if self.decl.require_space_after_declSpecs() and len(declSpecs) > 0:
res.append(' ')
res.append(transform(self.decl))
return ''.join(res)
def get_type_declaration_prefix(self) -> str:
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if (self.decl.require_space_after_declSpecs() and
len(str(self.declSpecs)) > 0):
signode += nodes.Text(' ')
# for parameters that don't really declare new names we get 'markType',
# this should not be propagated, but be 'noneIsName'.
if mode == 'markType':
mode = 'noneIsName'
self.decl.describe_signature(signode, mode, env, symbol)
class ASTTypeWithInit(ASTBase):
def __init__(self, type: ASTType, init: ASTInitializer) -> None:
self.type = type
self.init = init
@property
def name(self) -> ASTNestedName:
return self.type.name
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.type))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, mode, env, symbol)
class ASTMacroParameter(ASTBase):
def __init__(self, arg: ASTNestedName, ellipsis: bool = False) -> None:
self.arg = arg
self.ellipsis = ellipsis
def _stringify(self, transform: StringifyTransform) -> str:
if self.ellipsis:
return '...'
else:
return transform(self.arg)
def describe_signature(self, signode: Any, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.ellipsis:
signode += nodes.Text('...')
else:
self.arg.describe_signature(signode, mode, env, symbol=symbol)
class ASTMacro(ASTBase):
def __init__(self, ident: ASTNestedName, args: List[ASTMacroParameter]) -> None:
self.ident = ident
self.args = args
@property
def name(self) -> ASTNestedName:
return self.ident
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.ident))
if self.args is not None:
res.append('(')
first = True
for arg in self.args:
if not first:
res.append(', ')
first = False
res.append(transform(arg))
res.append(')')
return ''.join(res)
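    # e.g. ident "MIN" with args [a, b] stringifies to "MIN(a, b)";
    # args=None (an object-like macro) gives just "MIN".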
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.ident.describe_signature(signode, mode, env, symbol)
if self.args is None:
return
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
arg.describe_signature(param, 'param', env, symbol=symbol)
paramlist += param
signode += paramlist
class ASTStruct(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTUnion(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnum(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnumerator(ASTBase):
def __init__(self, name: ASTNestedName, init: ASTInitializer) -> None:
self.name = name
self.init = init
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.name))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, 'markType', env, symbol)
class ASTDeclaration(ASTBaseBase):
def __init__(self, objectType: str, directiveType: str, declaration: Any,
semicolon: bool = False) -> None:
self.objectType = objectType
self.directiveType = directiveType
self.declaration = declaration
self.semicolon = semicolon
self.symbol = None # type: Symbol
# set by CObject._add_enumerator_to_parent
self.enumeratorScopedSymbol = None # type: Symbol
@property
def name(self) -> ASTNestedName:
return self.declaration.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
if self.objectType != 'function':
return None
return self.declaration.function_params
def get_id(self, version: int, prefixed: bool = True) -> str:
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
return self.enumeratorScopedSymbol.declaration.get_id(version, prefixed)
id_ = self.symbol.get_full_nested_name().get_id(version)
if prefixed:
return _id_prefix[version] + id_
else:
return id_
def get_newest_id(self) -> str:
return self.get_id(_max_id, True)
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.declaration)
if self.semicolon:
res += ';'
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", options: Dict) -> None:
verify_description_mode(mode)
assert self.symbol
# The caller of the domain added a desc_signature node.
# Always enable multiline:
signode['is_multiline'] = True
# Put each line in a desc_signature_line node.
mainDeclNode = addnodes.desc_signature_line()
mainDeclNode.sphinx_line_type = 'declarator'
mainDeclNode['add_permalink'] = not self.symbol.isRedeclaration
signode += mainDeclNode
if self.objectType == 'member':
pass
elif self.objectType == 'function':
pass
elif self.objectType == 'macro':
pass
elif self.objectType == 'struct':
mainDeclNode += addnodes.desc_annotation('struct ', 'struct ')
elif self.objectType == 'union':
mainDeclNode += addnodes.desc_annotation('union ', 'union ')
elif self.objectType == 'enum':
mainDeclNode += addnodes.desc_annotation('enum ', 'enum ')
elif self.objectType == 'enumerator':
mainDeclNode += addnodes.desc_annotation('enumerator ', 'enumerator ')
elif self.objectType == 'type':
prefix = self.declaration.get_type_declaration_prefix()
prefix += ' '
mainDeclNode += addnodes.desc_annotation(prefix, prefix)
else:
assert False
self.declaration.describe_signature(mainDeclNode, mode, env, self.symbol)
if self.semicolon:
mainDeclNode += nodes.Text(';')
class SymbolLookupResult:
def __init__(self, symbols: Iterator["Symbol"], parentSymbol: "Symbol",
ident: ASTIdentifier) -> None:
self.symbols = symbols
self.parentSymbol = parentSymbol
self.ident = ident
class LookupKey:
def __init__(self, data: List[Tuple[ASTIdentifier, str]]) -> None:
self.data = data
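        # 'data' pairs each identifier on the path to a symbol with the
        # newest id of its declaration (None for undeclared intermediate
        # symbols); e.g. for a symbol a.b the data might look like
        # [(a, 'c.a'), (b, 'c.a.b')].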
def __str__(self) -> str:
return '[{}]'.format(', '.join("({}, {})".format(
ident, id_) for ident, id_ in self.data))
class Symbol:
debug_indent = 0
debug_indent_string = " "
debug_lookup = False
debug_show_tree = False
@staticmethod
def debug_print(*args: Any) -> None:
print(Symbol.debug_indent_string * Symbol.debug_indent, end="")
print(*args)
def _assert_invariants(self) -> None:
if not self.parent:
# parent == None means global scope, so declaration means a parent
assert not self.declaration
assert not self.docname
else:
if self.declaration:
assert self.docname
def __setattr__(self, key: str, value: Any) -> None:
if key == "children":
assert False
else:
return super().__setattr__(key, value)
def __init__(self, parent: "Symbol", ident: ASTIdentifier,
declaration: ASTDeclaration, docname: str) -> None:
self.parent = parent
# declarations in a single directive are linked together
self.siblingAbove = None # type: Symbol
self.siblingBelow = None # type: Symbol
self.ident = ident
self.declaration = declaration
self.docname = docname
self.isRedeclaration = False
self._assert_invariants()
# Remember to modify Symbol.remove if modifications to the parent change.
self._children = [] # type: List[Symbol]
self._anonChildren = [] # type: List[Symbol]
# note: _children includes _anonChildren
if self.parent:
self.parent._children.append(self)
if self.declaration:
self.declaration.symbol = self
# Do symbol addition after self._children has been initialised.
self._add_function_params()
def _fill_empty(self, declaration: ASTDeclaration, docname: str) -> None:
self._assert_invariants()
assert not self.declaration
assert not self.docname
assert declaration
assert docname
self.declaration = declaration
self.declaration.symbol = self
self.docname = docname
self._assert_invariants()
# and symbol addition should be done as well
self._add_function_params()
def _add_function_params(self) -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_function_params:")
# Note: we may be called from _fill_empty, so the symbols we want
# to add may actually already be present (as empty symbols).
# add symbols for function parameters, if any
if self.declaration is not None and self.declaration.function_params is not None:
for p in self.declaration.function_params:
if p.arg is None:
continue
nn = p.arg.name
if nn is None:
continue
# (comparing to the template params: we have checked that we are a declaration)
decl = ASTDeclaration('functionParam', None, p)
assert not nn.rooted
assert len(nn.names) == 1
self._add_symbols(nn, decl, self.docname)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def remove(self) -> None:
if self.parent is None:
return
assert self in self.parent._children
self.parent._children.remove(self)
self.parent = None
def clear_doc(self, docname: str) -> None:
newChildren = [] # type: List[Symbol]
for sChild in self._children:
sChild.clear_doc(docname)
if sChild.declaration and sChild.docname == docname:
sChild.declaration = None
sChild.docname = None
if sChild.siblingAbove is not None:
sChild.siblingAbove.siblingBelow = sChild.siblingBelow
if sChild.siblingBelow is not None:
sChild.siblingBelow.siblingAbove = sChild.siblingAbove
sChild.siblingAbove = None
sChild.siblingBelow = None
newChildren.append(sChild)
self._children = newChildren
def get_all_symbols(self) -> Iterator["Symbol"]:
yield self
for sChild in self._children:
for s in sChild.get_all_symbols():
yield s
@property
def children_recurse_anon(self) -> Iterator["Symbol"]:
for c in self._children:
yield c
if not c.ident.is_anon():
continue
yield from c.children_recurse_anon
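    # I.e. yield every direct child, and transparently recurse into
    # children whose identifier is anonymous.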
def get_lookup_key(self) -> "LookupKey":
# The pickle files for the environment and for each document are distinct.
        # The environment has all the symbols, but each document has xrefs that
# must know their scope. A lookup key is essentially a specification of
# how to find a specific symbol.
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
key = []
for s in symbols:
if s.declaration is not None:
# TODO: do we need the ID?
key.append((s.ident, s.declaration.get_newest_id()))
else:
key.append((s.ident, None))
return LookupKey(key)
def get_full_nested_name(self) -> ASTNestedName:
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
names = []
for s in symbols:
names.append(s.ident)
return ASTNestedName(names, rooted=False)
def _find_first_named_symbol(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool) -> "Symbol":
# TODO: further simplification from C++ to C
if Symbol.debug_lookup:
Symbol.debug_print("_find_first_named_symbol ->")
res = self._find_named_symbols(ident, matchSelf, recurseInAnon,
searchInSiblings=False)
try:
return next(res)
except StopIteration:
return None
def _find_named_symbols(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool,
searchInSiblings: bool) -> Iterator["Symbol"]:
# TODO: further simplification from C++ to C
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_find_named_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("ident: ", ident)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
def candidates() -> Generator["Symbol", None, None]:
s = self
if Symbol.debug_lookup:
Symbol.debug_print("searching in self:")
print(s.to_string(Symbol.debug_indent + 1), end="")
while True:
if matchSelf:
yield s
if recurseInAnon:
yield from s.children_recurse_anon
else:
yield from s._children
if s.siblingAbove is None:
break
s = s.siblingAbove
if Symbol.debug_lookup:
Symbol.debug_print("searching in sibling:")
print(s.to_string(Symbol.debug_indent + 1), end="")
for s in candidates():
if Symbol.debug_lookup:
Symbol.debug_print("candidate:")
print(s.to_string(Symbol.debug_indent + 1), end="")
if s.ident == ident:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("matches")
Symbol.debug_indent -= 3
yield s
if Symbol.debug_lookup:
Symbol.debug_indent += 2
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
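# _symbol_lookup resolves a (possibly dotted) nested name starting from this
# symbol: it first picks a starting scope (the root for rooted names, or the
# nearest ancestor that can see the first identifier when ancestorLookupType
# is given), then walks the qualifying names one by one, delegating to
# onMissingQualifiedSymbol whenever an intermediate scope does not exist yet.
# The last name is looked up with _find_named_symbols and returned as a
# SymbolLookupResult.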
def _symbol_lookup(self, nestedName: ASTNestedName,
onMissingQualifiedSymbol: Callable[["Symbol", ASTIdentifier], "Symbol"], # NOQA
ancestorLookupType: str, matchSelf: bool,
recurseInAnon: bool, searchInSiblings: bool) -> SymbolLookupResult:
# TODO: further simplification from C++ to C
# ancestorLookupType: if not None, specifies the target type of the lookup
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_symbol_lookup:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("nestedName: ", nestedName)
Symbol.debug_print("ancestorLookupType:", ancestorLookupType)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
names = nestedName.names
# find the right starting point for lookup
parentSymbol = self
if nestedName.rooted:
while parentSymbol.parent:
parentSymbol = parentSymbol.parent
if ancestorLookupType is not None:
# walk up until we find the first identifier
firstName = names[0]
while parentSymbol.parent:
if parentSymbol.find_identifier(firstName,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=searchInSiblings):
break
parentSymbol = parentSymbol.parent
if Symbol.debug_lookup:
Symbol.debug_print("starting point:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# and now the actual lookup
for ident in names[:-1]:
symbol = parentSymbol._find_first_named_symbol(
ident, matchSelf=matchSelf, recurseInAnon=recurseInAnon)
if symbol is None:
symbol = onMissingQualifiedSymbol(parentSymbol, ident)
if symbol is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
# We have now matched part of a nested name, and need to match more,
# so even if matchSelf was allowed before, it definitely is not allowed
# anymore. (see also issue #2666)
matchSelf = False
parentSymbol = symbol
if Symbol.debug_lookup:
Symbol.debug_print("handle last name from:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# handle the last name
ident = names[-1]
symbols = parentSymbol._find_named_symbols(
ident, matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=searchInSiblings)
if Symbol.debug_lookup:
symbols = list(symbols) # type: ignore
Symbol.debug_indent -= 2
return SymbolLookupResult(symbols, parentSymbol, ident)
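# _add_symbols builds the whole scope path for a declaration. For a
# hypothetical name such as "a.b.c", empty placeholder symbols are created for
# "a" and "b" if they do not exist yet; "c" is then either created, used to
# fill an existing empty symbol, or reported as a duplicate via
# _DuplicateSymbolError.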
def _add_symbols(self, nestedName: ASTNestedName,
declaration: ASTDeclaration, docname: str) -> "Symbol":
# TODO: further simplification from C++ to C
# Used for adding a whole path of symbols, where the last may or may not
# be an actual declaration.
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("nn: ", nestedName)
Symbol.debug_print("decl: ", declaration)
Symbol.debug_print("doc: ", docname)
def onMissingQualifiedSymbol(parentSymbol: "Symbol", ident: ASTIdentifier) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols, onMissingQualifiedSymbol:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", ident)
Symbol.debug_indent -= 2
return Symbol(parent=parentSymbol, ident=ident,
declaration=None, docname=None)
lookupResult = self._symbol_lookup(nestedName,
onMissingQualifiedSymbol,
ancestorLookupType=None,
matchSelf=False,
recurseInAnon=False,
searchInSiblings=False)
assert lookupResult is not None # we create symbols all the way, so that can't happen
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, no symbol:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", lookupResult.ident)
Symbol.debug_print("declaration: ", declaration)
Symbol.debug_print("docname: ", docname)
Symbol.debug_indent -= 1
symbol = Symbol(parent=lookupResult.parentSymbol,
ident=lookupResult.ident,
declaration=declaration,
docname=docname)
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return symbol
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("number symbols:", len(symbols))
Symbol.debug_indent -= 1
if not declaration:
if Symbol.debug_lookup:
Symbol.debug_print("no delcaration")
Symbol.debug_indent -= 2
# good, just a scope creation
# TODO: what if we have more than one symbol?
return symbols[0]
noDecl = []
withDecl = []
dupDecl = []
for s in symbols:
if s.declaration is None:
noDecl.append(s)
elif s.isRedeclaration:
dupDecl.append(s)
else:
withDecl.append(s)
if Symbol.debug_lookup:
Symbol.debug_print("#noDecl: ", len(noDecl))
Symbol.debug_print("#withDecl:", len(withDecl))
Symbol.debug_print("#dupDecl: ", len(dupDecl))
# With partial builds we may start with a large symbol tree stripped of declarations.
# Essentially any combination of noDecl, withDecl, and dupDecls seems possible.
# TODO: make partial builds fully work. What should happen when the primary symbol gets
# deleted, and other duplicates exist? The full document should probably be rebuilt.
# First check if one of those with a declaration matches.
# If it's a function, we need to compare IDs,
# otherwise there should be only one symbol with a declaration.
def makeCandSymbol() -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_print("begin: creating candidate symbol")
symbol = Symbol(parent=lookupResult.parentSymbol,
ident=lookupResult.ident,
declaration=declaration,
docname=docname)
if Symbol.debug_lookup:
Symbol.debug_print("end: creating candidate symbol")
return symbol
if len(withDecl) == 0:
candSymbol = None
else:
candSymbol = makeCandSymbol()
def handleDuplicateDeclaration(symbol: "Symbol", candSymbol: "Symbol") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("redeclaration")
Symbol.debug_indent -= 1
Symbol.debug_indent -= 2
# Redeclaration of the same symbol.
# Let the new one be there, but raise an error to the client
# so it can use the real symbol as subscope.
# This will probably result in a duplicate id warning.
candSymbol.isRedeclaration = True
raise _DuplicateSymbolError(symbol, declaration)
if declaration.objectType != "function":
assert len(withDecl) <= 1
handleDuplicateDeclaration(withDecl[0], candSymbol)
# (not reachable)
# a function, so compare IDs
candId = declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("candId:", candId)
for symbol in withDecl:
oldId = symbol.declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("oldId: ", oldId)
if candId == oldId:
handleDuplicateDeclaration(symbol, candSymbol)
# (not reachable)
# no candidate symbol found with matching ID
# if there is an empty symbol, fill that one
if len(noDecl) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("no match, no empty, candSybmol is not None?:", candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
return candSymbol
else:
return makeCandSymbol()
else:
if Symbol.debug_lookup:
Symbol.debug_print(
"no match, but fill an empty declaration, candSybmol is not None?:",
candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
candSymbol.remove()
# assert len(noDecl) == 1
# TODO: enable assertion when we at some point find out how to do cleanup
# for now, just take the first one, it should work fine ... right?
symbol = noDecl[0]
# If someone first opened the scope, and then later
# declares it, e.g.,
# .. namespace:: Test
# .. namespace:: nullptr
# .. class:: Test
symbol._fill_empty(declaration, docname)
return symbol
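# merge_with folds another symbol tree into this one; it is driven by
# CDomain.merge_domaindata during parallel reads, where each worker builds its
# own tree that has to be merged back into the main environment.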
def merge_with(self, other: "Symbol", docnames: List[str],
env: "BuildEnvironment") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("merge_with:")
assert other is not None
for otherChild in other._children:
ourChild = self._find_first_named_symbol(
ident=otherChild.ident, matchSelf=False,
recurseInAnon=False)
if ourChild is None:
# TODO: hmm, should we prune by docnames?
self._children.append(otherChild)
otherChild.parent = self
otherChild._assert_invariants()
continue
if otherChild.declaration and otherChild.docname in docnames:
if not ourChild.declaration:
ourChild._fill_empty(otherChild.declaration, otherChild.docname)
elif ourChild.docname != otherChild.docname:
name = str(ourChild.declaration)
msg = __("Duplicate declaration, also defined in '%s'.\n"
"Declaration is '%s'.")
msg = msg % (ourChild.docname, name)
logger.warning(msg, location=otherChild.docname)
else:
# Both have declarations, and in the same docname.
# This can apparently happen, it should be safe to
# just ignore it, right?
pass
ourChild.merge_with(otherChild, docnames, env)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def add_name(self, nestedName: ASTNestedName) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_name:")
res = self._add_symbols(nestedName, declaration=None, docname=None)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def add_declaration(self, declaration: ASTDeclaration, docname: str) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_declaration:")
assert declaration
assert docname
nestedName = declaration.name
res = self._add_symbols(nestedName, declaration, docname)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def find_identifier(self, ident: ASTIdentifier,
matchSelf: bool, recurseInAnon: bool, searchInSiblings: bool
) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_identifier:")
Symbol.debug_indent += 1
Symbol.debug_print("ident: ", ident)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings:", searchInSiblings)
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
current = self
while current is not None:
if Symbol.debug_lookup:
Symbol.debug_indent += 2
Symbol.debug_print("trying:")
print(current.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
if matchSelf and current.ident == ident:
return current
children = current.children_recurse_anon if recurseInAnon else current._children
for s in children:
if s.ident == ident:
return s
if not searchInSiblings:
break
current = current.siblingAbove
return None
def direct_lookup(self, key: "LookupKey") -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("direct_lookup:")
Symbol.debug_indent += 1
s = self
for name, id_ in key.data:
res = None
for cand in s._children:
if cand.ident == name:
res = cand
break
s = res
if Symbol.debug_lookup:
Symbol.debug_print("name: ", name)
Symbol.debug_print("id: ", id_)
if s is not None:
print(s.to_string(Symbol.debug_indent + 1), end="")
else:
Symbol.debug_print("not found")
if s is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return s
def find_declaration(self, nestedName: ASTNestedName, typ: str,
matchSelf: bool, recurseInAnon: bool) -> "Symbol":
# templateShorthand: missing template parameter lists for templates is ok
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_declaration:")
def onMissingQualifiedSymbol(parentSymbol: "Symbol",
ident: ASTIdentifier) -> "Symbol":
return None
lookupResult = self._symbol_lookup(nestedName,
onMissingQualifiedSymbol,
ancestorLookupType=typ,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=False)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
if lookupResult is None:
return None
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
return None
return symbols[0]
def to_string(self, indent: int) -> str:
res = [Symbol.debug_indent_string * indent]
if not self.parent:
res.append('::')
else:
if self.ident:
res.append(str(self.ident))
else:
res.append(str(self.declaration))
if self.declaration:
res.append(": ")
if self.isRedeclaration:
res.append('!!duplicate!! ')
res.append(str(self.declaration))
if self.docname:
res.append('\t(')
res.append(self.docname)
res.append(')')
res.append('\n')
return ''.join(res)
def dump(self, indent: int) -> str:
res = [self.to_string(indent)]
for c in self._children:
res.append(c.dump(indent + 1))
return ''.join(res)
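# DefinitionParser is a hand-written recursive-descent parser for C
# declarations and expressions. It was derived from the C++ domain parser and
# simplified (see the "further simplification from C++ to C" TODOs above);
# regexes such as identifier_re and the keyword/operator tables it relies on
# are defined elsewhere in this module.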
class DefinitionParser(BaseParser):
# those without signedness and size modifiers
# see https://en.cppreference.com/w/cpp/language/types
_simple_fundamental_types = (
'void', '_Bool', 'bool', 'char', 'int', 'float', 'double',
'__int64',
)
_prefix_keys = ('struct', 'enum', 'union')
@property
def language(self) -> str:
return 'C'
@property
def id_attributes(self):
return self.config.c_id_attributes
@property
def paren_attributes(self):
return self.config.c_paren_attributes
def _parse_string(self) -> str:
if self.current_char != '"':
return None
startPos = self.pos
self.pos += 1
escape = False
while True:
if self.eof:
self.fail("Unexpected end during inside string.")
elif self.current_char == '"' and not escape:
self.pos += 1
break
elif self.current_char == '\\':
escape = True
else:
escape = False
self.pos += 1
return self.definition[startPos:self.pos]
def _parse_literal(self) -> ASTLiteral:
# -> integer-literal
# | character-literal
# | floating-literal
# | string-literal
# | boolean-literal -> "false" | "true"
self.skip_ws()
if self.skip_word('true'):
return ASTBooleanLiteral(True)
if self.skip_word('false'):
return ASTBooleanLiteral(False)
pos = self.pos
if self.match(float_literal_re):
self.match(float_literal_suffix_re)
return ASTNumberLiteral(self.definition[pos:self.pos])
for regex in [binary_literal_re, hex_literal_re,
integer_literal_re, octal_literal_re]:
if self.match(regex):
self.match(integers_literal_suffix_re)
return ASTNumberLiteral(self.definition[pos:self.pos])
string = self._parse_string()
if string is not None:
return ASTStringLiteral(string)
# character-literal
if self.match(char_literal_re):
prefix = self.last_match.group(1) # may be None when no prefix
data = self.last_match.group(2)
try:
return ASTCharLiteral(prefix, data)
except UnicodeDecodeError as e:
self.fail("Can not handle character literal. Internal error was: %s" % e)
except UnsupportedMultiCharacterCharLiteral:
self.fail("Can not handle character literal"
" resulting in multiple decoded characters.")
return None
def _parse_paren_expression(self) -> ASTExpression:
# "(" expression ")"
if self.current_char != '(':
return None
self.pos += 1
res = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' in end of parenthesized expression.")
return ASTParenExpr(res)
def _parse_primary_expression(self) -> ASTExpression:
# literal
# "(" expression ")"
# id-expression -> we parse this with _parse_nested_name
self.skip_ws()
res = self._parse_literal() # type: ASTExpression
if res is not None:
return res
res = self._parse_paren_expression()
if res is not None:
return res
nn = self._parse_nested_name()
if nn is not None:
return ASTIdExpression(nn)
return None
def _parse_initializer_list(self, name: str, open: str, close: str
) -> Tuple[List[ASTExpression], bool]:
# Parse open and close with the actual initializer-list in between
# -> initializer-clause '...'[opt]
# | initializer-list ',' initializer-clause '...'[opt]
# TODO: designators
self.skip_ws()
if not self.skip_string_and_ws(open):
return None, None
if self.skip_string(close):
return [], False
exprs = []
trailingComma = False
while True:
self.skip_ws()
expr = self._parse_expression()
self.skip_ws()
exprs.append(expr)
self.skip_ws()
if self.skip_string(close):
break
if not self.skip_string_and_ws(','):
self.fail("Error in %s, expected ',' or '%s'." % (name, close))
if self.current_char == close and close == '}':
self.pos += 1
trailingComma = True
break
return exprs, trailingComma
def _parse_paren_expression_list(self) -> ASTParenExprList:
# -> '(' expression-list ')'
# though, we relax it to also allow empty parens
# as it's needed in some cases
#
# expression-list
# -> initializer-list
exprs, trailingComma = self._parse_initializer_list("parenthesized expression-list",
'(', ')')
if exprs is None:
return None
return ASTParenExprList(exprs)
def _parse_braced_init_list(self) -> ASTBracedInitList:
# -> '{' initializer-list ','[opt] '}'
# | '{' '}'
exprs, trailingComma = self._parse_initializer_list("braced-init-list", '{', '}')
if exprs is None:
return None
return ASTBracedInitList(exprs, trailingComma)
def _parse_postfix_expression(self) -> ASTPostfixExpr:
# -> primary
# | postfix "[" expression "]"
# | postfix "[" braced-init-list [opt] "]"
# | postfix "(" expression-list [opt] ")"
# | postfix "." id-expression
# | postfix "->" id-expression
# | postfix "++"
# | postfix "--"
prefix = self._parse_primary_expression()
# and now parse postfixes
postFixes = [] # type: List[ASTPostfixOp]
while True:
self.skip_ws()
if self.skip_string_and_ws('['):
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' in end of postfix expression.")
postFixes.append(ASTPostfixArray(expr))
continue
if self.skip_string('.'):
if self.skip_string('*'):
# don't steal the dot
self.pos -= 2
elif self.skip_string('..'):
# don't steal the dot
self.pos -= 3
else:
name = self._parse_nested_name()
postFixes.append(ASTPostfixMember(name))
continue
if self.skip_string('->'):
if self.skip_string('*'):
# don't steal the arrow
self.pos -= 3
else:
name = self._parse_nested_name()
postFixes.append(ASTPostfixMemberOfPointer(name))
continue
if self.skip_string('++'):
postFixes.append(ASTPostfixInc())
continue
if self.skip_string('--'):
postFixes.append(ASTPostfixDec())
continue
lst = self._parse_paren_expression_list()
if lst is not None:
postFixes.append(ASTPostfixCallExpr(lst))
continue
break
return ASTPostfixExpr(prefix, postFixes)
def _parse_unary_expression(self) -> ASTExpression:
# -> postfix
# | "++" cast
# | "--" cast
# | unary-operator cast -> (* | & | + | - | ! | ~) cast
# The rest:
# | "sizeof" unary
# | "sizeof" "(" type-id ")"
# | "alignof" "(" type-id ")"
self.skip_ws()
for op in _expression_unary_ops:
# TODO: hmm, should we be able to backtrack here?
if op[0] in 'cn':
res = self.skip_word(op)
else:
res = self.skip_string(op)
if res:
expr = self._parse_cast_expression()
return ASTUnaryOpExpr(op, expr)
if self.skip_word_and_ws('sizeof'):
if self.skip_string_and_ws('('):
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'sizeof'.")
return ASTSizeofType(typ)
expr = self._parse_unary_expression()
return ASTSizeofExpr(expr)
if self.skip_word_and_ws('alignof'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'alignof'.")
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'alignof'.")
return ASTAlignofExpr(typ)
return self._parse_postfix_expression()
def _parse_cast_expression(self) -> ASTExpression:
# -> unary | "(" type-id ")" cast
pos = self.pos
self.skip_ws()
if self.skip_string('('):
try:
typ = self._parse_type(False)
if not self.skip_string(')'):
self.fail("Expected ')' in cast expression.")
expr = self._parse_cast_expression()
return ASTCastExpr(typ, expr)
except DefinitionError as exCast:
self.pos = pos
try:
return self._parse_unary_expression()
except DefinitionError as exUnary:
errs = []
errs.append((exCast, "If type cast expression"))
errs.append((exUnary, "If unary expression"))
raise self._make_multi_error(errs, "Error in cast expression.")
else:
return self._parse_unary_expression()
def _parse_logical_or_expression(self) -> ASTExpression:
# logical-or = logical-and ||
# logical-and = inclusive-or &&
# inclusive-or = exclusive-or |
# exclusive-or = and ^
# and = equality &
# equality = relational ==, !=
# relational = shift <, >, <=, >=
# shift = additive <<, >>
# additive = multiplicative +, -
# multiplicative = pm *, /, %
# pm = cast .*, ->*
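# Precedence climbing: _expression_bin_ops (defined elsewhere in this module)
# is assumed to list operator groups from lowest to highest precedence,
# matching the grammar sketch above. Each level parses operands at the next
# higher level and folds its own operators left to right, so a hypothetical
# input like "a + b * c" ends up grouped as a + (b * c).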
def _parse_bin_op_expr(self, opId):
if opId + 1 == len(_expression_bin_ops):
def parser() -> ASTExpression:
return self._parse_cast_expression()
else:
def parser() -> ASTExpression:
return _parse_bin_op_expr(self, opId + 1)
exprs = []
ops = []
exprs.append(parser())
while True:
self.skip_ws()
pos = self.pos
oneMore = False
for op in _expression_bin_ops[opId]:
if op[0] in 'abcnox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
if op == '&' and self.current_char == '&':
# don't split the && 'token'
self.pos -= 1
# and btw. && has lower precedence, so we are done
break
try:
expr = parser()
exprs.append(expr)
ops.append(op)
oneMore = True
break
except DefinitionError:
self.pos = pos
if not oneMore:
break
return ASTBinOpExpr(exprs, ops)
return _parse_bin_op_expr(self, 0)
def _parse_conditional_expression_tail(self, orExprHead: Any) -> ASTExpression:
# -> "?" expression ":" assignment-expression
return None
def _parse_assignment_expression(self) -> ASTExpression:
# -> conditional-expression
# | logical-or-expression assignment-operator initializer-clause
# -> conditional-expression ->
# logical-or-expression
# | logical-or-expression "?" expression ":" assignment-expression
# | logical-or-expression assignment-operator initializer-clause
exprs = []
ops = []
orExpr = self._parse_logical_or_expression()
exprs.append(orExpr)
# TODO: handle ternary with _parse_conditional_expression_tail
while True:
oneMore = False
self.skip_ws()
for op in _expression_assignment_ops:
if op[0] in 'abcnox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
expr = self._parse_logical_or_expression()
exprs.append(expr)
ops.append(op)
oneMore = True
if not oneMore:
break
return ASTAssignmentExpr(exprs, ops)
def _parse_constant_expression(self) -> ASTExpression:
# -> conditional-expression
orExpr = self._parse_logical_or_expression()
# TODO: use _parse_conditional_expression_tail
return orExpr
def _parse_expression(self) -> ASTExpression:
# -> assignment-expression
# | expression "," assignment-expresion
# TODO: actually parse the second production
return self._parse_assignment_expression()
def _parse_expression_fallback(
self, end: List[str],
parser: Callable[[], ASTExpression],
allow: bool = True) -> ASTExpression:
# Stupidly "parse" an expression.
# 'end' should be a list of characters which ends the expression.
# first try to use the provided parser
prevPos = self.pos
try:
return parser()
except DefinitionError as e:
# some places (e.g., template parameters) we really don't want to use fallback,
# and for testing we may want to globally disable it
if not allow or not self.allowFallbackExpressionParsing:
raise
self.warn("Parsing of expression failed. Using fallback parser."
" Error was:\n%s" % e)
self.pos = prevPos
# and then the fallback scanning
assert end is not None
self.skip_ws()
startPos = self.pos
if self.match(_string_re):
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
brackets = {'(': ')', '{': '}', '[': ']'}
symbols = [] # type: List[str]
while not self.eof:
if (len(symbols) == 0 and self.current_char in end):
break
if self.current_char in brackets.keys():
symbols.append(brackets[self.current_char])
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
self.pos += 1
if len(end) > 0 and self.eof:
self.fail("Could not find end of expression starting at %d."
% startPos)
value = self.definition[startPos:self.pos].strip()
return ASTFallbackExpr(value.strip())
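# Nested names in the C domain use '.' as the scope separator (a leading '.'
# makes the name rooted), so a hypothetical "s.member" refers to "member"
# inside the scope "s".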
def _parse_nested_name(self) -> ASTNestedName:
names = [] # type: List[Any]
self.skip_ws()
rooted = False
if self.skip_string('.'):
rooted = True
while 1:
self.skip_ws()
if not self.match(identifier_re):
self.fail("Expected identifier in nested name.")
identifier = self.matched_text
# make sure there isn't a keyword
if identifier in _keywords:
self.fail("Expected identifier in nested name, "
"got keyword: %s" % identifier)
ident = ASTIdentifier(identifier)
names.append(ident)
self.skip_ws()
if not self.skip_string('.'):
break
return ASTNestedName(names, rooted)
def _parse_trailing_type_spec(self) -> ASTTrailingTypeSpec:
# fundamental types
self.skip_ws()
for t in self._simple_fundamental_types:
if self.skip_word(t):
return ASTTrailingTypeSpecFundamental(t)
# TODO: this could/should be more strict
elements = []
if self.skip_word_and_ws('signed'):
elements.append('signed')
elif self.skip_word_and_ws('unsigned'):
elements.append('unsigned')
while 1:
if self.skip_word_and_ws('short'):
elements.append('short')
elif self.skip_word_and_ws('long'):
elements.append('long')
else:
break
if self.skip_word_and_ws('char'):
elements.append('char')
elif self.skip_word_and_ws('int'):
elements.append('int')
elif self.skip_word_and_ws('double'):
elements.append('double')
elif self.skip_word_and_ws('__int64'):
elements.append('__int64')
if len(elements) > 0:
return ASTTrailingTypeSpecFundamental(' '.join(elements))
# prefixed
prefix = None
self.skip_ws()
for k in self._prefix_keys:
if self.skip_word_and_ws(k):
prefix = k
break
nestedName = self._parse_nested_name()
return ASTTrailingTypeSpecName(prefix, nestedName)
def _parse_parameters(self, paramMode: str) -> ASTParameters:
self.skip_ws()
if not self.skip_string('('):
if paramMode == 'function':
self.fail('Expecting "(" in parameters.')
else:
return None
args = []
self.skip_ws()
if not self.skip_string(')'):
while 1:
self.skip_ws()
if self.skip_string('...'):
args.append(ASTFunctionParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in parameters.')
break
# note: it seems that function arguments can always be named,
# even in function pointers and similar.
arg = self._parse_type_with_init(outer=None, named='single')
# TODO: parse default parameters # TODO: didn't we just do that?
args.append(ASTFunctionParameter(arg))
self.skip_ws()
if self.skip_string(','):
continue
elif self.skip_string(')'):
break
else:
self.fail(
'Expecting "," or ")" in parameters, '
'got "%s".' % self.current_char)
return ASTParameters(args)
def _parse_decl_specs_simple(self, outer: str, typed: bool) -> ASTDeclSpecsSimple:
"""Just parse the simple ones."""
storage = None
threadLocal = None
inline = None
restrict = None
volatile = None
const = None
attrs = []
while 1: # accept any permutation of a subset of some decl-specs
self.skip_ws()
if not storage:
if outer == 'member':
if self.skip_word('auto'):
storage = 'auto'
continue
if self.skip_word('register'):
storage = 'register'
continue
if outer in ('member', 'function'):
if self.skip_word('static'):
storage = 'static'
continue
if self.skip_word('extern'):
storage = 'extern'
continue
if outer == 'member' and not threadLocal:
if self.skip_word('thread_local'):
threadLocal = 'thread_local'
continue
if self.skip_word('_Thread_local'):
threadLocal = '_Thread_local'
continue
if outer == 'function' and not inline:
inline = self.skip_word('inline')
if inline:
continue
if not restrict and typed:
restrict = self.skip_word('restrict')
if restrict:
continue
if not volatile and typed:
volatile = self.skip_word('volatile')
if volatile:
continue
if not const and typed:
const = self.skip_word('const')
if const:
continue
attr = self._parse_attribute()
if attr:
attrs.append(attr)
continue
break
return ASTDeclSpecsSimple(storage, threadLocal, inline,
restrict, volatile, const, attrs)
def _parse_decl_specs(self, outer: str, typed: bool = True) -> ASTDeclSpecs:
if outer:
if outer not in ('type', 'member', 'function'):
raise Exception('Internal error, unknown outer "%s".' % outer)
leftSpecs = self._parse_decl_specs_simple(outer, typed)
rightSpecs = None
if typed:
trailing = self._parse_trailing_type_spec()
rightSpecs = self._parse_decl_specs_simple(outer, typed)
else:
trailing = None
return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing)
def _parse_declarator_name_suffix(
self, named: Union[bool, str], paramMode: str, typed: bool
) -> ASTDeclarator:
# now we should parse the name, and then suffixes
if named == 'maybe':
pos = self.pos
try:
declId = self._parse_nested_name()
except DefinitionError:
self.pos = pos
declId = None
elif named == 'single':
if self.match(identifier_re):
identifier = ASTIdentifier(self.matched_text)
declId = ASTNestedName([identifier], rooted=False)
else:
declId = None
elif named:
declId = self._parse_nested_name()
else:
declId = None
arrayOps = []
while 1:
self.skip_ws()
if typed and self.skip_string('['):
self.skip_ws()
static = False
const = False
volatile = False
restrict = False
while True:
if not static:
if self.skip_word_and_ws('static'):
static = True
continue
if not const:
if self.skip_word_and_ws('const'):
const = True
continue
if not volatile:
if self.skip_word_and_ws('volatile'):
volatile = True
continue
if not restrict:
if self.skip_word_and_ws('restrict'):
restrict = True
continue
break
vla = False if static else self.skip_string_and_ws('*')
if vla:
if not self.skip_string(']'):
self.fail("Expected ']' in end of array operator.")
size = None
else:
if self.skip_string(']'):
size = None
else:
def parser():
return self._parse_expression()
size = self._parse_expression_fallback([']'], parser)
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' in end of array operator.")
arrayOps.append(ASTArray(static, const, volatile, restrict, vla, size))
else:
break
param = self._parse_parameters(paramMode)
if param is None and len(arrayOps) == 0:
# perhaps a bit-field
if named and paramMode == 'type' and typed:
self.skip_ws()
if self.skip_string(':'):
size = self._parse_constant_expression()
return ASTDeclaratorNameBitField(declId=declId, size=size)
return ASTDeclaratorNameParam(declId=declId, arrayOps=arrayOps,
param=param)
def _parse_declarator(self, named: Union[bool, str], paramMode: str,
typed: bool = True) -> ASTDeclarator:
# 'typed' here means 'parse return type stuff'
if paramMode not in ('type', 'function'):
raise Exception(
"Internal error, unknown paramMode '%s'." % paramMode)
prevErrors = []
self.skip_ws()
if typed and self.skip_string('*'):
self.skip_ws()
restrict = False
volatile = False
const = False
attrs = []
while 1:
if not restrict:
restrict = self.skip_word_and_ws('restrict')
if restrict:
continue
if not volatile:
volatile = self.skip_word_and_ws('volatile')
if volatile:
continue
if not const:
const = self.skip_word_and_ws('const')
if const:
continue
attr = self._parse_attribute()
if attr is not None:
attrs.append(attr)
continue
break
next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorPtr(next=next,
restrict=restrict, volatile=volatile, const=const,
attrs=attrs)
if typed and self.current_char == '(': # note: peeking, not skipping
# maybe this is the beginning of params, try that first,
# otherwise assume it's a noptr-declarator: '(' ptr-declarator ')'
pos = self.pos
try:
# assume this is params
res = self._parse_declarator_name_suffix(named, paramMode,
typed)
return res
except DefinitionError as exParamQual:
msg = "If declarator-id with parameters"
if paramMode == 'function':
msg += " (e.g., 'void f(int arg)')"
prevErrors.append((exParamQual, msg))
self.pos = pos
try:
assert self.current_char == '('
self.skip_string('(')
# TODO: hmm, if there is a name, it must be in inner, right?
# TODO: hmm, if there must be parameters, they must be
# inside, right?
inner = self._parse_declarator(named, paramMode, typed)
if not self.skip_string(')'):
self.fail("Expected ')' in \"( ptr-declarator )\"")
next = self._parse_declarator(named=False,
paramMode="type",
typed=typed)
return ASTDeclaratorParen(inner=inner, next=next)
except DefinitionError as exNoPtrParen:
self.pos = pos
msg = "If parenthesis in noptr-declarator"
if paramMode == 'function':
msg += " (e.g., 'void (*f(int arg))(double)')"
prevErrors.append((exNoPtrParen, msg))
header = "Error in declarator"
raise self._make_multi_error(prevErrors, header)
pos = self.pos
try:
return self._parse_declarator_name_suffix(named, paramMode, typed)
except DefinitionError as e:
self.pos = pos
prevErrors.append((e, "If declarator-id"))
header = "Error in declarator or parameters"
raise self._make_multi_error(prevErrors, header)
def _parse_initializer(self, outer: str = None, allowFallback: bool = True
) -> ASTInitializer:
self.skip_ws()
if outer == 'member' and False: # TODO
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit, hasAssign=False)
if not self.skip_string('='):
return None
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit)
if outer == 'member':
fallbackEnd = [] # type: List[str]
elif outer is None: # function parameter
fallbackEnd = [',', ')']
else:
self.fail("Internal error, initializer for outer '%s' not "
"implemented." % outer)
def parser():
return self._parse_assignment_expression()
value = self._parse_expression_fallback(fallbackEnd, parser, allow=allowFallback)
return ASTInitializer(value)
def _parse_type(self, named: Union[bool, str], outer: str = None) -> ASTType:
"""
named=False|'maybe'|True: 'maybe' is e.g., for function objects which
don't need to name the arguments
"""
if outer: # always named
if outer not in ('type', 'member', 'function'):
raise Exception('Internal error, unknown outer "%s".' % outer)
assert named
if outer == 'type':
# We allow type objects to just be a name.
prevErrors = []
startPos = self.pos
# first try without the type
try:
declSpecs = self._parse_decl_specs(outer=outer, typed=False)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=False)
self.assert_end(allowSemicolon=True)
except DefinitionError as exUntyped:
desc = "If just a name"
prevErrors.append((exUntyped, desc))
self.pos = startPos
try:
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=True, paramMode=outer)
except DefinitionError as exTyped:
self.pos = startPos
desc = "If typedef-like declaration"
prevErrors.append((exTyped, desc))
# Retain the else branch for easier debugging.
# TODO: it would be nice to save the previous stacktrace
# and output it here.
if True:
header = "Type must be either just a name or a "
header += "typedef-like declaration."
raise self._make_multi_error(prevErrors, header)
else:
# For testing purposes.
# do it again to get the proper traceback (how do you
# reliably save a traceback when an exception is
# constructed?)
self.pos = startPos
typed = True
declSpecs = self._parse_decl_specs(outer=outer, typed=typed)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=typed)
elif outer == 'function':
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=True, paramMode=outer)
else:
paramMode = 'type'
if outer == 'member': # i.e., member
named = True
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=named, paramMode=paramMode)
return ASTType(declSpecs, decl)
def _parse_type_with_init(self, named: Union[bool, str], outer: str) -> ASTTypeWithInit:
if outer:
assert outer in ('type', 'member', 'function')
type = self._parse_type(outer=outer, named=named)
init = self._parse_initializer(outer=outer)
return ASTTypeWithInit(type, init)
def _parse_macro(self) -> ASTMacro:
self.skip_ws()
ident = self._parse_nested_name()
if ident is None:
self.fail("Expected identifier in macro definition.")
self.skip_ws()
if not self.skip_string_and_ws('('):
return ASTMacro(ident, None)
if self.skip_string(')'):
return ASTMacro(ident, [])
args = []
while 1:
self.skip_ws()
if self.skip_string('...'):
args.append(ASTMacroParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in macro parameters.')
break
if not self.match(identifier_re):
self.fail("Expected identifier in macro parameters.")
nn = ASTNestedName([ASTIdentifier(self.matched_text)], rooted=False)
arg = ASTMacroParameter(nn)
args.append(arg)
self.skip_ws()
if self.skip_string_and_ws(','):
continue
elif self.skip_string_and_ws(')'):
break
else:
self.fail("Expected identifier, ')', or ',' in macro parameter list.")
return ASTMacro(ident, args)
def _parse_struct(self) -> ASTStruct:
name = self._parse_nested_name()
return ASTStruct(name)
def _parse_union(self) -> ASTUnion:
name = self._parse_nested_name()
return ASTUnion(name)
def _parse_enum(self) -> ASTEnum:
name = self._parse_nested_name()
return ASTEnum(name)
def _parse_enumerator(self) -> ASTEnumerator:
name = self._parse_nested_name()
self.skip_ws()
init = None
if self.skip_string('='):
self.skip_ws()
def parser() -> ASTExpression:
return self._parse_constant_expression()
initVal = self._parse_expression_fallback([], parser)
init = ASTInitializer(initVal)
return ASTEnumerator(name, init)
def parse_declaration(self, objectType: str, directiveType: str) -> ASTDeclaration:
if objectType not in ('function', 'member',
'macro', 'struct', 'union', 'enum', 'enumerator', 'type'):
raise Exception('Internal error, unknown objectType "%s".' % objectType)
if directiveType not in ('function', 'member', 'var',
'macro', 'struct', 'union', 'enum', 'enumerator', 'type'):
raise Exception('Internal error, unknown directiveType "%s".' % directiveType)
declaration = None # type: Any
if objectType == 'member':
declaration = self._parse_type_with_init(named=True, outer='member')
elif objectType == 'function':
declaration = self._parse_type(named=True, outer='function')
elif objectType == 'macro':
declaration = self._parse_macro()
elif objectType == 'struct':
declaration = self._parse_struct()
elif objectType == 'union':
declaration = self._parse_union()
elif objectType == 'enum':
declaration = self._parse_enum()
elif objectType == 'enumerator':
declaration = self._parse_enumerator()
elif objectType == 'type':
declaration = self._parse_type(named=True, outer='type')
else:
assert False
if objectType != 'macro':
self.skip_ws()
semicolon = self.skip_string(';')
else:
semicolon = False
return ASTDeclaration(objectType, directiveType, declaration, semicolon)
def parse_namespace_object(self) -> ASTNestedName:
return self._parse_nested_name()
def parse_xref_object(self) -> ASTNestedName:
name = self._parse_nested_name()
# if there are '()' left, just skip them
self.skip_ws()
self.skip_string('()')
self.assert_end()
return name
def parse_expression(self) -> Union[ASTExpression, ASTType]:
pos = self.pos
res = None # type: Union[ASTExpression, ASTType]
try:
res = self._parse_expression()
self.skip_ws()
self.assert_end()
except DefinitionError as exExpr:
self.pos = pos
try:
res = self._parse_type(False)
self.skip_ws()
self.assert_end()
except DefinitionError as exType:
header = "Error when parsing (type) expression."
errs = []
errs.append((exExpr, "If expression"))
errs.append((exType, "If type"))
raise self._make_multi_error(errs, header)
return res
def _make_phony_error_name() -> ASTNestedName:
return ASTNestedName([ASTIdentifier("PhonyNameDueToError")], rooted=False)
class CObject(ObjectDescription):
"""
Description of a C language object.
"""
doc_field_types = [
TypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument'),
typerolename='type', typenames=('type',)),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
Field('returntype', label=_('Return type'), has_arg=False,
names=('rtype',)),
]
def _add_enumerator_to_parent(self, ast: ASTDeclaration) -> None:
assert ast.objectType == 'enumerator'
# find the parent, if it exists && is an enum
# then add the name to the parent scope
symbol = ast.symbol
assert symbol
assert symbol.ident is not None
parentSymbol = symbol.parent
assert parentSymbol
if parentSymbol.parent is None:
# TODO: we could warn, but it is somewhat equivalent to
# enumerators, without the enum
return # no parent
parentDecl = parentSymbol.declaration
if parentDecl is None:
# the parent is not explicitly declared
# TODO: we could warn, but?
return
if parentDecl.objectType != 'enum':
# TODO: maybe issue a warning, enumerators in non-enums is weird,
# but it is somewhat equivalent to enumerators, without the enum
return
if parentDecl.directiveType != 'enum':
return
targetSymbol = parentSymbol.parent
s = targetSymbol.find_identifier(symbol.ident, matchSelf=False, recurseInAnon=True,
searchInSiblings=False)
if s is not None:
# something is already declared with that name
return
declClone = symbol.declaration.clone()
declClone.enumeratorScopedSymbol = symbol
Symbol(parent=targetSymbol, ident=symbol.ident,
declaration=declClone,
docname=self.env.docname)
def add_target_and_index(self, ast: ASTDeclaration, sig: str,
signode: TextElement) -> None:
ids = []
for i in range(1, _max_id + 1):
try:
id = ast.get_id(version=i)
ids.append(id)
except NoOldIdError:
assert i < _max_id
# let's keep the newest first
ids = list(reversed(ids))
newestId = ids[0]
assert newestId # shouldn't be None
name = ast.symbol.get_full_nested_name().get_display_string().lstrip('.')
if newestId not in self.state.document.ids:
# always add the newest id
assert newestId
signode['ids'].append(newestId)
# only add compatibility ids when there are no conflicts
for id in ids[1:]:
if not id: # is None when the element didn't exist in that version
continue
if id not in self.state.document.ids:
signode['ids'].append(id)
self.state.document.note_explicit_target(signode)
domain = cast(CDomain, self.env.get_domain('c'))
domain.note_object(name, self.objtype, newestId)
indexText = self.get_index_text(name)
self.indexnode['entries'].append(('single', indexText, newestId, '', None))
@property
def object_type(self) -> str:
raise NotImplementedError()
@property
def display_object_type(self) -> str:
return self.object_type
def get_index_text(self, name: str) -> str:
return _('%s (C %s)') % (name, self.display_object_type)
def parse_definition(self, parser: DefinitionParser) -> ASTDeclaration:
return parser.parse_declaration(self.object_type, self.objtype)
def describe_signature(self, signode: TextElement, ast: Any, options: Dict) -> None:
ast.describe_signature(signode, 'lastIsName', self.env, options)
def run(self) -> List[Node]:
env = self.state.document.settings.env # from ObjectDescription.run
if 'c:parent_symbol' not in env.temp_data:
root = env.domaindata['c']['root_symbol']
env.temp_data['c:parent_symbol'] = root
env.ref_context['c:parent_key'] = root.get_lookup_key()
# When multiple declarations are made in the same directive
# they need to know about each other to provide symbol lookup for function parameters.
# We use last_symbol to store the latest added declaration in a directive.
env.temp_data['c:last_symbol'] = None
return super().run()
def handle_signature(self, sig: str, signode: TextElement) -> ASTDeclaration:
parentSymbol = self.env.temp_data['c:parent_symbol'] # type: Symbol
parser = DefinitionParser(sig, location=signode, config=self.env.config)
try:
ast = self.parse_definition(parser)
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=signode)
# It is easier to assume some phony name than to handle the error in
# the possibly inner declarations.
name = _make_phony_error_name()
symbol = parentSymbol.add_name(name)
self.env.temp_data['c:last_symbol'] = symbol
raise ValueError
try:
symbol = parentSymbol.add_declaration(ast, docname=self.env.docname)
# append the new declaration to the sibling list
assert symbol.siblingAbove is None
assert symbol.siblingBelow is None
symbol.siblingAbove = self.env.temp_data['c:last_symbol']
if symbol.siblingAbove is not None:
assert symbol.siblingAbove.siblingBelow is None
symbol.siblingAbove.siblingBelow = symbol
self.env.temp_data['c:last_symbol'] = symbol
except _DuplicateSymbolError as e:
# Assume we are actually in the old symbol,
# instead of the newly created duplicate.
self.env.temp_data['c:last_symbol'] = e.symbol
logger.warning("Duplicate declaration, %s", sig, location=signode)
if ast.objectType == 'enumerator':
self._add_enumerator_to_parent(ast)
# note: handle_signature may be called multiple times per directive,
# if it has multiple signatures, so don't mess with the original options.
options = dict(self.options)
self.describe_signature(signode, ast, options)
return ast
def before_content(self) -> None:
lastSymbol = self.env.temp_data['c:last_symbol'] # type: Symbol
assert lastSymbol
self.oldParentSymbol = self.env.temp_data['c:parent_symbol']
self.oldParentKey = self.env.ref_context['c:parent_key'] # type: LookupKey
self.env.temp_data['c:parent_symbol'] = lastSymbol
self.env.ref_context['c:parent_key'] = lastSymbol.get_lookup_key()
def after_content(self) -> None:
self.env.temp_data['c:parent_symbol'] = self.oldParentSymbol
self.env.ref_context['c:parent_key'] = self.oldParentKey
def make_old_id(self, name: str) -> str:
"""Generate old styled node_id for C objects.
.. note:: Old Styled node_id was used until Sphinx-3.0.
This will be removed in Sphinx-5.0.
"""
return 'c.' + name
class CMemberObject(CObject):
object_type = 'member'
@property
def display_object_type(self) -> str:
# the distinction between var and member is only cosmetic
assert self.objtype in ('member', 'var')
return self.objtype
class CFunctionObject(CObject):
object_type = 'function'
class CMacroObject(CObject):
object_type = 'macro'
class CStructObject(CObject):
object_type = 'struct'
class CUnionObject(CObject):
object_type = 'union'
class CEnumObject(CObject):
object_type = 'enum'
class CEnumeratorObject(CObject):
object_type = 'enumerator'
class CTypeObject(CObject):
object_type = 'type'
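# The following directives control the current scope rather than describing an
# object. A hypothetical usage:
#
#   .. c:namespace:: mylib
#
# Passing NULL, 0, or nullptr to c:namespace resets the scope to the global
# (root) symbol, as handled in run() below.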
class CNamespaceObject(SphinxDirective):
"""
This directive is just to tell Sphinx that we're documenting stuff in
namespace foo.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self) -> List[Node]:
rootSymbol = self.env.domaindata['c']['root_symbol']
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
symbol = rootSymbol
stack = [] # type: List[Symbol]
else:
parser = DefinitionParser(self.arguments[0],
location=self.get_source_info(),
config=self.env.config)
try:
name = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_source_info())
name = _make_phony_error_name()
symbol = rootSymbol.add_name(name)
stack = [symbol]
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
class CNamespacePushObject(SphinxDirective):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self) -> List[Node]:
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
return []
parser = DefinitionParser(self.arguments[0],
location=self.get_source_info(),
config=self.env.config)
try:
name = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_source_info())
name = _make_phony_error_name()
oldParent = self.env.temp_data.get('c:parent_symbol', None)
if not oldParent:
oldParent = self.env.domaindata['c']['root_symbol']
symbol = oldParent.add_name(name)
stack = self.env.temp_data.get('c:namespace_stack', [])
stack.append(symbol)
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
class CNamespacePopObject(SphinxDirective):
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self) -> List[Node]:
stack = self.env.temp_data.get('c:namespace_stack', None)
if not stack or len(stack) == 0:
logger.warning("C namespace pop on empty stack. Defaulting to gobal scope.",
location=self.get_source_info())
stack = []
else:
stack.pop()
if len(stack) > 0:
symbol = stack[-1]
else:
symbol = self.env.domaindata['c']['root_symbol']
self.env.temp_data['c:parent_symbol'] = symbol
self.env.temp_data['c:namespace_stack'] = stack
self.env.ref_context['c:parent_key'] = symbol.get_lookup_key()
return []
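# CXRefRole implements the c:* cross-reference roles. When no explicit title is
# given, a leading '~' on the target hides the enclosing scopes from the
# rendered text: a hypothetical :c:func:`~mylib.init` displays just "init"
# while still linking to mylib.init.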
class CXRefRole(XRefRole):
def process_link(self, env: BuildEnvironment, refnode: Element,
has_explicit_title: bool, title: str, target: str) -> Tuple[str, str]:
refnode.attributes.update(env.ref_context)
if not has_explicit_title:
# major hax: replace anon names via simple string manipulation.
# Can this actually fail?
title = anon_identifier_re.sub("[anonymous]", str(title))
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
return title, target
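# CExprRole backs the c:expr and c:texpr roles (registered below): both parse
# their content as a C expression (or type); c:expr renders it as inline code
# (nodes.literal) while c:texpr renders it as plain inline text.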
class CExprRole(SphinxRole):
def __init__(self, asCode: bool) -> None:
super().__init__()
if asCode:
# render the expression as inline code
self.class_type = 'c-expr'
self.node_type = nodes.literal # type: Type[TextElement]
else:
# render the expression as inline text
self.class_type = 'c-texpr'
self.node_type = nodes.inline
def run(self) -> Tuple[List[Node], List[system_message]]:
text = self.text.replace('\n', ' ')
parser = DefinitionParser(text, location=self.get_source_info(),
config=self.env.config)
# attempt to mimic XRefRole classes, except that...
classes = ['xref', 'c', self.class_type]
try:
ast = parser.parse_expression()
except DefinitionError as ex:
logger.warning('Unparseable C expression: %r\n%s', text, ex,
location=self.get_source_info())
# see below
return [self.node_type(text, text, classes=classes)], []
parentSymbol = self.env.temp_data.get('c:parent_symbol', None)
if parentSymbol is None:
parentSymbol = self.env.domaindata['c']['root_symbol']
# ...most if not all of these classes should really apply to the individual references,
# not the container node
signode = self.node_type(classes=classes)
ast.describe_signature(signode, 'markType', self.env, parentSymbol)
return [signode], []
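# CDomain wires everything together: it registers the directives and roles
# above, keeps the root of the symbol tree in its domain data, and resolves
# cross-references by parsing the target and looking it up in that tree.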
class CDomain(Domain):
"""C language domain."""
name = 'c'
label = 'C'
object_types = {
'function': ObjType(_('function'), 'func'),
'member': ObjType(_('member'), 'member'),
'macro': ObjType(_('macro'), 'macro'),
'type': ObjType(_('type'), 'type'),
'var': ObjType(_('variable'), 'data'),
}
directives = {
'member': CMemberObject,
'var': CMemberObject,
'function': CFunctionObject,
'macro': CMacroObject,
'struct': CStructObject,
'union': CUnionObject,
'enum': CEnumObject,
'enumerator': CEnumeratorObject,
'type': CTypeObject,
# scope control
'namespace': CNamespaceObject,
'namespace-push': CNamespacePushObject,
'namespace-pop': CNamespacePopObject,
}
roles = {
'member': CXRefRole(),
'data': CXRefRole(),
'var': CXRefRole(),
'func': CXRefRole(fix_parens=True),
'macro': CXRefRole(),
'struct': CXRefRole(),
'union': CXRefRole(),
'enum': CXRefRole(),
'enumerator': CXRefRole(),
'type': CXRefRole(),
'expr': CExprRole(asCode=True),
'texpr': CExprRole(asCode=False)
}
initial_data = {
'root_symbol': Symbol(None, None, None, None),
'objects': {}, # fullname -> docname, node_id, objtype
} # type: Dict[str, Union[Symbol, Dict[str, Tuple[str, str, str]]]]
@property
def objects(self) -> Dict[str, Tuple[str, str, str]]:
return self.data.setdefault('objects', {}) # fullname -> docname, node_id, objtype
def note_object(self, name: str, objtype: str, node_id: str, location: Any = None) -> None:
if name in self.objects:
docname = self.objects[name][0]
logger.warning(__('Duplicate C object description of %s, '
'other instance in %s, use :noindex: for one of them'),
name, docname, location=location)
self.objects[name] = (self.env.docname, node_id, objtype)
def clear_doc(self, docname: str) -> None:
if Symbol.debug_show_tree:
print("clear_doc:", docname)
print("\tbefore:")
print(self.data['root_symbol'].dump(1))
print("\tbefore end")
rootSymbol = self.data['root_symbol']
rootSymbol.clear_doc(docname)
if Symbol.debug_show_tree:
print("\tafter:")
print(self.data['root_symbol'].dump(1))
print("\tafter end")
print("clear_doc end:", docname)
for fullname, (fn, _id, _l) in list(self.objects.items()):
if fn == docname:
del self.objects[fullname]
def process_doc(self, env: BuildEnvironment, docname: str,
document: nodes.document) -> None:
if Symbol.debug_show_tree:
print("process_doc:", docname)
print(self.data['root_symbol'].dump(0))
print("process_doc end:", docname)
def process_field_xref(self, pnode: pending_xref) -> None:
pnode.attributes.update(self.env.ref_context)
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
if Symbol.debug_show_tree:
print("merge_domaindata:")
print("\tself:")
print(self.data['root_symbol'].dump(1))
print("\tself end")
print("\tother:")
print(otherdata['root_symbol'].dump(1))
print("\tother end")
print("merge_domaindata end")
self.data['root_symbol'].merge_with(otherdata['root_symbol'],
docnames, self.env)
ourObjects = self.data['objects']
for fullname, (fn, id_, objtype) in otherdata['objects'].items():
if fn in docnames:
if fullname in ourObjects:
msg = __("Duplicate declaration, also defined in '%s'.\n"
"Name of declaration is '%s'.")
msg = msg % (ourObjects[fullname][0], fullname)
logger.warning(msg, location=fn)
else:
ourObjects[fullname] = (fn, id_, objtype)
def _resolve_xref_inner(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref,
contnode: Element) -> Tuple[Element, str]:
parser = DefinitionParser(target, location=node, config=env.config)
try:
name = parser.parse_xref_object()
except DefinitionError as e:
logger.warning('Unparseable C cross-reference: %r\n%s', target, e,
location=node)
return None, None
parentKey = node.get("c:parent_key", None) # type: LookupKey
rootSymbol = self.data['root_symbol']
if parentKey:
parentSymbol = rootSymbol.direct_lookup(parentKey) # type: Symbol
if not parentSymbol:
print("Target: ", target)
print("ParentKey: ", parentKey)
print(rootSymbol.dump(1))
assert parentSymbol # should be there
else:
parentSymbol = rootSymbol
s = parentSymbol.find_declaration(name, typ,
matchSelf=True, recurseInAnon=True)
if s is None or s.declaration is None:
return None, None
# TODO: check role type vs. object type
declaration = s.declaration
displayName = name.get_display_string()
docname = s.docname
assert docname
return make_refnode(builder, fromdocname, docname,
declaration.get_newest_id(), contnode, displayName
), declaration.objectType
def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref,
contnode: Element) -> Element:
return self._resolve_xref_inner(env, fromdocname, builder, typ,
target, node, contnode)[0]
def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
target: str, node: pending_xref, contnode: Element
) -> List[Tuple[str, Element]]:
with logging.suppress_logging():
retnode, objtype = self._resolve_xref_inner(env, fromdocname, builder,
'any', target, node, contnode)
if retnode:
return [('c:' + self.role_for_objtype(objtype), retnode)]
return []
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
for refname, (docname, node_id, objtype) in list(self.objects.items()):
yield (refname, refname, objtype, docname, node_id, 1)
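# Extension entry point: registers the C domain and the two config values used
# by the attribute parsing above (c_id_attributes, c_paren_attributes).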
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(CDomain)
app.add_config_value("c_id_attributes", [], 'env')
app.add_config_value("c_paren_attributes", [], 'env')
return {
'version': 'builtin',
'env_version': 2,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
############### geobricks_gis_raster/core/raster.py (GeobricksGISRaster 0.1.12)
import gdal
import os
import subprocess
import copy
import math
import json
import rasterio
from geobricks_common.core.log import logger
from geobricks_common.core.filesystem import create_tmp_filename
from geobricks_proj4_to_epsg.core.proj4_to_epsg import get_epsg_code_from_proj4
log = logger(__file__)
# example of statistics
stats_config = {
"descriptive_stats": {
"force": True
},
"histogram": {
"buckets": 256,
"include_out_of_range": 0,
"force": True
}
}
def crop_raster_on_vector_bbox_and_postgis_db(input_file, db_connection_string, query, minlon, minlat, maxlon, maxlat, srcnodata=None, dstnodata=None):
if srcnodata == None:
srcnodata = get_nodata_value(input_file)
if dstnodata == None:
dstnodata = srcnodata
output_bbox = crop_raster_with_bounding_box(input_file, minlon, minlat, maxlon, maxlat, srcnodata)
output_path = crop_by_vector_from_db(output_bbox, db_connection_string, query, srcnodata, dstnodata)
os.remove(output_bbox)
return output_path
def crop_raster_with_bounding_box(input_file, minlon, minlat, maxlon, maxlat, srcnodata=None):
if srcnodata == None:
srcnodata = get_nodata_value(input_file)
log.info("crop_raster_with_bounding_box")
output_file = create_tmp_filename('.tif', 'gdal_translate_by_bbox')
args = [
"gdal_translate",
"-a_nodata", str(srcnodata),
"-projwin",
str(minlat),
str(minlon),
str(maxlat),
str(maxlon),
input_file,
output_file
]
try:
log.info(args)
proc = subprocess.call(args, stdout=subprocess.PIPE, stderr=None)
# proc = subprocess.check_output(args)
    except Exception as e:
raise Exception(e)
return output_file
def crop_by_vector_from_db(input_file, db_connection_string, query, srcnodata='nodata', dstnodata='nodata'):
log.info(query)
output_file_gdal_warp = create_tmp_filename('.tif', 'gdal_warp')
output_file = create_tmp_filename('.tif', 'output')
log.info(input_file)
# crop the layer on cutline
args = [
'gdalwarp',
"-q",
"-multi",
"-of", "GTiff",
"-cutline", db_connection_string,
"-csql", query,
"-srcnodata", str(srcnodata),
"-dstnodata", str(dstnodata),
# -crop_to_cutline is needed otherwise the layer is not cropped
# TODO: resolve shifting problem
# "-crop_to_cutline",
# "-dstalpha",
input_file,
output_file_gdal_warp
]
try:
log.info(args)
#TODO: handle subprocess Error (like that is not taken)
output = subprocess.check_output(args)
# stdout_value, error = proc.communicate()
log.info(output)
    except Exception as e:
raise Exception(e)
    # TODO: is the third operation useful?
args = [
'gdal_translate',
"-co", "COMPRESS=DEFLATE",
"-a_nodata", str(dstnodata),
output_file_gdal_warp,
output_file
]
try:
log.info(" ".join(args))
#TODO: handle subprocess Error (like that is not taken)
proc = subprocess.call(args, stdout=subprocess.PIPE, stderr=None)
    except Exception as e:
        # subprocess.call() returns an exit code, not a process object, so there is no output to collect here
        raise Exception(e)
#os.remove(output_file_gdal_warp)
if os.path.isfile(output_file):
return output_file
return None
############### OLD METHODS
# TODO: remove the db_spatial from here
def crop_by_vector_database(raster_path, db_spatial, query_extent, query_layer):
# TODO: make it better
geom = json.dumps(db_spatial.query(query_extent))
g = json.loads(geom)
#log.info(g)
obj = g[0][0]
#log.info(obj)
obj = json.loads(obj)
# TODO: this is hardcoded because the returning bbox is different from the one used by GDAL processing
#log.info(obj["coordinates"])
minlat = obj["coordinates"][0][0][0]
minlon = obj["coordinates"][0][1][1]
maxlat = obj["coordinates"][0][2][0]
maxlon = obj["coordinates"][0][0][1]
db_connection_string = db_spatial.get_connection_string(True)
srcnodatavalue = get_nodata_value(raster_path)
return _crop_by_vector_database(raster_path, query_layer, db_connection_string, minlat, minlon, maxlat, maxlon, srcnodatavalue, srcnodatavalue)
# TODO: instead of the connection string pass the geometry
def _crop_by_vector_database(input_file, query, db_connection_string, minlat, minlon, maxlat, maxlon, srcnodata='nodata', dstnodata='nodata'):
log.info(query)
output_file_gdal_translate = create_tmp_filename('.tif', 'gdal_translate')
output_file_gdal_warp = create_tmp_filename('.tif', 'gdal_warp')
output_file = create_tmp_filename('.tif', 'output')
args = [
'gdal_translate',
'-projwin',
str(minlat),
str(minlon),
str(maxlat),
str(maxlon),
input_file,
output_file_gdal_translate
]
try:
log.info(args)
#TODO: handle subprocess Error (like that is not taken)
proc = subprocess.call(args, stdout=subprocess.PIPE, stderr=None)
    except Exception as e:
        raise Exception(e)
args = [
'gdalwarp',
"-q",
"-multi",
"-of", "GTiff",
"-cutline", db_connection_string,
"-csql", query,
"-srcnodata", str(srcnodata),
"-dstnodata", str(dstnodata),
# -crop_to_cutline is needed otherwise the layer is not cropped
# TODO: resolve shifting problem
# "-crop_to_cutline",
# "-dstalpha",
output_file_gdal_translate,
output_file_gdal_warp
]
try:
#log.info(args)
#TODO: handle subprocess Error (like that is not taken)
proc = subprocess.call(args, stdout=subprocess.PIPE, stderr=None)
    except Exception as e:
        raise Exception(e)
    # TODO: is the third operation useful?
args = [
'gdal_translate',
"-co", "COMPRESS=DEFLATE",
"-a_nodata", str(dstnodata),
output_file_gdal_warp,
output_file
]
try:
log.info(args)
#TODO: handle subprocess Error (like that is not taken)
proc = subprocess.call(args, stdout=subprocess.PIPE, stderr=None)
    except Exception as e:
        raise Exception(e)
os.remove(output_file_gdal_warp)
os.remove(output_file_gdal_translate)
if os.path.isfile(output_file):
return output_file
return None
def get_statistics(input_file, config=stats_config):
"""
:param input_file: file to be processed
:param config: json config file to be passed
:return: computed statistics
"""
#log.info("get_statistics: %s" % input_file)
if config is None:
config = copy.deepcopy(stats_config)
stats = {}
try:
if os.path.isfile(input_file):
ds = gdal.Open(input_file)
if "descriptive_stats" in config:
stats["stats"] = _get_descriptive_statistics(ds, config["descriptive_stats"])
if "histogram" in config:
stats["hist"] = _get_histogram(ds, config["histogram"])
else:
raise Exception("Exceptiuon")
except Exception, e:
raise Exception(e)
return stats
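# Usage sketch (illustrative only, "/path/to/raster.tif" is a hypothetical file):
#   stats = get_statistics("/path/to/raster.tif")
#   stats["stats"]   # per-band descriptive statistics (min, max, mean, sd)
#   stats["hist"]    # per-band histograms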
def get_descriptive_statistics(input_file, config=stats_config["descriptive_stats"]):
"""
:param input_file: file to be processed
:param config: json config file to be passed
    :return: an array with the min, max, mean, sd statistics per band, e.g. [{"band": 1, "max": 549.0, "mean": 2.8398871527778, "sd": 17.103028971129, "min": 0.0}]
"""
try:
if os.path.isfile(input_file):
ds = gdal.Open(input_file)
return _get_descriptive_statistics(ds, config)
else:
# TODO exception
raise Exception("Exceptiuon")
except Exception, e:
raise Exception(e)
def get_histogram(input_file, config=stats_config["histogram"]):
"""
:param input_file: file to be processed
:type string
:param config: json config file to be passed
:type json
    :return: an array with the histogram per band, e.g. [{"band": 1, "buckets": 256, "values": [43256, 357, ...], "max": 998.0, "min": 0.0}]
"""
try:
if os.path.isfile(input_file):
ds = gdal.Open(input_file)
return _get_histogram(ds, config)
else:
# TODO exception
raise Exception("Exceptiuon")
except Exception, e:
raise Exception(e)
def get_location_values(input_files, lat, lon, band=None):
"""
Get the value of a (lat, lon) location
# TODO:
1) pass a json, instead of [files] pass file and id
2) pass as well the projection used i.e. EPSG:4326
3) for now it's used x, y as lat lon (it's not used the projection)
:param input_files: files to be processed
:type array
    :param lat: latitude value (lat/lon coordinates are assumed for now)
    :type float
    :param lon: longitude value (lat/lon coordinates are assumed for now)
    :type float
    :param band: band default=None (not yet used)
    :return: an array with the values at the (lat, lon) location, one per input file
"""
values = []
for input_file in input_files:
values.append(_location_value(input_file, lat, lon, band))
return values
def _location_value(input_file, lat, lon, band=None):
"""
Get the value of a (lat, lon) location
:param input_file: file to be processed
:type string
    :param lat: latitude value
    :type float
    :param lon: longitude value
    :type float
    :param band: band default=None (not yet used)
    :return: the value at the (lat, lon) location, as reported by gdallocationinfo
"""
# TODO: check with -wgs84 values instead of -geoloc that is the reference system of the image
cmd = "gdallocationinfo -valonly " + input_file + " -wgs84 " + str(lon) + " " + str(lat)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate()
return output.strip()
def _get_descriptive_statistics(ds, config):
# variables
force = True if "force" not in config else bool(config["force"])
# stats
stats = []
for band in range(ds.RasterCount):
band += 1
srcband = ds.GetRasterBand(band)
if srcband is None:
continue
# TODO: check why the "force" doesn't work on GetStatistics but the ComputeStatistics works
if force:
s = srcband.ComputeStatistics(0)
#s = srcband.ComputeRasterMinMax(False)
else:
s = srcband.GetStatistics(False, force)
        if s is None:
continue
if math.isnan(s[2]):
log.warn("polygon is empty! %s " % s)
else:
stats.append({"band": band, "min": s[0], "max": s[1], "mean": s[2], "sd": s[3]})
return stats
def _get_histogram(ds, config=stats_config["histogram"]):
#log.info("config %s " % config)
# variables
# TODO boolean of config value
force = True if "force" not in config else bool(config["force"])
buckets = 256 if "buckets" not in config else int(config["buckets"])
min = None if "min" not in config else int(config["min"])
max = None if "max" not in config else int(config["max"])
include_out_of_range = 0 if "include_out_of_range" not in config else int(config["include_out_of_range"])
# stats
stats = []
for band in range(ds.RasterCount):
band += 1
# TODO: handle better min max
if min is None and max is None:
if force:
(min, max) = ds.GetRasterBand(band).ComputeRasterMinMax(0)
else:
min = ds.GetRasterBand(band).GetMinimum()
max = ds.GetRasterBand(band).GetMaximum()
#hist = ds.GetRasterBand(band).GetDefaultHistogram( force = 0 )
#stats.append({"band": band, "buckets": hist[2], "min": hist[0], "max": hist[1], "values": hist[3]})
hist = ds.GetRasterBand(band).GetHistogram(buckets=buckets, min=min, max=max, include_out_of_range=include_out_of_range, approx_ok = False )
stats.append({"band": band, "buckets": buckets, "min": min, "max": max, "values": hist})
return stats
def get_nodata_value(file_path, band=1):
try:
with rasterio.open(file_path) as src:
return "none" if "nodata" not in src.meta else str(src.meta["nodata"])
    except Exception as e:
log.error(e)
raise Exception(e)
# def get_nodata_value(file_path, band=1):
# ds = gdal.Open(file_path)
# return ds.GetRasterBand(band).GetNoDataValue()
def get_authority(file_path):
'''
Get the authority used by a raster i.e. EPSG:4326
:param file_path: path to the file
:return: return the SRID of the raster projection
'''
with rasterio.open(file_path) as src:
log.info(src.meta)
# if 'init' in src.meta['crs']:
# return src.meta['crs']['init']
# elif 'proj' in src.meta['crs']:
# return src.meta['crs']['proj']
if 'init' in src.meta['crs']:
return src.meta['crs']['init']
elif 'proj' in src.meta['crs']:
# TODO: check if works (find a raster to test it...)
return "EPSG:" + str(get_epsg_code_from_proj4(src.meta['crs']['proj']))
return None
def get_srid(file_path):
'''
Get the SRID of a raster (i.e. 4326 or 3857 and not EPSG:4326)
:param file_path: path to the file
:return: return the SRID of the raster projection
'''
proj = get_authority(file_path)
if ":" in proj:
return proj.split(":")[1]
if proj.isdigit():
return proj
return None | PypiClean |
/MySQL-python-vincent-1.2.5.tar.gz/MySQL-python-vincent-1.2.5/MySQLdb/converters.py | from _mysql import string_literal, escape_sequence, escape_dict, escape, NULL
from MySQLdb.constants import FIELD_TYPE, FLAG
from MySQLdb.times import *
try:
from types import IntType, LongType, FloatType, NoneType, TupleType, ListType, DictType, InstanceType, \
StringType, UnicodeType, ObjectType, BooleanType, ClassType, TypeType
except ImportError:
# Python 3
long = int
IntType, LongType, FloatType, NoneType = int, long, float, type(None)
TupleType, ListType, DictType, InstanceType = tuple, list, dict, None
    StringType, UnicodeType, ObjectType, BooleanType = bytes, str, object, bool
    ClassType = TypeType = type  # fallback: old-style classes do not exist in Python 3
import array
try:
ArrayType = array.ArrayType
except AttributeError:
ArrayType = array.array
try:
set
except NameError:
from sets import Set as set
def Bool2Str(s, d): return str(int(s))
def Str2Set(s):
return set([ i for i in s.split(',') if i ])
def Set2Str(s, d):
return string_literal(','.join(s), d)
def Thing2Str(s, d):
"""Convert something into a string via str()."""
return str(s)
def Unicode2Str(s, d):
"""Convert a unicode object to a string using the default encoding.
This is only used as a placeholder for the real function, which
is connection-dependent."""
return s.encode()
Long2Int = Thing2Str
def Float2Str(o, d):
return '%.15g' % o
def None2NULL(o, d):
"""Convert None to NULL."""
return NULL # duh
def Thing2Literal(o, d):
"""Convert something into a SQL string literal. If using
MySQL-3.23 or newer, string_literal() is a method of the
_mysql.MYSQL object, and this function will be overridden with
that method when the connection is created."""
return string_literal(o, d)
def Instance2Str(o, d):
"""
Convert an Instance to a string representation. If the __str__()
method produces acceptable output, then you don't need to add the
class to conversions; it will be handled by the default
converter. If the exact class is not found in d, it will use the
first class it can find for which o is an instance.
"""
if o.__class__ in d:
return d[o.__class__](o, d)
cl = filter(lambda x,o=o:
type(x) is ClassType
and isinstance(o, x), d.keys())
if not cl:
cl = filter(lambda x,o=o:
type(x) is TypeType
and isinstance(o, x)
and d[x] is not Instance2Str,
d.keys())
if not cl:
return d[StringType](o,d)
d[o.__class__] = d[cl[0]]
return d[cl[0]](o, d)
def char_array(s):
return array.array('c', s)
def array2Str(o, d):
return Thing2Literal(o.tostring(), d)
def quote_tuple(t, d):
return "(%s)" % (','.join(escape_sequence(t, d)))
conversions = {
IntType: Thing2Str,
LongType: Long2Int,
FloatType: Float2Str,
NoneType: None2NULL,
TupleType: quote_tuple,
ListType: quote_tuple,
DictType: escape_dict,
InstanceType: Instance2Str,
ArrayType: array2Str,
StringType: Thing2Literal, # default
UnicodeType: Unicode2Str,
ObjectType: Instance2Str,
BooleanType: Bool2Str,
DateTimeType: DateTime2literal,
DateTimeDeltaType: DateTimeDelta2literal,
set: Set2Str,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: long,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.DECIMAL: float,
FIELD_TYPE.NEWDECIMAL: float,
FIELD_TYPE.LONGLONG: long,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.SET: Str2Set,
FIELD_TYPE.TIMESTAMP: mysql_timestamp_converter,
FIELD_TYPE.DATETIME: DateTime_or_None,
FIELD_TYPE.TIME: TimeDelta_or_None,
FIELD_TYPE.DATE: Date_or_None,
FIELD_TYPE.BLOB: [
(FLAG.BINARY, str),
],
FIELD_TYPE.STRING: [
(FLAG.BINARY, str),
],
FIELD_TYPE.VAR_STRING: [
(FLAG.BINARY, str),
],
FIELD_TYPE.VARCHAR: [
(FLAG.BINARY, str),
],
}
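# Usage sketch (illustrative only): a caller can copy and adjust this mapping and
# pass it to MySQLdb.connect() through the `conv` keyword argument, for example to
# receive DECIMAL columns as plain floats instead of the converter registered below.
#
#   my_conv = conversions.copy()
#   my_conv[FIELD_TYPE.DECIMAL] = float
#   # db = MySQLdb.connect(host="...", user="...", passwd="...", db="...", conv=my_conv)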
try:
from decimal import Decimal
conversions[FIELD_TYPE.DECIMAL] = Decimal
conversions[FIELD_TYPE.NEWDECIMAL] = Decimal
except ImportError:
pass | PypiClean |
/C-Telethon-1.28.5.tar.gz/C-Telethon-1.28.5/telethon/tl/custom/inlineresult.py | from .. import types, functions
from ... import utils
class InlineResult:
"""
Custom class that encapsulates a bot inline result providing
an abstraction to easily access some commonly needed features
(such as clicking a result to select it).
Attributes:
result (:tl:`BotInlineResult`):
The original :tl:`BotInlineResult` object.
"""
# tdlib types are the following (InlineQueriesManager::answer_inline_query @ 1a4a834):
# gif, article, audio, contact, file, geo, photo, sticker, venue, video, voice
#
# However, those documented in https://core.telegram.org/bots/api#inline-mode are different.
ARTICLE = 'article'
PHOTO = 'photo'
GIF = 'gif'
VIDEO = 'video'
VIDEO_GIF = 'mpeg4_gif'
AUDIO = 'audio'
DOCUMENT = 'document'
LOCATION = 'location'
VENUE = 'venue'
CONTACT = 'contact'
GAME = 'game'
def __init__(self, client, original, query_id=None, *, entity=None):
self._client = client
self.result = original
self._query_id = query_id
self._entity = entity
@property
def type(self):
"""
The always-present type of this result. It will be one of:
``'article'``, ``'photo'``, ``'gif'``, ``'mpeg4_gif'``, ``'video'``,
``'audio'``, ``'voice'``, ``'document'``, ``'location'``, ``'venue'``,
``'contact'``, ``'game'``.
You can access all of these constants through `InlineResult`,
such as `InlineResult.ARTICLE`, `InlineResult.VIDEO_GIF`, etc.
"""
return self.result.type
@property
def message(self):
"""
The always-present :tl:`BotInlineMessage` that
will be sent if `click` is called on this result.
"""
return self.result.send_message
@property
def title(self):
"""
The title for this inline result. It may be `None`.
"""
return self.result.title
@property
def description(self):
"""
The description for this inline result. It may be `None`.
"""
return self.result.description
@property
def url(self):
"""
The URL present in this inline results. If you want to "click"
this URL to open it in your browser, you should use Python's
`webbrowser.open(url)` for such task.
"""
if isinstance(self.result, types.BotInlineResult):
return self.result.url
@property
def photo(self):
"""
Returns either the :tl:`WebDocument` thumbnail for
normal results or the :tl:`Photo` for media results.
"""
if isinstance(self.result, types.BotInlineResult):
return self.result.thumb
elif isinstance(self.result, types.BotInlineMediaResult):
return self.result.photo
@property
def document(self):
"""
Returns either the :tl:`WebDocument` content for
normal results or the :tl:`Document` for media results.
"""
if isinstance(self.result, types.BotInlineResult):
return self.result.content
elif isinstance(self.result, types.BotInlineMediaResult):
return self.result.document
async def click(self, entity=None, reply_to=None, comment_to=None,
silent=False, clear_draft=False, hide_via=False,
background=None):
"""
Clicks this result and sends the associated `message`.
Args:
entity (`entity`):
The entity to which the message of this result should be sent.
reply_to (`int` | `Message <telethon.tl.custom.message.Message>`, optional):
If present, the sent message will reply to this ID or message.
comment_to (`int` | `Message <telethon.tl.custom.message.Message>`, optional):
Similar to ``reply_to``, but replies in the linked group of a
broadcast channel instead (effectively leaving a "comment to"
the specified message).
silent (`bool`, optional):
Whether the message should notify people with sound or not.
Defaults to `False` (send with a notification sound unless
the person has the chat muted). Set it to `True` to alter
this behaviour.
clear_draft (`bool`, optional):
Whether the draft should be removed after sending the
message from this result or not. Defaults to `False`.
hide_via (`bool`, optional):
Whether the "via @bot" should be hidden or not.
Only works with certain bots (like @bing or @gif).
background (`bool`, optional):
                Whether the message should be sent in the background.
"""
if entity:
entity = await self._client.get_input_entity(entity)
elif self._entity:
entity = self._entity
else:
raise ValueError('You must provide the entity where the result should be sent to')
if comment_to:
entity, reply_id = await self._client._get_comment_data(entity, comment_to)
else:
reply_id = None if reply_to is None else utils.get_message_id(reply_to)
req = functions.messages.SendInlineBotResultRequest(
peer=entity,
query_id=self._query_id,
id=self.result.id,
silent=silent,
background=background,
clear_draft=clear_draft,
hide_via=hide_via,
reply_to_msg_id=reply_id
)
return self._client._get_response_message(
req, await self._client(req), entity)
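    # Usage sketch (illustrative only): instances of this class are normally
    # obtained from `client.inline_query(bot, query)`; awaiting `.click(chat)`
    # on one of them sends the associated message to `chat`.
    #
    #   results = await client.inline_query('@gif', 'funny cats')  # hypothetical bot/query
    #   await results[0].click(chat)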
async def download_media(self, *args, **kwargs):
"""
Downloads the media in this result (if there is a document, the
document will be downloaded; otherwise, the photo will if present).
This is a wrapper around `client.download_media
<telethon.client.downloads.DownloadMethods.download_media>`.
"""
if self.document or self.photo:
return await self._client.download_media(
self.document or self.photo, *args, **kwargs) | PypiClean |
/CellDetection-0.4.3-py3-none-any.whl/celldetection/data/cpn.py | import numpy as np
import cv2
from skimage.measure import regionprops
from collections import OrderedDict
from .segmentation import filter_instances_
from .misc import labels2properties
__all__ = [
'CPNTargetGenerator', 'contours2labels', 'render_contour', 'clip_contour_', 'masks2labels',
'contours2boxes', 'contours2properties', 'resolve_label_channels',
'filter_contours_by_intensity'
]
def efd(contour, order=10, epsilon=1e-6):
"""Elliptic fourier descriptor.
Computes elliptic fourier descriptors from contour data.
Args:
        contour: Tensor of shape (..., num_points, 2). Should be a set of `num_points` 2D points that describe the contour
of an object. Based on each contour a descriptor of shape (order, 4) is computed. The result has thus
a shape of (..., order, 4).
As `num_points` may differ from one contour to another a list of (num_points, 2) arrays may be passed
as a numpy array with `object` as its data type, i.e. `np.array(list_of_contours)`.
order: Order of resulting descriptor. The higher the order, the more detail can be preserved. An order of 1
produces ellipses.
epsilon: Epsilon value. Used to avoid division by zero.
Notes:
Locations may contain NaN if `contour` only contains a single point.
Returns:
Tensor of shape (..., order, 4).
"""
if isinstance(contour, np.ndarray) and contour.dtype == object:
r = [efd(c, order=order, epsilon=epsilon) for c in contour]
if all([isinstance(r_, tuple) and len(r_) == len(r[0]) for r_ in r]):
res = [[] for _ in range(len(r[0]))]
for r_ in r:
for i in range(len(res)):
res[i].append(r_[i])
return tuple(map(np.array, res))
dxy = np.diff(contour, axis=-2) # shape: (..., p, d)
dt = np.sqrt(np.sum(np.square(dxy), axis=-1)) + epsilon # shape: (..., p)
cumsum = np.cumsum(dt, axis=-1) # shape: (..., p)
zero = np.zeros(cumsum.shape[:-1] + (1,))
t = np.concatenate([zero, cumsum], axis=-1) # shape: (..., p + 1)
sampling = t[..., -1:] # shape: (..., 1)
T_ = t[..., -1] # shape: (...,)
phi = (2 * np.pi * t) / sampling # shape: (..., p + 1)
orders = np.arange(1, order + 1, dtype=phi.dtype) # shape: (order,)
constants = sampling / (2. * np.square(orders) * np.square(np.pi))
phi = np.expand_dims(phi, -2) * np.expand_dims(orders, -1)
d_cos_phi = np.cos(phi[..., 1:]) - np.cos(phi[..., :-1])
d_sin_phi = np.sin(phi[..., 1:]) - np.sin(phi[..., :-1])
dxy0_dt = np.expand_dims(dxy[..., 0] / dt, axis=-2)
dxy1_dt = np.expand_dims(dxy[..., 1] / dt, axis=-2)
coefficients = np.stack([
constants * np.sum(dxy0_dt * d_cos_phi, axis=-1),
constants * np.sum(dxy0_dt * d_sin_phi, axis=-1),
constants * np.sum(dxy1_dt * d_cos_phi, axis=-1),
constants * np.sum(dxy1_dt * d_sin_phi, axis=-1),
], axis=-1)
xi = np.cumsum(dxy[..., 0], axis=-1) - (dxy[..., 0] / dt) * t[..., 1:]
delta = np.cumsum(dxy[..., 1], axis=-1) - (dxy[..., 1] / dt) * t[..., 1:]
t_diff = np.diff(t ** 2, axis=-1)
dt2 = 2 * dt
a0 = (1 / T_) * np.sum(((dxy[..., 0] / dt2) * t_diff) + xi * dt, axis=-1)
c0 = (1 / T_) * np.sum(((dxy[..., 1] / dt2) * t_diff) + delta * dt, axis=-1)
return np.array(coefficients), np.stack((contour[..., 0, 0] + a0, contour[..., 0, 1] + c0), axis=-1)
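# Usage sketch (illustrative only): a single closed contour of N 2D points yields
# an (order, 4) coefficient matrix plus one 2D location.
#
#   theta = np.linspace(0, 2 * np.pi, 64)
#   contour = np.stack([np.cos(theta), np.sin(theta)], axis=-1)  # unit circle, shape (64, 2)
#   coefficients, location = efd(contour, order=5)
#   # coefficients.shape == (5, 4), location.shape == (2,)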
def labels2contours(labels, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE, flag_fragmented_inplace=False,
raise_fragmented=True, constant=-1) -> dict:
"""Labels to contours.
Notes:
- If ``flag_fragmented_inplace is True``, ``labels`` may be modified inplace.
Args:
labels:
mode:
method: Contour method. CHAIN_APPROX_NONE must be used if contours are used for CPN.
flag_fragmented_inplace: Whether to flag fragmented labels. Flagging sets labels that consist of more than one
connected component to ``constant``.
constant: Flagging constant.
raise_fragmented: Whether to raise ValueError when encountering fragmented labels.
Returns:
dict
"""
crops = []
contours = OrderedDict()
for channel in np.split(labels, labels.shape[2], 2):
crops += [(p.label, p.image) + p.bbox[:2] for p in regionprops(channel)]
for label, crop, oy, ox in crops:
crop.dtype = np.uint8
r = cv2.findContours(crop, mode=mode, method=method, offset=(ox, oy))
if len(r) == 3: # be compatible with both existing versions of findContours
_, c, _ = r
elif len(r) == 2:
c, _ = r
else:
raise NotImplementedError('try different cv2 version')
try:
c, = c
except ValueError as ve:
if flag_fragmented_inplace:
labels[labels == label] = constant
elif raise_fragmented:
raise ValueError('Object labeled with multiple connected components.')
continue
if len(c) == 1:
c = np.concatenate((c, c), axis=0) # min len for other functions to work properly
contours[label] = c
if labels.shape[2] > 1:
return OrderedDict(sorted(contours.items()))
return contours
def labels2contour_list(labels, **kwargs) -> list:
if labels.ndim == 2:
labels = labels[..., None]
return [np.squeeze(i, 1) for i in list(labels2contours(labels, **kwargs).values())]
def masks2labels(masks, connectivity=8, label_axis=2, count=False, reduce=np.max, keepdims=True, **kwargs):
"""Masks to labels.
Notes:
        ~ 11.7 ms for Array[25, 256, 256]. For the same array, skimage.measure.label takes ~ 17.9 ms.
Args:
masks: List[Array[height, width]] or Array[num_masks, height, width]
connectivity: 8 or 4 for 8-way or 4-way connectivity respectively
label_axis: Axis used for stacking label maps. One per mask.
count: Whether to count and return the number of components.
reduce: Callable used to reduce `label_axis`. If set to None, `label_axis` will not be reduced.
Can be used if instances do not overlap.
**kwargs: Kwargs for cv2.connectedComponents.
Returns:
labels or (labels, count)
"""
labels = []
cnt = 0
for m in masks:
a, b = cv2.connectedComponents(m, connectivity=connectivity, **kwargs)
if cnt > 0:
b[b > 0] += cnt
cnt += a - (1 if (a > 1 and 0 in b) else 0)
labels.append(b)
labels = np.stack(labels, label_axis)
if reduce is not None:
labels = reduce(labels, axis=label_axis, keepdims=keepdims)
return (labels, cnt) if count else labels
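# Usage sketch (illustrative only): per-instance binary masks stacked along the
# first axis are merged into a channel-last label map; `count=True` also returns
# the number of labeled components.
#
#   masks = np.zeros((2, 64, 64), dtype='uint8')  # two hypothetical instance masks
#   masks[0, 5:15, 5:15] = 1
#   masks[1, 30:40, 30:40] = 1
#   labels, n = masks2labels(masks, count=True)   # labels: (64, 64, 1), n == 2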
def fourier2contour(fourier, locations, samples=64, sampling=None):
"""
Args:
fourier: Array[..., order, 4]
locations: Array[..., 2]
samples: Number of samples.
sampling: Array[samples] or Array[(fourier.shape[:-2] + (samples,)].
Default is linspace from 0 to 1 with `samples` values.
Returns:
Contours.
"""
order = fourier.shape[-2]
if sampling is None:
sampling = np.linspace(0, 1.0, samples)
samples = sampling.shape[-1]
sampling = sampling[..., None, :]
# shape: (order, samples)
c = float(np.pi) * 2 * (np.arange(1, order + 1)[..., None]) * sampling
# shape: (order, samples)
c_cos = np.cos(c)
c_sin = np.sin(c)
# shape: fourier.shape[:-2] + (samples, 2)
con = np.zeros(fourier.shape[:-2] + (samples, 2))
con += locations[..., None, :]
con += (fourier[..., None, (1, 3)] * c_sin[(...,) + (None,) * 1]).sum(-3)
con += (fourier[..., None, (0, 2)] * c_cos[(...,) + (None,) * 1]).sum(-3)
return con
def contours2fourier(contours: dict, order=5, dtype=np.float32):
if len(contours) > 0:
max_label = np.max(list(contours.keys()))
else:
max_label = 0
fouriers = np.zeros((max_label, order, 4), dtype=dtype)
locations = np.zeros((max_label, 2), dtype=dtype)
for key, contour in contours.items():
if contour.ndim == 3:
contour = contour.squeeze(1)
fourier, location = efd(contour, order)
fouriers[key - 1] = fourier # labels start at 1, but indices at 0
locations[key - 1] = location # labels start at 1, but indices at 0
return fouriers, locations
def contours2boxes(contours):
"""Contours to boxes.
Args:
contours: Array[num_contours, num_points, 2]. (x, y) format.
Returns:
Array[num_contours, 4]. (x0, y0, x1, y1) format.
"""
if len(contours):
boxes = np.concatenate((contours.min(1), contours.max(1)), 1)
else:
boxes = np.empty((0, 4))
return boxes
def render_contour(contour, val=1, dtype='int32', round=False, reference=None):
if reference is None:
reference = contour
xmin, ymin = np.floor(np.min(reference, axis=0)).astype('int')
xmax, ymax = np.ceil(np.max(reference, axis=0)).astype('int')
a = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=dtype)
if round:
contour = contour.round()
a = cv2.drawContours(a, [np.array(contour, dtype=np.int32).reshape((-1, 1, 2))], 0, val, -1,
offset=(-xmin, -ymin))
return a, (xmin, xmax), (ymin, ymax)
def filter_contours_by_intensity(img, contours, min_intensity=None, max_intensity=200, aggregate='mean'):
keep = np.ones(len(contours), dtype=bool)
for idx, con in enumerate(contours):
m, (xmin, xmax), (ymin, ymax) = render_contour(con, dtype='uint8')
img_crop = img[ymin:ymin + m.shape[0], xmin:xmin + m.shape[1]]
m = m[:img_crop.shape[0], :img_crop.shape[1]]
assert m.dtype == np.uint8
m.dtype = bool
mean_intensity = getattr(np, aggregate)(img_crop[m])
if max_intensity is not None and mean_intensity > max_intensity:
keep[idx] = False
elif min_intensity is not None and mean_intensity < min_intensity:
keep[idx] = False
return keep
def clip_contour_(contour, size):
np.clip(contour[..., 0], 0, size[1], out=contour[..., 0])
np.clip(contour[..., 1], 0, size[0], out=contour[..., 1])
def contours2labels(contours, size, rounded=True, clip=True, initial_depth=1, gap=3, dtype='int32'):
"""Contours to labels.
Convert contours to label image.
Notes:
- ~137 ms for contours.shape=(1284, 128, 2), size=(1000, 1000).
- Label images come with channels, as contours may assign pixels to multiple objects.
Since such multi-assignments cannot be easily encoded in a channel-free label image, channels are used.
To remove channels refer to `resolve_label_channels`.
Args:
contours: Contours of a single image. Array[num_contours, num_points, 2] or List[Array[num_points, 2]].
size: Label image size. (height, width).
rounded: Whether to round contour coordinates.
clip: Whether to clip contour coordinates to given `size`.
initial_depth: Initial number of channels. More channels are used if necessary.
gap: Gap between instances.
dtype: Data type of label image.
Returns:
Array[height, width, channels]. Since contours may assign pixels to multiple objects, the label image comes
with channels. To remove channels refer to `resolve_label_channels`.
"""
labels = np.zeros(tuple(size) + (initial_depth,), dtype=dtype)
lbl = 1
for contour in contours:
if rounded:
contour = np.round(contour)
if clip:
clip_contour_(contour, np.array(size) - 1)
a, (xmin, xmax), (ymin, ymax) = render_contour(contour, val=lbl, dtype=dtype)
lbl += 1
s = (labels[np.maximum(0, ymin - gap): gap + ymin + a.shape[0],
np.maximum(0, xmin - gap): gap + xmin + a.shape[1]] > 0).sum((0, 1))
i = next(i for i in range(labels.shape[2] + 1) if ~ (i < labels.shape[2] and np.any(s[i])))
if i >= labels.shape[2]:
labels = np.concatenate((labels, np.zeros(size, dtype=dtype)[..., None]), axis=-1)
labels[ymin:ymin + a.shape[0], xmin:xmin + a.shape[1], i] += a
return labels
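# Usage sketch (illustrative only): render contours to a label image and collapse
# the channel dimension afterwards with `resolve_label_channels`.
#
#   contour = np.array([[10., 10.], [40., 12.], [38., 44.], [12., 40.]])  # hypothetical contour
#   labels = contours2labels([contour], size=(64, 64))  # shape: (64, 64, 1) here
#   flat = resolve_label_channels(labels)               # shape: (64, 64)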
def resolve_label_channels(labels, method='dilation', max_iter=999, kernel=(3, 3)):
"""Resolve label channels.
Remove channels from a label image.
Pixels that are assigned to exactly one foreground label remain as is.
Pixels that are assigned to multiple foreground labels present a conflict, as they cannot be described by a
channel-less label image. Such conflicts are resolved by `method`.
Args:
labels: Label image. Array[h, w, c].
method: Method to resolve overlapping regions.
max_iter: Max iteration.
kernel: Kernel.
Returns:
Labels with channels removed. Array[h, w].
"""
if isinstance(kernel, (tuple, list)):
kernel = cv2.getStructuringElement(1, kernel)
mask_sm = np.sum(labels > 0, axis=-1)
mask = mask_sm > 1 # all overlaps
if mask.any():
if method == 'dilation':
mask_ = mask_sm == 1 # all cores
lbl = np.zeros(labels.shape[:2], dtype='float64')
lbl[mask_] = labels.max(-1)[mask_]
for _ in range(max_iter):
lbl_ = np.copy(lbl)
m = mask & (lbl <= 0)
if not np.any(m):
break
lbl[m] = cv2.dilate(lbl, kernel=kernel)[m]
if np.allclose(lbl_, lbl):
break
else:
raise ValueError(f'Invalid method: {method}')
else:
lbl = labels.max(-1)
return lbl.astype(labels.dtype)
def contours2properties(contours, *properties, round=True, **kwargs):
"""Contours to properties.
References:
[1] https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
Args:
contours: Contours.
*properties: Property names. See [1] for details.
round: Whether to round contours. Default is `True`.
**kwargs: Keyword arguments for `skimage.measure.regionprops`.
Returns:
List of property lists.
"""
results = []
for idx, con in enumerate(contours):
m, (xmin, xmax), (ymin, ymax) = render_contour(con, dtype='int32', round=round)
results += labels2properties(m, *properties, offset=kwargs.pop('offset', (ymin, xmin)), **kwargs)
return results
def mask_labels_by_distance_(labels, distances, max_bg_dist, min_fg_dist):
# Set instance labels to 0 if their distance is <= max_bg_dist
labels[np.logical_and(np.any(labels > 0, 2), distances <= max_bg_dist)] = 0
# Set all labels to -1 that have a distance d with `max_bg_dist < d < min_fg_dist`
labels[np.logical_and(distances > max_bg_dist, distances < min_fg_dist)] = -1
def _labels2distances_fg(labels, fg_mask_wo_overlap, distance_type):
# Distance transform
fg_mask_wo_overlap.dtype = np.uint8
dist = cv2.distanceTransform(fg_mask_wo_overlap, distance_type, 3)
if labels.size > 0:
for p in regionprops(labels):
c = p.coords
indices = (c[:, 0], c[:, 1])
dist[indices] /= np.maximum(dist[indices].max(), .000001)
return dist
def _labels2distances_instance(labels, fg_mask_wo_overlap, distance_type):
dist = np.zeros_like(fg_mask_wo_overlap, dtype='float32')
if labels.size > 0:
for p in regionprops(labels):
y0, x0, _, y1, x1, _ = p.bbox
box_slices = (slice(y0, y1), slice(x0, x1))
mask = np.any(p.image, 2) & fg_mask_wo_overlap[box_slices]
d_ = cv2.distanceTransform(np.pad(mask.astype('uint8'), 1), distance_type, 3)[1:-1, 1:-1]
d_ /= np.maximum(d_.max(), .000001)
dist[box_slices][mask] = d_[mask]
return dist
def labels2distances(labels, distance_type=cv2.DIST_L2, overlap_zero=True, per_instance=True):
"""Label stacks to distances.
    Measures the distance from each pixel to the closest instance border,
    normalized per instance to the range [0, 1]. Overlapping regions are zero.
Notes:
54.9 ms ± 3.41 ms (shape (576, 576, 3); 762 instances in three channels)
Args:
labels: Label stack. (height, width, channels)
distance_type: opencv distance type.
overlap_zero: Whether to set overlapping regions to zero.
per_instance: Performs the distance transform per instance if ``True``.
Returns:
Distance map of shape (height, width). All overlapping pixels are 0. Instance centers are 1.
Also, labels are returned. They are altered if `overlap_zero is True`.
"""
labels = np.copy(labels)
mask = labels > 0
# Mask out overlap
if overlap_zero:
overlap_mask = np.sum(mask, 2) > 1
labels[overlap_mask] = -1
fg_mask_wo_overlap = np.sum(mask, 2) == 1
else:
fg_mask_wo_overlap = np.any(mask, 2)
# Fg mask
if per_instance:
dist = _labels2distances_instance(labels, fg_mask_wo_overlap, distance_type)
else:
dist = _labels2distances_fg(labels, fg_mask_wo_overlap, distance_type)
return dist.clip(0., 1.), labels # 332 µs ± 24.5 µs for (576, 576)
class CPNTargetGenerator:
def __init__(self, samples, order, random_sampling=True, remove_partials=False, min_fg_dist=.75, max_bg_dist=.5,
flag_fragmented=True, flag_fragmented_constant=-1):
self.samples = samples
self.order = order
self.random_sampling = random_sampling
self.remove_partials = remove_partials
self.min_fg_dist = min_fg_dist
self.max_bg_dist = max_bg_dist
self.flag_fragmented = flag_fragmented
self.flag_fragmented_constant = flag_fragmented_constant
self.labels = None
self.distances = None
self.partials_mask = None
self._sampling = self._contours = self._fourier = self._locations = self._sampled_contours = None
self._sampled_sizes = None
self._reset()
def _reset(self):
self._sampling = None
self._contours = None
self._fourier = None
self._locations = None
self._sampled_contours = None
self._sampled_sizes = None
def feed(self, labels, border=1, min_area=1, max_area=None):
"""
Notes:
- May apply inplace changes to ``labels``.
Args:
labels: Single label image. E.g. of shape (height, width, channels).
border:
min_area:
max_area:
"""
if labels.ndim == 2:
labels = labels[..., None]
filter_instances_(labels, partials=self.remove_partials, partials_border=border,
min_area=min_area, max_area=max_area, constant=-1, continuous=True)
self.labels = labels
_ = self.contours # compute contours
self.distances, labels = labels2distances(labels)
mask_labels_by_distance_(labels, self.distances, self.max_bg_dist, self.min_fg_dist)
self._reset()
@property
def reduced_labels(self):
if self.flag_fragmented:
_ = self.contours # Since labels2contours may filter instances, it has to be done before returning labels
return self.labels.max(2)
@property
def sampling(self):
if self._sampling is None:
if self.random_sampling:
self._sampling = np.random.uniform(0., 1., self.samples)
else:
self._sampling = np.linspace(0., 1., self.samples)
self._sampling.sort()
return self._sampling
@property
def contours(self):
if self._contours is None:
self._contours: dict = labels2contours(self.labels, flag_fragmented_inplace=self.flag_fragmented,
constant=self.flag_fragmented_constant, raise_fragmented=False)
return self._contours
@property
def fourier(self):
if self._fourier is None:
self._fourier, self._locations = contours2fourier(self.contours, order=self.order)
return self._fourier
@property
def locations(self):
if self._locations is None:
self._fourier, self._locations = contours2fourier(self.contours, order=self.order)
return self._locations
@property
def sampled_contours(self):
"""
Returns:
Tensor[num_contours, num_points, 2]
"""
if self._sampled_contours is None:
self._sampled_contours = fourier2contour(self.fourier, self.locations, samples=self.samples,
sampling=self.sampling)
return self._sampled_contours
@property
def sampled_sizes(self):
"""
Notes:
            The quality of `sizes` depends on how accurately `sampled_contours` represents the actual contours.
Returns:
Tensor[num_contours, 2]. Contains height and width for each contour.
"""
if self._sampled_sizes is None:
c = self.sampled_contours
self._sampled_sizes = c.max(1) - c.min(1)
return self._sampled_sizes | PypiClean |
/Aitomatic-Contrib-23.8.10.3.tar.gz/Aitomatic-Contrib-23.8.10.3/src/aito/iot_mgmt/maint_ops/migrations/0035_auto_20180831_1410.py | from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('IoT_DataMgmt', '0014_auto_20180803_1031'),
('IoT_MaintOps', '0034_auto_20180831_1311'),
]
operations = [
migrations.CreateModel(
name='EquipmentUniqueTypeGroupDataFieldBlueprintBenchmarkMetricProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('to_date', models.DateField(default=None)),
('n', models.IntegerField(default=0)),
('mae', models.FloatField(blank=True, null=True)),
('medae', models.FloatField(blank=True, null=True)),
('r2', models.FloatField(blank=True, null=True)),
('last_updated', models.DateTimeField()),
('equipment_data_field', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='equipment_unique_type_group_data_field_measurement_data_field_benchmark_metric_profiles', related_query_name='equipment_unique_type_group_data_field_measurement_data_field_benchmark_metric_profile', to='IoT_DataMgmt.EquipmentDataField')),
('equipment_general_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='equipment_unique_type_group_data_field_measurement_data_field_benchmark_metric_profiles', related_query_name='equipment_unique_type_group_data_field_measurement_data_field_benchmark_metric_profile', to='IoT_DataMgmt.EquipmentGeneralType')),
('equipment_unique_type_group', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='equipment_unique_type_group_data_field_measurement_data_field_benchmark_metric_profiles', related_query_name='equipment_unique_type_group_data_field_measurement_data_field_benchmark_metric_profile', to='IoT_DataMgmt.EquipmentUniqueTypeGroup')),
],
),
migrations.CreateModel(
name='EquipmentUniqueTypeGroupDataFieldProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('to_date', models.DateField(default=None)),
('valid_proportion', models.FloatField(default=0)),
('distinct_values', django.contrib.postgres.fields.jsonb.JSONField(default=list)),
('n_distinct_values', models.IntegerField(default=0)),
('sample_min', models.FloatField(blank=True, null=True)),
('outlier_rst_min', models.FloatField(blank=True, null=True)),
('sample_quartile', models.FloatField(blank=True, null=True)),
('sample_median', models.FloatField(blank=True, null=True)),
('sample_3rd_quartile', models.FloatField(blank=True, null=True)),
('outlier_rst_max', models.FloatField(blank=True, null=True)),
('sample_max', models.FloatField(blank=True, null=True)),
('last_updated', models.DateTimeField()),
('equipment_data_field', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='equipment_unique_type_group_data_field_profiles', related_query_name='equipment_unique_type_group_data_field_profile', to='IoT_DataMgmt.EquipmentDataField')),
('equipment_general_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='equipment_unique_type_group_data_field_profiles', related_query_name='equipment_unique_type_group_data_field_profile', to='IoT_DataMgmt.EquipmentGeneralType')),
('equipment_unique_type_group', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='equipment_unique_type_group_data_field_profiles', related_query_name='equipment_unique_type_group_data_field_profile', to='IoT_DataMgmt.EquipmentUniqueTypeGroup')),
],
),
migrations.RemoveField(
model_name='equipmentuniquetypegroupmeasurementdatafieldblueprintbenchmarkmetricprofile',
name='equipment_data_field',
),
migrations.RemoveField(
model_name='equipmentuniquetypegroupmeasurementdatafieldblueprintbenchmarkmetricprofile',
name='equipment_general_type',
),
migrations.RemoveField(
model_name='equipmentuniquetypegroupmeasurementdatafieldblueprintbenchmarkmetricprofile',
name='equipment_unique_type_group',
),
migrations.RemoveField(
model_name='equipmentuniquetypegroupmeasurementdatafieldprofile',
name='equipment_data_field',
),
migrations.RemoveField(
model_name='equipmentuniquetypegroupmeasurementdatafieldprofile',
name='equipment_general_type',
),
migrations.RemoveField(
model_name='equipmentuniquetypegroupmeasurementdatafieldprofile',
name='equipment_unique_type_group',
),
migrations.DeleteModel(
name='EquipmentUniqueTypeGroupMeasurementDataFieldBlueprintBenchmarkMetricProfile',
),
migrations.DeleteModel(
name='EquipmentUniqueTypeGroupMeasurementDataFieldProfile',
),
] | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_uz-cyrl.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"\u044f\u043a\u0448\u0430\u043d\u0431\u0430",
"\u0434\u0443\u0448\u0430\u043d\u0431\u0430",
"\u0441\u0435\u0448\u0430\u043d\u0431\u0430",
"\u0447\u043e\u0440\u0448\u0430\u043d\u0431\u0430",
"\u043f\u0430\u0439\u0448\u0430\u043d\u0431\u0430",
"\u0436\u0443\u043c\u0430",
"\u0448\u0430\u043d\u0431\u0430"
],
"MONTH": [
"\u042f\u043d\u0432\u0430\u0440",
"\u0424\u0435\u0432\u0440\u0430\u043b",
"\u041c\u0430\u0440\u0442",
"\u0410\u043f\u0440\u0435\u043b",
"\u041c\u0430\u0439",
"\u0418\u044e\u043d",
"\u0418\u044e\u043b",
"\u0410\u0432\u0433\u0443\u0441\u0442",
"\u0421\u0435\u043d\u0442\u044f\u0431\u0440",
"\u041e\u043a\u0442\u044f\u0431\u0440",
"\u041d\u043e\u044f\u0431\u0440",
"\u0414\u0435\u043a\u0430\u0431\u0440"
],
"SHORTDAY": [
"\u042f\u043a\u0448",
"\u0414\u0443\u0448",
"\u0421\u0435\u0448",
"\u0427\u043e\u0440",
"\u041f\u0430\u0439",
"\u0416\u0443\u043c",
"\u0428\u0430\u043d"
],
"SHORTMONTH": [
"\u042f\u043d\u0432",
"\u0424\u0435\u0432",
"\u041c\u0430\u0440",
"\u0410\u043f\u0440",
"\u041c\u0430\u0439",
"\u0418\u044e\u043d",
"\u0418\u044e\u043b",
"\u0410\u0432\u0433",
"\u0421\u0435\u043d",
"\u041e\u043a\u0442",
"\u041d\u043e\u044f",
"\u0414\u0435\u043a"
],
"fullDate": "EEEE, y MMMM dd",
"longDate": "y MMMM d",
"medium": "y MMM d HH:mm:ss",
"mediumDate": "y MMM d",
"mediumTime": "HH:mm:ss",
"short": "yy/MM/dd HH:mm",
"shortDate": "yy/MM/dd",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u20ac",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4\u00a0-",
"negSuf": "",
"posPre": "\u00a4\u00a0",
"posSuf": ""
}
]
},
"id": "uz-cyrl",
"pluralCat": function(n, opt_precision) { if (n == 1) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/docs/aoa/gpu.rst | .. _gpu:
GPU
===
.. note::
You need to install the `py3nvml`_ library on your system.
Or `nvidia-ml-py3`_ for Glances 3.1.3 or lower.
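    For example, it can be installed with pip:
    .. code-block:: console
        $ pip install py3nvml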
The GPU stats are shown as percentages, averaged over the configured
refresh time. It displays:
- total GPU usage
- memory consumption
- temperature (Glances 3.1.4 or higher)
.. image:: ../_static/gpu.png
If you press the ``6`` hotkey, the per-GPU view is displayed:
.. image:: ../_static/pergpu.png
.. note::
You can also start Glances with the ``--meangpu`` option to display
the first view by default.
You can change the threshold limits in the configuration file:
.. code-block:: ini
[gpu]
# Default processor values if not defined: 50/70/90
proc_careful=50
proc_warning=70
proc_critical=90
# Default memory values if not defined: 50/70/90
mem_careful=50
mem_warning=70
mem_critical=90
Legend:
============== ============
GPU (PROC/MEM) Status
============== ============
``<50%`` ``OK``
``>50%`` ``CAREFUL``
``>70%`` ``WARNING``
``>90%`` ``CRITICAL``
============== ============
.. _py3nvml: https://pypi.org/project/py3nvml/
.. _nvidia-ml-py3: https://pypi.org/project/nvidia-ml-py3/
| PypiClean |
/DrQueueIPython-0.0.1.tar.gz/DrQueueIPython-0.0.1/bin/send_job.py | from optparse import OptionParser
import os
import DrQueue
from DrQueue import Job as DrQueueJob
from DrQueue import Client as DrQueueClient
import getpass
def main():
# parse arguments
parser = OptionParser()
parser.usage = "%prog [options] -n name -r renderer -f scenefile"
parser.add_option("-s", "--startframe",
dest="startframe", default=1, help="first frame")
parser.add_option("-e", "--endframe",
dest="endframe", default=1, help="last frame")
parser.add_option("-b", "--blocksize",
dest="blocksize", default=1, help="size of block")
parser.add_option("-n", "--name",
dest="name", default=None, help="name of job")
parser.add_option("-r", "--renderer",
dest="renderer", help="render type (maya|blender|mentalray)")
parser.add_option("-f", "--scenefile",
dest="scenefile", default=None, help="path to scenefile")
parser.add_option("-p", "--pool",
dest="pool", default=None, help="pool of computers")
parser.add_option("-o", "--options",
dest="options", default="{}", help="specific options for renderer as Python dict")
parser.add_option("--retries",
dest="retries", default=1, help="number of retries for every task")
parser.add_option("--owner",
dest="owner", default=getpass.getuser(), help="Owner of job. Default is current username.")
parser.add_option("--os",
dest="os", default=None, help="Operating system.")
parser.add_option("--minram",
dest="minram", default=0, help="Minimal RAM in GB.")
parser.add_option("--mincores",
dest="mincores", default=0, help="Minimal CPU cores.")
parser.add_option("--send-email",
action="store_true", dest="send_email", default=False, help="Send notification email when job is finished.")
parser.add_option("--email-recipients",
dest="email_recipients", default=None, help="Recipients for notification email.")
parser.add_option("-w", "--wait",
action="store_true", dest="wait", default=False, help="wait for job to finish")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False, help="verbose output")
(options, args) = parser.parse_args()
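    # Example invocation (illustrative values only):
    #   python send_job.py -n test_job -r blender -f /path/to/scene.blend \
    #       -s 1 -e 100 -b 10 -p render_pool --wait --verbose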
# initialize DrQueue client
client = DrQueueClient()
# set limits
limits = dict()
limits['pool_name'] = options.pool
limits['os'] = options.os
limits['minram'] = int(options.minram)
limits['mincores'] = int(options.mincores)
options_var = eval(options.options)
options_var['send_email'] = options.send_email
options_var['email_recipients'] = options.email_recipients
# initialize DrQueue job
job = DrQueueJob(options.name, int(options.startframe), int(options.endframe), int(options.blocksize), options.renderer, options.scenefile, options.retries, options.owner, options_var, "send_job.py", limits)
# run job with client
try:
client.job_run(job)
except ValueError:
print("One of your the specified values produced an error:")
raise
exit(1)
# tasks which have been created
tasks = client.query_task_list(job['_id'])
# wait for all tasks of job to finish
if options.wait:
if (tasks == []) and (client.query_engine_list() == []):
print("Tasks have been sent but no render node is running at the moment.")
exit(0)
for task in tasks:
ar = client.task_wait(task['msg_id'])
# add some verbose output
if options.verbose:
cpl = ar.metadata.completed
msg_id = ar.metadata.msg_id
status = ar.status
engine_id = ar.metadata.engine_id
print("Task %s finished with status '%s' on engine %i at %i-%02i-%02i %02i:%02i:%02i." % (msg_id, status, engine_id, cpl.year, cpl.month, cpl.day, cpl.hour, cpl.minute, cpl.second))
if ar.pyerr != None:
print(ar.pyerr)
print("Job %s finished." % job['name'])
if __name__ == "__main__":
main() | PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/db/backends/base/schema.py | import logging
import operator
from datetime import datetime
from django.conf import settings
from django.db.backends.ddl_references import (
Columns,
Expressions,
ForeignKeyName,
IndexName,
Statement,
Table,
)
from django.db.backends.utils import names_digest, split_identifier, truncate_name
from django.db.models import Deferrable, Index
from django.db.models.sql import Query
from django.db.transaction import TransactionManagementError, atomic
from django.utils import timezone
logger = logging.getLogger("django.db.backends.schema")
def _is_relevant_relation(relation, altered_field):
"""
When altering the given field, must constraints on its model from the given
relation be temporarily dropped?
"""
field = relation.field
if field.many_to_many:
# M2M reverse field
return False
if altered_field.primary_key and field.to_fields == [None]:
# Foreign key constraint on the primary key, which is being altered.
return True
# Is the constraint targeting the field being altered?
return altered_field.name in field.to_fields
def _all_related_fields(model):
# Related fields must be returned in a deterministic order.
return sorted(
model._meta._get_fields(
forward=False,
reverse=True,
include_hidden=True,
include_parents=False,
),
key=operator.attrgetter("name"),
)
def _related_non_m2m_objects(old_field, new_field):
# Filter out m2m objects from reverse relations.
# Return (old_relation, new_relation) tuples.
related_fields = zip(
(
obj
for obj in _all_related_fields(old_field.model)
if _is_relevant_relation(obj, old_field)
),
(
obj
for obj in _all_related_fields(new_field.model)
if _is_relevant_relation(obj, new_field)
),
)
for old_rel, new_rel in related_fields:
yield old_rel, new_rel
yield from _related_non_m2m_objects(
old_rel.remote_field,
new_rel.remote_field,
)
class BaseDatabaseSchemaEditor:
"""
This class and its subclasses are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s%(collation)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_alter_column_no_default_null = sql_alter_column_no_default
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = (
"ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
)
sql_update_with_default = (
"UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
)
sql_unique_constraint = "UNIQUE (%(columns)s)%(deferrable)s"
sql_check_constraint = "CHECK (%(check)s)"
sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_constraint = "CONSTRAINT %(name)s %(constraint)s"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = sql_delete_constraint
sql_create_unique = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s "
"UNIQUE (%(columns)s)%(deferrable)s"
)
sql_delete_unique = sql_delete_constraint
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s"
)
sql_create_inline_fk = None
sql_create_column_inline_fk = None
sql_delete_fk = sql_delete_constraint
sql_create_index = (
"CREATE INDEX %(name)s ON %(table)s "
"(%(columns)s)%(include)s%(extra)s%(condition)s"
)
sql_create_unique_index = (
"CREATE UNIQUE INDEX %(name)s ON %(table)s "
"(%(columns)s)%(include)s%(condition)s"
)
sql_rename_index = "ALTER INDEX %(old_name)s RENAME TO %(new_name)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
)
sql_delete_pk = sql_delete_constraint
sql_delete_procedure = "DROP PROCEDURE %(procedure)s"
sql_alter_table_comment = "COMMENT ON TABLE %(table)s IS %(comment)s"
sql_alter_column_comment = "COMMENT ON COLUMN %(table)s.%(column)s IS %(comment)s"
def __init__(self, connection, collect_sql=False, atomic=True):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
self.atomic_migration = self.connection.features.can_rollback_ddl and atomic
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.atomic_migration:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.atomic_migration:
self.atomic.__exit__(exc_type, exc_value, traceback)
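    # Usage sketch (illustrative only): editors are normally obtained from a
    # connection and used as a context manager, so that deferred SQL is executed
    # and atomic DDL is wrapped up on exit.
    #
    #   from django.db import connection
    #   with connection.schema_editor() as editor:
    #       editor.create_model(MyModel)  # MyModel is a hypothetical model class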
# Core utility functions
def execute(self, sql, params=()):
"""Execute the given SQL statement, with optional parameters."""
# Don't perform the transactional DDL check if SQL is being collected
# as it's not going to be executed anyway.
if (
not self.collect_sql
and self.connection.in_atomic_block
and not self.connection.features.can_rollback_ddl
):
raise TransactionManagementError(
"Executing DDL statements while in a transaction on databases "
"that can't perform a rollback is prohibited."
)
# Account for non-string statement objects.
sql = str(sql)
# Log the command we're running, then run it
logger.debug(
"%s; (params %r)", sql, params, extra={"params": params, "sql": sql}
)
if self.collect_sql:
ending = "" if sql.rstrip().endswith(";") else ";"
if params is not None:
self.collected_sql.append(
(sql % tuple(map(self.quote_value, params))) + ending
)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
def table_sql(self, model):
"""Take a model and return its table definition."""
# Add any unique_togethers (always deferred, as some fields might be
# created afterward, like geometry fields with some backends).
for field_names in model._meta.unique_together:
fields = [model._meta.get_field(field) for field in field_names]
self.deferred_sql.append(self._create_unique_sql(model, fields))
# Create column SQL, add FK deferreds if needed.
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL.
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here.
db_params = field.db_parameters(connection=self.connection)
if db_params["check"]:
definition += " " + self.sql_check_constraint % db_params
# Autoincrement SQL (for backends with inline variant).
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK.
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(
field.remote_field.field_name
).column
if self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
elif self.connection.features.supports_foreign_keys:
self.deferred_sql.append(
self._create_fk_sql(
model, field, "_fk_%(to_table)s_%(to_column)s"
)
)
# Add the SQL to our big list.
column_sqls.append(
"%s %s"
% (
self.quote_name(field.column),
definition,
)
)
# Autoincrement SQL (for backends with post table definition
# variant).
if field.get_internal_type() in (
"AutoField",
"BigAutoField",
"SmallAutoField",
):
autoinc_sql = self.connection.ops.autoinc_sql(
model._meta.db_table, field.column
)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
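        # Meta.constraints are rendered inline alongside the column
        # definitions in the CREATE TABLE statement.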
constraints = [
constraint.constraint_sql(model, self)
for constraint in model._meta.constraints
]
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(
str(constraint)
for constraint in (*column_sqls, *constraints)
if constraint
),
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(
model._meta.db_tablespace
)
if tablespace_sql:
sql += " " + tablespace_sql
return sql, params
# Field <-> database mapping functions
def _iter_column_sql(
self, column_db_type, params, model, field, field_db_params, include_default
):
yield column_db_type
if collation := field_db_params.get("collation"):
yield self._collate_sql(collation)
if self.connection.features.supports_comments_inline and field.db_comment:
yield self._comment_sql(field.db_comment)
# Work out nullability.
null = field.null
# Include a default value, if requested.
include_default = (
include_default
and not self.skip_default(field)
and
# Don't include a default value if it's a nullable field and the
# default cannot be dropped in the ALTER COLUMN statement (e.g.
# MySQL longtext and longblob).
not (null and self.skip_default_on_alter(field))
)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
column_default = "DEFAULT " + self._column_default_sql(field)
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (Oracle).
# If this is the case, the individual schema backend should
# implement prepare_default().
yield column_default % self.prepare_default(default_value)
else:
yield column_default
params.append(default_value)
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (
field.empty_strings_allowed
and not field.primary_key
and self.connection.features.interprets_empty_strings_as_nulls
):
null = True
if not null:
yield "NOT NULL"
elif not self.connection.features.implied_column_null:
yield "NULL"
if field.primary_key:
yield "PRIMARY KEY"
elif field.unique:
yield "UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column.
tablespace = field.db_tablespace or model._meta.db_tablespace
if (
tablespace
and self.connection.features.supports_tablespaces
and field.unique
):
yield self.connection.ops.tablespace_sql(tablespace, inline=True)
def column_sql(self, model, field, include_default=False):
"""
Return the column definition for a field. The field must already have
had set_attributes_from_name() called.
"""
# Get the column's type and use that as the basis of the SQL.
field_db_params = field.db_parameters(connection=self.connection)
column_db_type = field_db_params["type"]
# Check for fields that aren't actually columns (e.g. M2M).
if column_db_type is None:
return None, None
params = []
return (
" ".join(
# This appends to the params being returned.
self._iter_column_sql(
column_db_type,
params,
model,
field,
field_db_params,
include_default,
)
),
params,
)
def skip_default(self, field):
"""
        Some backends don't accept default values for certain column types
        (e.g. MySQL longtext and longblob).
"""
return False
def skip_default_on_alter(self, field):
"""
        Some backends don't accept default values for certain column types
        (e.g. MySQL longtext and longblob) in the ALTER COLUMN statement.
"""
return False
def prepare_default(self, value):
"""
        Only used for backends which have the requires_literal_defaults feature.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseSchemaEditor for backends which have "
"requires_literal_defaults must provide a prepare_default() method"
)
def _column_default_sql(self, field):
"""
Return the SQL to use in a DEFAULT clause. The resulting string should
contain a '%s' placeholder for a default value.
"""
return "%s"
@staticmethod
def _effective_default(field):
# This method allows testing its logic without a connection.
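        # Precedence: an explicit default, then an implicit empty value for
        # blank non-null string/binary fields, then "now" for auto_now /
        # auto_now_add fields, otherwise None.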
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = b""
else:
default = ""
elif getattr(field, "auto_now", False) or getattr(field, "auto_now_add", False):
internal_type = field.get_internal_type()
if internal_type == "DateTimeField":
default = timezone.now()
else:
default = datetime.now()
if internal_type == "DateField":
default = default.date()
elif internal_type == "TimeField":
default = default.time()
else:
default = None
return default
def effective_default(self, field):
"""Return a field's effective database default value."""
return field.get_db_prep_save(self._effective_default(field), self.connection)
def quote_value(self, value):
"""
Return a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Create a table and any accompanying indexes or unique constraints for
the given `model`.
"""
sql, params = self.table_sql(model)
# Prevent using [] as params, in the case a literal '%' is used in the
# definition.
self.execute(sql, params or None)
if self.connection.features.supports_comments:
# Add table comment.
if model._meta.db_table_comment:
self.alter_db_table_comment(model, None, model._meta.db_table_comment)
# Add column comments.
if not self.connection.features.supports_comments_inline:
for field in model._meta.local_fields:
if field.db_comment:
field_db_params = field.db_parameters(
connection=self.connection
)
field_type = field_db_params["type"]
self.execute(
*self._alter_column_comment_sql(
model, field, field_type, field.db_comment
)
)
        # Add any field indexes and index_together indexes (deferred, as
        # SQLite's _remake_table needs them).
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""Delete a model from the database."""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(
self.sql_delete_table
% {
"table": self.quote_name(model._meta.db_table),
}
)
# Remove all deferred statements referencing the deleted table.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_table(
model._meta.db_table
):
self.deferred_sql.remove(sql)
def add_index(self, model, index):
"""Add an index on a model."""
if (
index.contains_expressions
and not self.connection.features.supports_expression_indexes
):
return None
# Index.create_sql returns interpolated SQL which makes params=None a
# necessity to avoid escaping attempts on execution.
self.execute(index.create_sql(model, self), params=None)
def remove_index(self, model, index):
"""Remove an index from a model."""
if (
index.contains_expressions
and not self.connection.features.supports_expression_indexes
):
return None
self.execute(index.remove_sql(model, self))
def rename_index(self, model, old_index, new_index):
if self.connection.features.can_rename_index:
self.execute(
self._rename_index_sql(model, old_index.name, new_index.name),
params=None,
)
else:
self.remove_index(model, old_index)
self.add_index(model, new_index)
def add_constraint(self, model, constraint):
"""Add a constraint to a model."""
sql = constraint.create_sql(model, self)
if sql:
# Constraint.create_sql returns interpolated SQL which makes
# params=None a necessity to avoid escaping attempts on execution.
self.execute(sql, params=None)
def remove_constraint(self, model, constraint):
"""Remove a constraint from a model."""
sql = constraint.remove_sql(model, self)
if sql:
self.execute(sql)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deal with a model changing its unique_together. The input
unique_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = {tuple(fields) for fields in old_unique_together}
news = {tuple(fields) for fields in new_unique_together}
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(
model,
fields,
{"unique": True, "primary_key": False},
self.sql_delete_unique,
)
# Created uniques
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_unique_sql(model, fields))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deal with a model changing its index_together. The input
index_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = {tuple(fields) for fields in old_index_together}
news = {tuple(fields) for fields in new_index_together}
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(
model,
fields,
{"index": True, "unique": False},
self.sql_delete_index,
)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields=fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
meta_constraint_names = {
constraint.name for constraint in model._meta.constraints
}
meta_index_names = {constraint.name for constraint in model._meta.indexes}
columns = [model._meta.get_field(field).column for field in fields]
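        # Exclude names declared via Meta.constraints / Meta.indexes so that
        # only the implicit unique_together / index_together constraint is
        # matched.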
constraint_names = self._constraint_names(
model,
columns,
exclude=meta_constraint_names | meta_index_names,
**constraint_kwargs,
)
if (
constraint_kwargs.get("unique") is True
and constraint_names
and self.connection.features.allows_multiple_constraints_on_same_fields
):
# Constraint matching the unique_together name.
default_name = str(
self._unique_constraint_name(model._meta.db_table, columns, quote=False)
)
if default_name in constraint_names:
constraint_names = [default_name]
if len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of constraints for %s(%s)"
% (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
)
)
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""Rename the table a model points to."""
if old_db_table == new_db_table or (
self.connection.features.ignores_table_name_case
and old_db_table.lower() == new_db_table.lower()
):
return
self.execute(
self.sql_rename_table
% {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
}
)
# Rename all references to the old table name.
for sql in self.deferred_sql:
if isinstance(sql, Statement):
sql.rename_table_references(old_db_table, new_db_table)
def alter_db_table_comment(self, model, old_db_table_comment, new_db_table_comment):
self.execute(
self.sql_alter_table_comment
% {
"table": self.quote_name(model._meta.db_table),
"comment": self.quote_value(new_db_table_comment or ""),
}
)
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""Move a model's table between tablespaces."""
self.execute(
self.sql_retablespace_table
% {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
}
)
def add_field(self, model, field):
"""
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
if col_type_suffix := field.db_type_suffix(connection=self.connection):
definition += f" {col_type_suffix}"
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params["check"]:
definition += " " + self.sql_check_constraint % db_params
if (
field.remote_field
and self.connection.features.supports_foreign_keys
and field.db_constraint
):
constraint_suffix = "_fk_%(to_table)s_%(to_column)s"
# Add FK constraint inline, if supported.
if self.sql_create_column_inline_fk:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(
field.remote_field.field_name
).column
namespace, _ = split_identifier(model._meta.db_table)
definition += " " + self.sql_create_column_inline_fk % {
"name": self._fk_constraint_name(model, field, constraint_suffix),
"namespace": "%s." % self.quote_name(namespace)
if namespace
else "",
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
"deferrable": self.connection.ops.deferrable_sql(),
}
# Otherwise, add FK constraints later.
else:
self.deferred_sql.append(
self._create_fk_sql(model, field, constraint_suffix)
)
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if (
not self.skip_default_on_alter(field)
and self.effective_default(field) is not None
):
changes_sql, params = self._alter_column_default_sql(
model, None, field, drop=True
)
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": changes_sql,
}
self.execute(sql, params)
# Add field comment, if required.
if (
field.db_comment
and self.connection.features.supports_comments
and not self.connection.features.supports_comments_inline
):
field_type = db_params["type"]
self.execute(
*self._alter_column_comment_sql(
model, field, field_type, field.db_comment
)
)
# Add an index, if required
self.deferred_sql.extend(self._field_indexes_sql(model, field))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)["type"] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_fk_sql(model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
# Remove all deferred statements referencing the deleted column.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_column(
model._meta.db_table, field.column
):
self.deferred_sql.remove(sql)
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allow a field's type, uniqueness, nullability, default, column,
constraints, etc. to be modified.
`old_field` is required to compute the necessary changes.
If `strict` is True, raise errors if the old column does not match
`old_field` precisely.
"""
if not self._field_should_be_altered(old_field, new_field):
return
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params["type"]
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params["type"]
if (old_type is None and old_field.remote_field is None) or (
new_type is None and new_field.remote_field is None
):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)"
% (old_field, new_field),
)
elif (
old_type is None
and new_type is None
and (
old_field.remote_field.through
and new_field.remote_field.through
and old_field.remote_field.through._meta.auto_created
and new_field.remote_field.through._meta.auto_created
)
):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif (
old_type is None
and new_type is None
and (
old_field.remote_field.through
and new_field.remote_field.through
and not old_field.remote_field.through._meta.auto_created
and not new_field.remote_field.through._meta.auto_created
)
):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict,
)
def _alter_field(
self,
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict=False,
):
"""Perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if (
self.connection.features.supports_foreign_keys
and old_field.remote_field
and old_field.db_constraint
and self._field_should_be_altered(
old_field,
new_field,
ignore={"db_comment"},
)
):
fk_names = self._constraint_names(
model, [old_field.column], foreign_key=True
)
if strict and len(fk_names) != 1:
raise ValueError(
"Found wrong number (%s) of foreign key constraints for %s.%s"
% (
len(fk_names),
model._meta.db_table,
old_field.column,
)
)
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_fk_sql(model, fk_name))
# Has unique been removed?
if old_field.unique and (
not new_field.unique or self._field_became_primary_key(old_field, new_field)
):
# Find the unique constraint for this field
meta_constraint_names = {
constraint.name for constraint in model._meta.constraints
}
constraint_names = self._constraint_names(
model,
[old_field.column],
unique=True,
primary_key=False,
exclude=meta_constraint_names,
)
if strict and len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of unique constraints for %s.%s"
% (
len(constraint_names),
model._meta.db_table,
old_field.column,
)
)
for constraint_name in constraint_names:
self.execute(self._delete_unique_sql(model, constraint_name))
# Drop incoming FK constraints if the field is a primary key or unique,
# which might be a to_field target, and things are going to change.
old_collation = old_db_params.get("collation")
new_collation = new_db_params.get("collation")
drop_foreign_keys = (
self.connection.features.supports_foreign_keys
and (
(old_field.primary_key and new_field.primary_key)
or (old_field.unique and new_field.unique)
)
and ((old_type != new_type) or (old_collation != new_collation))
)
if drop_foreign_keys:
            # '_meta.related_field' also contains M2M reverse fields; these
            # are filtered out by _related_non_m2m_objects().
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_fk_sql(new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
# Remove indexes if db_index switched to False or a unique constraint
# will now be used in lieu of an index. The following lines from the
# truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# True | False | False | False
# True | False | False | True
# True | False | True | True
if (
old_field.db_index
and not old_field.unique
and (not new_field.db_index or new_field.unique)
):
# Find the index for this field
meta_index_names = {index.name for index in model._meta.indexes}
# Retrieve only BTREE indexes since this is what's created with
# db_index=True.
index_names = self._constraint_names(
model,
[old_field.column],
index=True,
type_=Index.suffix,
exclude=meta_index_names,
)
for index_name in index_names:
# The only way to check if an index was created with
# db_index=True or with Index(['field'], name='foo')
# is to look at its name (refs #28053).
self.execute(self._delete_index_sql(model, index_name))
# Change check constraints?
if old_db_params["check"] != new_db_params["check"] and old_db_params["check"]:
meta_constraint_names = {
constraint.name for constraint in model._meta.constraints
}
constraint_names = self._constraint_names(
model,
[old_field.column],
check=True,
exclude=meta_constraint_names,
)
if strict and len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of check constraints for %s.%s"
% (
len(constraint_names),
model._meta.db_table,
old_field.column,
)
)
for constraint_name in constraint_names:
self.execute(self._delete_check_sql(model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(
self._rename_field_sql(
model._meta.db_table, old_field, new_field, new_type
)
)
# Rename all references to the renamed column.
for sql in self.deferred_sql:
if isinstance(sql, Statement):
sql.rename_column_references(
model._meta.db_table, old_field.column, new_field.column
)
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type suffix change? (e.g. auto increment).
old_type_suffix = old_field.db_type_suffix(connection=self.connection)
new_type_suffix = new_field.db_type_suffix(connection=self.connection)
# Type, collation, or comment change?
if (
old_type != new_type
or old_type_suffix != new_type_suffix
or old_collation != new_collation
or (
self.connection.features.supports_comments
and old_field.db_comment != new_field.db_comment
)
):
fragment, other_actions = self._alter_column_type_sql(
model, old_field, new_field, new_type, old_collation, new_collation
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
needs_database_default = False
if old_field.null and not new_field.null:
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
if (
not self.skip_default_on_alter(new_field)
and old_default != new_default
and new_default is not None
):
needs_database_default = True
actions.append(
self._alter_column_default_sql(model, old_field, new_field)
)
# Nullability change?
if old_field.null != new_field.null:
fragment = self._alter_column_null_sql(model, old_field, new_field)
if fragment:
null_actions.append(fragment)
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = new_field.has_default() and (
old_field.null and not new_field.null
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions += null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column
% {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default
% {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
                # Since we didn't run a NOT NULL change before, we need to do
                # it now.
for sql, params in null_actions:
self.execute(
self.sql_alter_column
% {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# If primary_key changed to False, delete the primary key constraint.
if old_field.primary_key and not new_field.primary_key:
self._delete_primary_key(model, strict)
# Added a unique?
if self._unique_should_be_added(old_field, new_field):
self.execute(self._create_unique_sql(model, [new_field]))
# Added an index? Add an index if db_index switched to True or a unique
# constraint will no longer be used in lieu of an index. The following
# lines from the truth table show all True cases; the rest are False:
#
# old_field.db_index | old_field.unique | new_field.db_index | new_field.unique
# ------------------------------------------------------------------------------
# False | False | True | False
# False | True | True | False
# True | True | True | False
if (
(not old_field.db_index or old_field.unique)
and new_field.db_index
and not new_field.unique
):
self.execute(self._create_index_sql(model, fields=[new_field]))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if drop_foreign_keys:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
if self._field_became_primary_key(old_field, new_field):
# Make the new one
self.execute(self._create_primary_key_sql(model, new_field))
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params["type"]
rel_collation = rel_db_params.get("collation")
old_rel_db_params = old_rel.field.db_parameters(connection=self.connection)
old_rel_collation = old_rel_db_params.get("collation")
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model,
old_rel.field,
new_rel.field,
rel_type,
old_rel_collation,
rel_collation,
)
self.execute(
self.sql_alter_column
% {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (
self.connection.features.supports_foreign_keys
and new_field.remote_field
and (
fks_dropped or not old_field.remote_field or not old_field.db_constraint
)
and new_field.db_constraint
):
self.execute(
self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s")
)
# Rebuild FKs that pointed to us if we previously had to drop them
if drop_foreign_keys:
for _, rel in rels_to_update:
if rel.field.db_constraint:
self.execute(
self._create_fk_sql(rel.related_model, rel.field, "_fk")
)
# Does it have check constraints we need to add?
if old_db_params["check"] != new_db_params["check"] and new_db_params["check"]:
constraint_name = self._create_index_name(
model._meta.db_table, [new_field.column], suffix="_check"
)
self.execute(
self._create_check_sql(model, constraint_name, new_db_params["check"])
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
changes_sql, params = self._alter_column_default_sql(
model, old_field, new_field, drop=True
)
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": changes_sql,
}
self.execute(sql, params)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_null_sql(self, model, old_field, new_field):
"""
Hook to specialize column null alteration.
Return a (sql, params) fragment to set a column to null or non-null
as required by new_field, or None if no changes are required.
"""
if (
self.connection.features.interprets_empty_strings_as_nulls
and new_field.empty_strings_allowed
):
# The field is nullable in the database anyway, leave it alone.
return
else:
new_db_params = new_field.db_parameters(connection=self.connection)
sql = (
self.sql_alter_column_null
if new_field.null
else self.sql_alter_column_not_null
)
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_db_params["type"],
},
[],
)
def _alter_column_default_sql(self, model, old_field, new_field, drop=False):
"""
Hook to specialize column default alteration.
Return a (sql, params) fragment to add or drop (depending on the drop
argument) a default to new_field's column.
"""
new_default = self.effective_default(new_field)
default = self._column_default_sql(new_field)
params = [new_default]
if drop:
params = []
elif self.connection.features.requires_literal_defaults:
# Some databases (Oracle) can't take defaults as a parameter
# If this is the case, the SchemaEditor for that database should
# implement prepare_default().
default = self.prepare_default(new_default)
params = []
new_db_params = new_field.db_parameters(connection=self.connection)
if drop:
if new_field.null:
sql = self.sql_alter_column_no_default_null
else:
sql = self.sql_alter_column_no_default
else:
sql = self.sql_alter_column_default
return (
sql
% {
"column": self.quote_name(new_field.column),
"type": new_db_params["type"],
"default": default,
},
params,
)
def _alter_column_type_sql(
self, model, old_field, new_field, new_type, old_collation, new_collation
):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Return a two-tuple of: an SQL fragment of (sql, params) to insert into
an ALTER TABLE statement and a list of extra (sql, params) tuples to
run once the field is altered.
"""
other_actions = []
if collate_sql := self._collate_sql(
new_collation, old_collation, model._meta.db_table
):
collate_sql = f" {collate_sql}"
else:
collate_sql = ""
# Comment change?
comment_sql = ""
if self.connection.features.supports_comments and not new_field.many_to_many:
if old_field.db_comment != new_field.db_comment:
# PostgreSQL and Oracle can't execute 'ALTER COLUMN ...' and
# 'COMMENT ON ...' at the same time.
sql, params = self._alter_column_comment_sql(
model, new_field, new_type, new_field.db_comment
)
if sql:
other_actions.append((sql, params))
if new_field.db_comment:
comment_sql = self._comment_sql(new_field.db_comment)
return (
(
self.sql_alter_column_type
% {
"column": self.quote_name(new_field.column),
"type": new_type,
"collation": collate_sql,
"comment": comment_sql,
},
[],
),
other_actions,
)
def _alter_column_comment_sql(self, model, new_field, new_type, new_db_comment):
return (
self.sql_alter_column_comment
% {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"comment": self._comment_sql(new_db_comment),
},
[],
)
def _comment_sql(self, comment):
return self.quote_value(comment or "")
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
# Rename the through table
if (
old_field.remote_field.through._meta.db_table
!= new_field.remote_field.through._meta.db_table
):
self.alter_db_table(
old_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# The field that points to the target model is needed, so we can
# tell alter_field to change it - this is m2m_reverse_field_name()
# (as opposed to m2m_field_name(), which points to our model).
old_field.remote_field.through._meta.get_field(
old_field.m2m_reverse_field_name()
),
new_field.remote_field.through._meta.get_field(
new_field.m2m_reverse_field_name()
),
)
self.alter_field(
new_field.remote_field.through,
            # For self-referential models, alter the field from the other end
            # too.
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, table_name, column_names, suffix=""):
"""
Generate a unique name for an index/unique constraint.
The name is divided into 3 parts: the table name, the column names,
and a unique digest and suffix.
"""
_, table_name = split_identifier(table_name)
hash_suffix_part = "%s%s" % (
names_digest(table_name, *column_names, length=8),
suffix,
)
max_length = self.connection.ops.max_name_length() or 200
# If everything fits into max_length, use that name.
index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part)
if len(index_name) <= max_length:
return index_name
# Shorten a long suffix.
if len(hash_suffix_part) > max_length / 3:
hash_suffix_part = hash_suffix_part[: max_length // 3]
other_length = (max_length - len(hash_suffix_part)) // 2 - 1
index_name = "%s_%s_%s" % (
table_name[:other_length],
"_".join(column_names)[:other_length],
hash_suffix_part,
)
# Prepend D if needed to prevent the name from starting with an
# underscore or a number (not permitted on Oracle).
if index_name[0] == "_" or index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _get_index_tablespace_sql(self, model, fields, db_tablespace=None):
if db_tablespace is None:
if len(fields) == 1 and fields[0].db_tablespace:
db_tablespace = fields[0].db_tablespace
elif settings.DEFAULT_INDEX_TABLESPACE:
db_tablespace = settings.DEFAULT_INDEX_TABLESPACE
elif model._meta.db_tablespace:
db_tablespace = model._meta.db_tablespace
if db_tablespace is not None:
return " " + self.connection.ops.tablespace_sql(db_tablespace)
return ""
def _index_condition_sql(self, condition):
if condition:
return " WHERE " + condition
return ""
def _index_include_sql(self, model, columns):
if not columns or not self.connection.features.supports_covering_indexes:
return ""
return Statement(
" INCLUDE (%(columns)s)",
columns=Columns(model._meta.db_table, columns, self.quote_name),
)
def _create_index_sql(
self,
model,
*,
fields=None,
name=None,
suffix="",
using="",
db_tablespace=None,
col_suffixes=(),
sql=None,
opclasses=(),
condition=None,
include=None,
expressions=None,
):
"""
Return the SQL statement to create the index for one or several fields
or expressions. `sql` can be specified if the syntax differs from the
standard (GIS indexes, ...).
"""
fields = fields or []
expressions = expressions or []
compiler = Query(model, alias_cols=False).get_compiler(
connection=self.connection,
)
tablespace_sql = self._get_index_tablespace_sql(
model, fields, db_tablespace=db_tablespace
)
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
table = model._meta.db_table
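        # IndexName calls this lazily when the statement is rendered; an
        # explicitly provided name is used as-is (quoted), otherwise one is
        # generated from the table and columns.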
def create_index_name(*args, **kwargs):
nonlocal name
if name is None:
name = self._create_index_name(*args, **kwargs)
return self.quote_name(name)
return Statement(
sql_create_index,
table=Table(table, self.quote_name),
name=IndexName(table, columns, suffix, create_index_name),
using=using,
columns=(
self._index_columns(table, columns, col_suffixes, opclasses)
if columns
else Expressions(table, expressions, compiler, self.quote_value)
),
extra=tablespace_sql,
condition=self._index_condition_sql(condition),
include=self._index_include_sql(model, include),
)
def _delete_index_sql(self, model, name, sql=None):
return Statement(
sql or self.sql_delete_index,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
)
def _rename_index_sql(self, model, old_name, new_name):
return Statement(
self.sql_rename_index,
table=Table(model._meta.db_table, self.quote_name),
old_name=self.quote_name(old_name),
new_name=self.quote_name(new_name),
)
def _index_columns(self, table, columns, col_suffixes, opclasses):
return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes)
def _model_indexes_sql(self, model):
"""
Return a list of all index SQL statements (field indexes,
index_together, Meta.indexes) for the specified model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
output.extend(self._field_indexes_sql(model, field))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields=fields, suffix="_idx"))
for index in model._meta.indexes:
if (
not index.contains_expressions
or self.connection.features.supports_expression_indexes
):
output.append(index.create_sql(model, self))
return output
def _field_indexes_sql(self, model, field):
"""
Return a list of all index SQL statements for the specified field.
"""
output = []
if self._field_should_be_indexed(model, field):
output.append(self._create_index_sql(model, fields=[field]))
return output
def _field_should_be_altered(self, old_field, new_field, ignore=None):
ignore = ignore or set()
_, old_path, old_args, old_kwargs = old_field.deconstruct()
_, new_path, new_args, new_kwargs = new_field.deconstruct()
# Don't alter when:
# - changing only a field name
# - changing an attribute that doesn't affect the schema
# - changing an attribute in the provided set of ignored attributes
# - adding only a db_column and the column name is not changed
for attr in ignore.union(old_field.non_db_attrs):
old_kwargs.pop(attr, None)
for attr in ignore.union(new_field.non_db_attrs):
new_kwargs.pop(attr, None)
return self.quote_name(old_field.column) != self.quote_name(
new_field.column
) or (old_path, old_args, old_kwargs) != (new_path, new_args, new_kwargs)
def _field_should_be_indexed(self, model, field):
return field.db_index and not field.unique
def _field_became_primary_key(self, old_field, new_field):
return not old_field.primary_key and new_field.primary_key
def _unique_should_be_added(self, old_field, new_field):
return (
not new_field.primary_key
and new_field.unique
and (not old_field.unique or old_field.primary_key)
)
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
table = Table(model._meta.db_table, self.quote_name)
name = self._fk_constraint_name(model, field, suffix)
column = Columns(model._meta.db_table, [field.column], self.quote_name)
to_table = Table(field.target_field.model._meta.db_table, self.quote_name)
to_column = Columns(
field.target_field.model._meta.db_table,
[field.target_field.column],
self.quote_name,
)
deferrable = self.connection.ops.deferrable_sql()
return Statement(
self.sql_create_fk,
table=table,
name=name,
column=column,
to_table=to_table,
to_column=to_column,
deferrable=deferrable,
)
def _fk_constraint_name(self, model, field, suffix):
def create_fk_name(*args, **kwargs):
return self.quote_name(self._create_index_name(*args, **kwargs))
return ForeignKeyName(
model._meta.db_table,
[field.column],
split_identifier(field.target_field.model._meta.db_table)[1],
[field.target_field.column],
suffix,
create_fk_name,
)
def _delete_fk_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_fk, model, name)
def _deferrable_constraint_sql(self, deferrable):
if deferrable is None:
return ""
if deferrable == Deferrable.DEFERRED:
return " DEFERRABLE INITIALLY DEFERRED"
if deferrable == Deferrable.IMMEDIATE:
return " DEFERRABLE INITIALLY IMMEDIATE"
def _unique_sql(
self,
model,
fields,
name,
condition=None,
deferrable=None,
include=None,
opclasses=None,
expressions=None,
):
if (
deferrable
and not self.connection.features.supports_deferrable_unique_constraints
):
return None
if condition or include or opclasses or expressions:
# Databases support conditional, covering, and functional unique
# constraints via a unique index.
sql = self._create_unique_sql(
model,
fields,
name=name,
condition=condition,
include=include,
opclasses=opclasses,
expressions=expressions,
)
if sql:
self.deferred_sql.append(sql)
return None
constraint = self.sql_unique_constraint % {
"columns": ", ".join([self.quote_name(field.column) for field in fields]),
"deferrable": self._deferrable_constraint_sql(deferrable),
}
return self.sql_constraint % {
"name": self.quote_name(name),
"constraint": constraint,
}
def _create_unique_sql(
self,
model,
fields,
name=None,
condition=None,
deferrable=None,
include=None,
opclasses=None,
expressions=None,
):
if (
(
deferrable
and not self.connection.features.supports_deferrable_unique_constraints
)
or (condition and not self.connection.features.supports_partial_indexes)
or (include and not self.connection.features.supports_covering_indexes)
or (
expressions and not self.connection.features.supports_expression_indexes
)
):
return None
compiler = Query(model, alias_cols=False).get_compiler(
connection=self.connection
)
table = model._meta.db_table
columns = [field.column for field in fields]
if name is None:
name = self._unique_constraint_name(table, columns, quote=True)
else:
name = self.quote_name(name)
if condition or include or opclasses or expressions:
sql = self.sql_create_unique_index
else:
sql = self.sql_create_unique
if columns:
columns = self._index_columns(
table, columns, col_suffixes=(), opclasses=opclasses
)
else:
columns = Expressions(table, expressions, compiler, self.quote_value)
return Statement(
sql,
table=Table(table, self.quote_name),
name=name,
columns=columns,
condition=self._index_condition_sql(condition),
deferrable=self._deferrable_constraint_sql(deferrable),
include=self._index_include_sql(model, include),
)
def _unique_constraint_name(self, table, columns, quote=True):
if quote:
def create_unique_name(*args, **kwargs):
return self.quote_name(self._create_index_name(*args, **kwargs))
else:
create_unique_name = self._create_index_name
return IndexName(table, columns, "_uniq", create_unique_name)
def _delete_unique_sql(
self,
model,
name,
condition=None,
deferrable=None,
include=None,
opclasses=None,
expressions=None,
):
if (
(
deferrable
and not self.connection.features.supports_deferrable_unique_constraints
)
or (condition and not self.connection.features.supports_partial_indexes)
or (include and not self.connection.features.supports_covering_indexes)
or (
expressions and not self.connection.features.supports_expression_indexes
)
):
return None
if condition or include or opclasses or expressions:
sql = self.sql_delete_index
else:
sql = self.sql_delete_unique
return self._delete_constraint_sql(sql, model, name)
def _check_sql(self, name, check):
return self.sql_constraint % {
"name": self.quote_name(name),
"constraint": self.sql_check_constraint % {"check": check},
}
def _create_check_sql(self, model, name, check):
return Statement(
self.sql_create_check,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
check=check,
)
def _delete_check_sql(self, model, name):
if not self.connection.features.supports_table_check_constraints:
return None
return self._delete_constraint_sql(self.sql_delete_check, model, name)
def _delete_constraint_sql(self, template, model, name):
return Statement(
template,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(name),
)
def _constraint_names(
self,
model,
column_names=None,
unique=None,
primary_key=None,
index=None,
foreign_key=None,
check=None,
type_=None,
exclude=None,
):
"""Return all constraint names matching the columns and conditions."""
if column_names is not None:
column_names = [
self.connection.introspection.identifier_converter(
truncate_name(name, self.connection.ops.max_name_length())
)
if self.connection.features.truncates_names
else self.connection.introspection.identifier_converter(name)
for name in column_names
]
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(
cursor, model._meta.db_table
)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict["columns"]:
if unique is not None and infodict["unique"] != unique:
continue
if primary_key is not None and infodict["primary_key"] != primary_key:
continue
if index is not None and infodict["index"] != index:
continue
if check is not None and infodict["check"] != check:
continue
if foreign_key is not None and not infodict["foreign_key"]:
continue
if type_ is not None and infodict["type"] != type_:
continue
if not exclude or name not in exclude:
result.append(name)
return result
def _delete_primary_key(self, model, strict=False):
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError(
"Found wrong number (%s) of PK constraints for %s"
% (
len(constraint_names),
model._meta.db_table,
)
)
for constraint_name in constraint_names:
self.execute(self._delete_primary_key_sql(model, constraint_name))
def _create_primary_key_sql(self, model, field):
return Statement(
self.sql_create_pk,
table=Table(model._meta.db_table, self.quote_name),
name=self.quote_name(
self._create_index_name(
model._meta.db_table, [field.column], suffix="_pk"
)
),
columns=Columns(model._meta.db_table, [field.column], self.quote_name),
)
def _delete_primary_key_sql(self, model, name):
return self._delete_constraint_sql(self.sql_delete_pk, model, name)
def _collate_sql(self, collation, old_collation=None, table_name=None):
return "COLLATE " + self.quote_name(collation) if collation else ""
def remove_procedure(self, procedure_name, param_types=()):
sql = self.sql_delete_procedure % {
"procedure": self.quote_name(procedure_name),
"param_types": ",".join(param_types),
}
self.execute(sql) | PypiClean |
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/calculus/polynomials.py | from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Polynomials #
#----------------------------------------------------------------------------#
# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
r"""
Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
:func:`~mpmath.polyval` evaluates the polynomial
.. math ::
P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
evaluates `P(x)` with the derivative, `P'(x)`, and returns the
tuple `(P(x), P'(x))`.
>>> from mpmath import *
>>> mp.pretty = True
>>> polyval([3, 0, 2], 0.5)
2.75
>>> polyval([3, 0, 2], 0.5, derivative=True)
(2.75, 3.0)
The coefficients and the evaluation point may be any combination
of real or complex numbers.
"""
if not coeffs:
return ctx.zero
p = ctx.convert(coeffs[0])
q = ctx.zero
for c in coeffs[1:]:
if derivative:
q = p + x*q
p = c + x*p
if derivative:
return p, q
else:
return p
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10, error=False):
"""
Computes all roots (real or complex) of a given polynomial. The roots are
returned as a sorted list, where real roots appear first followed by
complex conjugate roots as adjacent elements. The polynomial should be
given as a list of coefficients, in the format used by :func:`~mpmath.polyval`.
The leading coefficient must be nonzero.
With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)* where
*err* is an estimate of the maximum error among the computed roots.
**Examples**
Finding the three real roots of `x^3 - x^2 - 14x + 24`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(polyroots([1,-1,-14,24]), 4)
[-4.0, 2.0, 3.0]
Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
error estimate::
>>> roots, err = polyroots([4,3,2], error=True)
>>> for r in roots:
... print(r)
...
(-0.375 + 0.59947894041409j)
(-0.375 - 0.59947894041409j)
>>>
>>> err
2.22044604925031e-16
>>>
>>> polyval([4,3,2], roots[0])
(2.22044604925031e-16 + 0.0j)
>>> polyval([4,3,2], roots[1])
(2.22044604925031e-16 + 0.0j)
The following example computes all the 5th roots of unity; that is,
the roots of `x^5 - 1`::
>>> mp.dps = 20
>>> for r in polyroots([1, 0, 0, 0, 0, -1]):
... print(r)
...
1.0
(-0.8090169943749474241 + 0.58778525229247312917j)
(-0.8090169943749474241 - 0.58778525229247312917j)
(0.3090169943749474241 + 0.95105651629515357212j)
(0.3090169943749474241 - 0.95105651629515357212j)
**Precision and conditioning**
Provided there are no repeated roots, :func:`~mpmath.polyroots` can typically
compute all roots of an arbitrary polynomial to high precision::
>>> mp.dps = 60
>>> for r in polyroots([1, 0, -10, 0, 1]):
    ...     print(r)
...
-3.14626436994197234232913506571557044551247712918732870123249
-0.317837245195782244725757617296174288373133378433432554879127
0.317837245195782244725757617296174288373133378433432554879127
3.14626436994197234232913506571557044551247712918732870123249
>>>
>>> sqrt(3) + sqrt(2)
3.14626436994197234232913506571557044551247712918732870123249
>>> sqrt(3) - sqrt(2)
0.317837245195782244725757617296174288373133378433432554879127
**Algorithm**
:func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
uses complex arithmetic to locate all roots simultaneously.
The Durand-Kerner method can be viewed as approximately performing
simultaneous Newton iteration for all the roots. In particular,
the convergence to simple roots is quadratic, just like Newton's
method.
Although all roots are internally calculated using complex arithmetic,
any root found to have an imaginary part smaller than the estimated
numerical error is truncated to a real number. Real roots are placed
first in the returned list, sorted by value. The remaining complex
roots are sorted by real their parts so that conjugate roots end up
next to each other.
**References**
1. http://en.wikipedia.org/wiki/Durand-Kerner_method
"""
if len(coeffs) <= 1:
if not coeffs or not coeffs[0]:
raise ValueError("Input to polyroots must not be the zero polynomial")
# Constant polynomial with no roots
return []
orig = ctx.prec
weps = +ctx.eps
try:
ctx.prec += 10
tol = ctx.eps * 128
deg = len(coeffs) - 1
# Must be monic
lead = ctx.convert(coeffs[0])
if lead == 1:
coeffs = [ctx.convert(c) for c in coeffs]
else:
coeffs = [c/lead for c in coeffs]
f = lambda x: ctx.polyval(coeffs, x)
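        # Initial guesses: powers of a fixed complex number (0.4+0.9j), the
        # usual starting configuration for Durand-Kerner iteration.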
roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
err = [ctx.one for n in xrange(deg)]
# Durand-Kerner iteration until convergence
for step in xrange(maxsteps):
if abs(max(err)) < tol:
break
for i in xrange(deg):
if not abs(err[i]) < tol:
p = roots[i]
x = f(p)
for j in range(deg):
if i != j:
try:
x /= (p-roots[j])
except ZeroDivisionError:
continue
roots[i] = p - x
err[i] = abs(x)
# Remove small imaginary parts
if cleanup:
for i in xrange(deg):
if abs(ctx._im(roots[i])) < weps:
roots[i] = roots[i].real
elif abs(ctx._re(roots[i])) < weps:
roots[i] = roots[i].imag * 1j
roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
finally:
ctx.prec = orig
if error:
err = max(err)
err = max(err, ctx.ldexp(1, -orig+1))
return [+r for r in roots], +err
else:
return [+r for r in roots] | PypiClean |
/iloveyou-1.0.tar.gz/iloveyou-1.0/src/request/adapters.py | import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import parse_url
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from urllib3.exceptions import LocationValueError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
InvalidURL)
from .auth import _basic_auth_str
try:
from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
try:
conn = self.get_connection(request.url, proxies)
except LocationValueError as e:
raise InvalidURL(e, request=request)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7, use buffering of HTTP responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 3.3+
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
if isinstance(e.reason, _SSLError):
# This branch is for urllib3 v1.22 and later.
raise SSLError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
# This branch is for urllib3 versions earlier than v1.22
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
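if __name__ == "__main__":
    # Minimal sketch of the pattern described in the HTTPAdapter docstring above:
    # mount an adapter with a custom urllib3 Retry policy on a Session. The target
    # URL is a placeholder, and the call is wrapped so the sketch degrades
    # gracefully when run offline.
    import requests
    retries = Retry(total=3, backoff_factor=0.5, status_forcelist=[502, 503, 504])
    session = requests.Session()
    session.mount("https://", HTTPAdapter(max_retries=retries))
    try:
        response = session.get("https://example.com", timeout=(3.05, 10))
        print(response.status_code)
    except requests.exceptions.RequestException as exc:
        print("request failed:", exc)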
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_insight_expense_bill/get.py
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from firefly_iii_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from firefly_iii_client import schemas # noqa: F401
from firefly_iii_client.model.insight_group import InsightGroup
from firefly_iii_client.model.unauthenticated import Unauthenticated
from firefly_iii_client.model.bad_request import BadRequest
from firefly_iii_client.model.internal_exception import InternalException
from firefly_iii_client.model.not_found import NotFound
from . import path
# Query params
StartSchema = schemas.DateSchema
EndSchema = schemas.DateSchema
class BillsSchema(
schemas.ListSchema
):
class MetaOapg:
items = schemas.Int64Schema
def __new__(
cls,
_arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, decimal.Decimal, int, ]], typing.List[typing.Union[MetaOapg.items, decimal.Decimal, int, ]]],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'BillsSchema':
return super().__new__(
cls,
_arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> MetaOapg.items:
return super().__getitem__(i)
class AccountsSchema(
schemas.ListSchema
):
class MetaOapg:
items = schemas.Int64Schema
def __new__(
cls,
_arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, decimal.Decimal, int, ]], typing.List[typing.Union[MetaOapg.items, decimal.Decimal, int, ]]],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'AccountsSchema':
return super().__new__(
cls,
_arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> MetaOapg.items:
return super().__getitem__(i)
RequestRequiredQueryParams = typing_extensions.TypedDict(
'RequestRequiredQueryParams',
{
'start': typing.Union[StartSchema, str, date, ],
'end': typing.Union[EndSchema, str, date, ],
}
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
'RequestOptionalQueryParams',
{
'bills[]': typing.Union[BillsSchema, list, tuple, ],
'accounts[]': typing.Union[AccountsSchema, list, tuple, ],
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_start = api_client.QueryParameter(
name="start",
style=api_client.ParameterStyle.FORM,
schema=StartSchema,
required=True,
explode=True,
)
request_query_end = api_client.QueryParameter(
name="end",
style=api_client.ParameterStyle.FORM,
schema=EndSchema,
required=True,
explode=True,
)
request_query_bills = api_client.QueryParameter(
name="bills[]",
style=api_client.ParameterStyle.FORM,
schema=BillsSchema,
explode=True,
)
request_query_accounts = api_client.QueryParameter(
name="accounts[]",
style=api_client.ParameterStyle.FORM,
schema=AccountsSchema,
explode=True,
)
# Header params
XTraceIdSchema = schemas.UUIDSchema
RequestRequiredHeaderParams = typing_extensions.TypedDict(
'RequestRequiredHeaderParams',
{
}
)
RequestOptionalHeaderParams = typing_extensions.TypedDict(
'RequestOptionalHeaderParams',
{
'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ],
},
total=False
)
class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
pass
request_header_x_trace_id = api_client.HeaderParameter(
name="X-Trace-Id",
style=api_client.ParameterStyle.SIMPLE,
schema=XTraceIdSchema,
)
_auth = [
'firefly_iii_auth',
]
SchemaFor200ResponseBodyApplicationJson = InsightGroup
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor400ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
content={
'application/json': api_client.MediaType(
schema=SchemaFor400ResponseBodyApplicationJson),
},
)
SchemaFor401ResponseBodyApplicationJson = Unauthenticated
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor401ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
content={
'application/json': api_client.MediaType(
schema=SchemaFor401ResponseBodyApplicationJson),
},
)
SchemaFor404ResponseBodyApplicationJson = NotFound
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor404ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
response_cls=ApiResponseFor404,
content={
'application/json': api_client.MediaType(
schema=SchemaFor404ResponseBodyApplicationJson),
},
)
SchemaFor500ResponseBodyApplicationJson = InternalException
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor500ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
response_cls=ApiResponseFor500,
content={
'application/json': api_client.MediaType(
schema=SchemaFor500ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
'400': _response_for_400,
'401': _response_for_401,
'404': _response_for_404,
'500': _response_for_500,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _insight_expense_bill_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _insight_expense_bill_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _insight_expense_bill_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _insight_expense_bill_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Insight into expenses, grouped by bill.
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params)
used_path = path.value
prefix_separator_iterator = None
for parameter in (
request_query_start,
request_query_end,
request_query_bills,
request_query_accounts,
):
parameter_data = query_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
if prefix_separator_iterator is None:
prefix_separator_iterator = parameter.get_prefix_separator_iterator()
serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
for serialized_value in serialized_data.values():
used_path += serialized_value
_headers = HTTPHeaderDict()
for parameter in (
request_header_x_trace_id,
):
parameter_data = header_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_headers.extend(serialized_data)
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(
status=response.status,
reason=response.reason,
api_response=api_response
)
return api_response
class InsightExpenseBill(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def insight_expense_bill(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def insight_expense_bill(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def insight_expense_bill(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def insight_expense_bill(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._insight_expense_bill_oapg(
query_params=query_params,
header_params=header_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._insight_expense_bill_oapg(
query_params=query_params,
header_params=header_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
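if __name__ == "__main__":
    # Minimal usage sketch for the endpoint defined above. It assumes the package
    # exposes Configuration and ApiClient with the usual generated-client interface
    # and that a valid personal access token is available; the host and token below
    # are placeholders, not working credentials.
    import firefly_iii_client
    configuration = firefly_iii_client.Configuration(host="https://demo.firefly-iii.org/api")
    configuration.access_token = "YOUR_PERSONAL_ACCESS_TOKEN"
    with firefly_iii_client.ApiClient(configuration) as client:
        endpoint = ApiForget(client)
        response = endpoint.get(query_params={
            "start": date(2023, 1, 1),
            "end": date(2023, 12, 31),
        })
        print(response.body)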
/LDA_final_project_19-1.5-py3-none-any.whl/src/LDA_EM.py
from collections import Counter
import numpy as np
from scipy.special import digamma, gammaln, polygamma
import warnings
warnings.filterwarnings("ignore")
def get_doc(file_name,stopwords_file):
'''Parse <TEXT> blocks from the file, lower-case them and strip punctuation, then drop stop words and words occurring only once in a document; return the tokenized corpus.'''
texts = []
special_chars = '!"#$£@%&/()=?.,+-*\':;_´`1234567890'
with open(file_name, 'r') as infile:
copy = False
text = ''
for line in infile:
if copy:
if line.strip() == '</TEXT>':
text = text.lower()
texts.append(text)
text = ''
copy = False
else:
for char in special_chars:
line = line.replace(char, '')
text += line
else:
if line.strip() == '<TEXT>':
copy = True
tmp_texts = np.array(texts)
stop_words_line = []
with open(stopwords_file, 'r') as infile:
data=infile.read().replace(',', ' ')
for word in data.split():
stop_words_line.append(word)
stop_words = np.array(stop_words_line)
corpus = []
for text in tmp_texts:
words = np.array(text.split())
stopwords_filtered_document = [w for w in words if w not in stop_words]
single_words = [k for k, v in Counter(stopwords_filtered_document).items() if v == 1 ]
final_filtered_document = [w for w in stopwords_filtered_document if w not in single_words]
if not final_filtered_document: # Document is empty, Shape = []
continue
corpus.append(final_filtered_document)
return corpus
def initialize_parameters(corpus, voc, k, M):
Phi = []
gamma = np.zeros([M,k])
alpha = np.ones([M,k])
for m in range(0,M):
doc = np.array(corpus[m])
N = len(doc)
phi = np.ones([N,k])/k
gamma[m,:] = alpha [m,:] + N/k
Phi.append(phi)
# Initialize Beta
Beta = np.random.uniform(0,1,(k,len(voc)))
Beta = Beta/Beta.sum(axis=1).reshape(k,1)
return Phi, gamma, alpha, Beta
def compute_likelihood(Phi, gamma, alpha, Beta, doc, voc, k):
likelihood = 0.0
V = len(voc)
words = np.array(doc)
N = len(words)
alpha_sum = 0.0
phi_gamma_sum = 0.0
phi_lgb_sum = 0.0
e_sum = 0.0
gamma_sum = 0.0
alpha_sum += gammaln(alpha.sum())
gamma_sum -= gammaln(gamma.sum())
for i in range(0,k):
#
alpha_sum -= gammaln(alpha[i]) + (alpha[i] - 1) * (digamma(gamma[i]) - digamma(gamma.sum()))
Phi_p= Phi[:,i] > 0
w_ind = np.array(list(map(lambda x: np.sum(np.in1d(voc, x)),words[Phi_p])))
phi_gamma_sum = np.sum(Phi[Phi_p,i] * (digamma(gamma[i]) - digamma(gamma.sum())))
e_sum = np.dot(Phi[Phi_p,i],np.log(Phi[Phi_p,i]))
b_p=Beta[i,:]>0
phi_lgb_sum += np.sum(np.outer((Phi[Phi_p,i] * w_ind), np.log(Beta[i,b_p])))
gamma_sum += gammaln(gamma[i]) - (gamma[i] - 1) * (digamma(gamma[i]) - digamma(gamma.sum()))
likelihood += (alpha_sum + phi_gamma_sum + phi_lgb_sum - gamma_sum - e_sum)
return likelihood
def E_step(Phi, gamma, alpha, Beta, corpus, voc, k, M):
'''E-step: variational inference'''
likelihood = 0.0
#
for d in range(0,M):
words = np.array(corpus[d])
N = len(words)
phi = Phi[d]
conv_counter = 0
#
while conv_counter < 100:
phi_old = phi
phi = np.zeros([N,k])
gamma_old = gamma[d, :].copy()  # copy, not a view, so the convergence check below compares against the old value
for n in range(0,N):
word = words[n]
w_in_voc =np.where(voc == word)
if len(w_in_voc[0]) > 0: # word exists in vocabulary
phi[n,:] = Beta[:,w_in_voc[0][0]]* np.exp(digamma(gamma[d,:]) - digamma(np.sum(gamma[d,:])))
phi[n,:] = phi[n,:] / np.sum(phi[n,:])
alpha = np.ones([M,k])
gamma[d, :] = alpha[d, :] + np.sum(phi, axis=0)
conv_counter += 1
# Check if gamma and phi converged
if np.linalg.norm(phi - phi_old) < 1e-3 and np.linalg.norm(gamma[d,:] - gamma_old) < 1e-3:
Phi[d] = phi
likelihood += compute_likelihood(Phi[d], gamma[d,:], alpha[d,:], Beta, corpus[d], voc, k)
conv_counter=100
return Phi, gamma, likelihood
def M_step(Phi, gamma, alpha, corpus, voc, k, M):
V = len(voc)
# 1 update Beta
Beta = np.zeros([k,V])
for d in range(0,M):
words = np.array(corpus[d])
voc_pos = np.array(list(map(lambda x: np.in1d(words, x),voc)))
Beta += np.dot(voc_pos, Phi[d]).transpose()
Beta = Beta / Beta.sum(axis=1).reshape(k,1)
# 2 update alpha
for i in range(1000):
alpha_old = alpha.copy()  # copy so the in-place update below does not defeat the convergence check
# Calculate the gradient
g = M*(digamma(np.sum(alpha))-digamma(alpha)) + np.sum(digamma(gamma)-np.tile(digamma(np.sum(gamma,axis=1)),(k,1)).T,axis=0)
# Calculate Hessian
h = -M * polygamma(1,alpha)
z = M * polygamma(1,np.sum(alpha))
# Calculate parameter
c = np.sum(g/h)/(1/z+np.sum(1/h))
# Update alpha
alpha -= (g-c)/h
if np.sqrt(np.mean(np.square(alpha-alpha_old)))<1e-4:
break
return alpha, Beta
def variational_EM(Phi_init, gamma_init, alpha_init, Beta_init, corpus, voc, k, M):
'''EM inplementation'''
print('Variational EM')
likelihood = 0
likelihood_old = 0
iteration = 1 # Initialization step is the first step
Phi = Phi_init
gamma = gamma_init
alpha = alpha_init
Beta = Beta_init
while iteration <= 100 and (iteration <= 2 or np.abs((likelihood-likelihood_old)/likelihood_old) > 1e-4):
# Update parameters
likelihood_old = likelihood
Phi_old = Phi
gamma_old = gamma
alpha_old = alpha
Beta_old = Beta
Phi, gamma, likelihood = E_step(Phi_old, gamma_old, alpha_old, Beta_old, corpus, voc, k, M)
alpha, Beta = M_step(Phi, gamma, alpha_old, corpus, voc, k, M)
iteration += 1
return Phi, gamma, alpha, Beta, likelihood
def inference_method(corpus, voc,k=2):
'''use EM to do LDA'''
M = len(corpus) # nbr of documents
Phi_init, gamma_init, alpha_init, Beta_init = initialize_parameters(corpus, voc, k, M)
Phi, gamma, alpha, Beta, likelihood = variational_EM(Phi_init, gamma_init, alpha_init, Beta_init, corpus, voc, k, M)
return Phi, gamma, alpha, Beta, likelihood
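if __name__ == "__main__":
    # Minimal sketch: fit two topics to a tiny toy corpus. The documents below are
    # made up for illustration; real input would normally come from get_doc().
    corpus = [
        ["apple", "banana", "apple", "fruit", "banana"],
        ["dog", "cat", "dog", "pet", "cat"],
        ["banana", "fruit", "fruit", "apple", "pet"],
    ]
    voc = np.unique(np.concatenate([np.array(doc) for doc in corpus]))
    Phi, gamma, alpha, Beta, likelihood = inference_method(corpus, voc, k=2)
    print("log-likelihood:", likelihood)
    print("document-topic proportions:")
    print(gamma / gamma.sum(axis=1, keepdims=True))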
/GoogleAppEngineMapReduce-1.9.22.0.tar.gz/GoogleAppEngineMapReduce-1.9.22.0/mapreduce/util.py
# pylint: disable=g-bad-name
__all__ = [
"create_datastore_write_config",
"for_name",
"get_queue_name",
"get_short_name",
"handler_for_name",
"is_generator",
"parse_bool",
"total_seconds",
"try_serialize_handler",
"try_deserialize_handler",
"CALLBACK_MR_ID_TASK_HEADER",
"strip_prefix_from_items"
]
import inspect
import os
import pickle
import random
import sys
import time
import types
from google.appengine.ext import ndb
from google.appengine.datastore import datastore_rpc
from mapreduce import parameters
# Taskqueue task header for mr id. Use internal by MR.
_MR_ID_TASK_HEADER = "AE-MR-ID"
_MR_SHARD_ID_TASK_HEADER = "AE-MR-SHARD-ID"
# Callback task MR ID task header
CALLBACK_MR_ID_TASK_HEADER = "Mapreduce-Id"
# Ridiculous future UNIX epoch time, 500 years from now.
_FUTURE_TIME = 2**34
def _get_descending_key(gettime=time.time):
"""Returns a key name lexically ordered by time descending.
This lets us have a key name for use with Datastore entities which returns
rows in time descending order when it is scanned in lexically ascending order,
allowing us to bypass index building for descending indexes.
Args:
gettime: Used for testing.
Returns:
A string with a time descending key.
"""
now_descending = int((_FUTURE_TIME - gettime()) * 100)
request_id_hash = os.environ.get("REQUEST_ID_HASH")
if not request_id_hash:
request_id_hash = str(random.getrandbits(32))
return "%d%s" % (now_descending, request_id_hash)
def _get_task_host():
"""Get the Host header value for all mr tasks.
Task Host header determines which instance this task would be routed to.
Current version id format is: v7.368834058928280579
Current module id is just the module's name. It could be "default"
Default version hostname is app_id.appspot.com
Returns:
A complete host name is of format version.module.app_id.appspot.com
If module is the default module, just version.app_id.appspot.com. The reason
is if an app doesn't have modules enabled and the url is
"version.default.app_id", "version" is ignored and "default" is used as
version. If "default" version doesn't exist, the url is routed to the
default version.
"""
version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
default_host = os.environ["DEFAULT_VERSION_HOSTNAME"]
module = os.environ["CURRENT_MODULE_ID"]
if os.environ["CURRENT_MODULE_ID"] == "default":
return "%s.%s" % (version, default_host)
return "%s.%s.%s" % (version, module, default_host)
def _get_task_headers(map_job_id,
mr_id_header_key=_MR_ID_TASK_HEADER):
"""Get headers for all mr tasks.
Args:
map_job_id: map job id.
mr_id_header_key: the key to set mr id with.
Returns:
A dictionary of all headers.
"""
return {mr_id_header_key: map_job_id,
"Host": _get_task_host()}
def _enum(**enums):
"""Helper to create enum."""
return type("Enum", (), enums)
def get_queue_name(queue_name):
"""Determine which queue MR should run on.
How to choose the queue:
1. If user provided one, use that.
2. If we are starting a mr from taskqueue, inherit that queue.
If it's a special queue, fall back to the default queue.
3. Default queue.
If user is using any MR pipeline interface, pipeline.start takes a
"queue_name" argument. The pipeline will run on that queue and MR will
simply inherit the queue_name.
Args:
queue_name: queue_name from user. Maybe None.
Returns:
The queue name to run on.
"""
if queue_name:
return queue_name
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
parameters.config.QUEUE_NAME)
if len(queue_name) > 1 and queue_name[0:2] == "__":
# We are currently in some special queue. E.g. __cron.
return parameters.config.QUEUE_NAME
else:
return queue_name
def total_seconds(td):
"""convert a timedelta to seconds.
This is patterned after timedelta.total_seconds, which is only
available in python 27.
Args:
td: a timedelta object.
Returns:
total seconds within a timedelta. Rounded up to seconds.
"""
secs = td.seconds + td.days * 24 * 3600
if td.microseconds:
secs += 1
return secs
def for_name(fq_name, recursive=False):
"""Find class/function/method specified by its fully qualified name.
Fully qualified can be specified as:
* <module_name>.<class_name>
* <module_name>.<function_name>
* <module_name>.<class_name>.<method_name> (an unbound method will be
returned in this case).
for_name works by doing __import__ for <module_name>, and looks for
<class_name>/<function_name> in module's __dict__/attrs. If fully qualified
name doesn't contain '.', the current module will be used.
Args:
fq_name: fully qualified name of something to find.
recursive: run recursively or not.
Returns:
class object or None if fq_name is None.
Raises:
ImportError: when specified module could not be loaded or the class
was not found in the module.
"""
# if "." not in fq_name:
# raise ImportError("'%s' is not a full-qualified name" % fq_name)
if fq_name is None:
return
fq_name = str(fq_name)
module_name = __name__
short_name = fq_name
if fq_name.rfind(".") >= 0:
(module_name, short_name) = (fq_name[:fq_name.rfind(".")],
fq_name[fq_name.rfind(".") + 1:])
try:
result = __import__(module_name, None, None, [short_name])
return result.__dict__[short_name]
except KeyError:
# If we're recursively inside a for_name() chain, then we want to raise
# this error as a key error so we can report the actual source of the
# problem. If we're *not* recursively being called, that means the
# module was found and the specific item could not be loaded, and thus
# we want to raise an ImportError directly.
if recursive:
raise
else:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# module_name is not actually a module. Try for_name for it to figure
# out what's this.
try:
module = for_name(module_name, recursive=True)
if hasattr(module, short_name):
return getattr(module, short_name)
else:
# The module was found, but the function component is missing.
raise KeyError()
except KeyError:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# This means recursive import attempts failed, thus we will raise the
# first ImportError we encountered, since it's likely the most accurate.
pass
# Raise the original import error that caused all of this, since it is
# likely the real cause of the overall problem.
raise
def handler_for_name(fq_name):
"""Resolves and instantiates handler by fully qualified name.
First resolves the name using for_name call. Then if it resolves to a class,
instantiates a class, if it resolves to a method - instantiates the class and
binds method to the instance.
Args:
fq_name: fully qualified name of something to find.
Returns:
handler instance which is ready to be called.
"""
resolved_name = for_name(fq_name)
if isinstance(resolved_name, (type, types.ClassType)):
# create new instance if this is type
return resolved_name()
elif isinstance(resolved_name, types.MethodType):
# bind the method
return getattr(resolved_name.im_class(), resolved_name.__name__)
else:
return resolved_name
def try_serialize_handler(handler):
"""Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None.
"""
if (isinstance(handler, types.InstanceType) or # old style class
(isinstance(handler, object) and # new style class
not inspect.isfunction(handler) and
not inspect.ismethod(handler)) and
hasattr(handler, "__call__")):
return pickle.dumps(handler)
return None
def try_deserialize_handler(serialized_handler):
"""Reverse function of try_serialize_handler.
Args:
serialized_handler: serialized handler str or None.
Returns:
handler instance or None.
"""
if serialized_handler:
return pickle.loads(serialized_handler)
def is_generator(obj):
"""Return true if the object is generator or generator function.
Generator function objects provides same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
true if the object is generator function.
"""
if isinstance(obj, types.GeneratorType):
return True
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR))
def get_short_name(fq_name):
"""Returns the last component of the name."""
return fq_name.split(".")[-1:][0]
def parse_bool(obj):
"""Return true if the object represents a truth value, false otherwise.
For bool and numeric objects, uses Python's built-in bool function. For
str objects, checks string against a list of possible truth values.
Args:
obj: object to determine boolean value of; expected
Returns:
Boolean value according to 5.1 of Python docs if object is not a str
object. For str objects, return True if str is in TRUTH_VALUE_SET
and False otherwise.
http://docs.python.org/library/stdtypes.html
"""
if type(obj) is str:
TRUTH_VALUE_SET = ["true", "1", "yes", "t", "on"]
return obj.lower() in TRUTH_VALUE_SET
else:
return bool(obj)
def create_datastore_write_config(mapreduce_spec):
"""Creates datastore config to use in write operations.
Args:
mapreduce_spec: current mapreduce specification as MapreduceSpec.
Returns:
an instance of datastore_rpc.Configuration to use for all write
operations in the mapreduce.
"""
force_writes = parse_bool(mapreduce_spec.params.get("force_writes", "false"))
if force_writes:
return datastore_rpc.Configuration(force_writes=force_writes)
else:
# dev server doesn't support force_writes.
return datastore_rpc.Configuration()
def _set_ndb_cache_policy():
"""Tell NDB to never cache anything in memcache or in-process.
This ensures that entities fetched from Datastore input_readers via NDB
will not bloat up the request memory size and Datastore Puts will avoid
doing calls to memcache. Without this you get soft memory limit exits,
which hurts overall throughput.
"""
ndb_ctx = ndb.get_context()
ndb_ctx.set_cache_policy(lambda key: False)
ndb_ctx.set_memcache_policy(lambda key: False)
def _obj_to_path(obj):
"""Returns the fully qualified path to the object.
Args:
obj: obj must be a new style top level class, or a top level function.
No inner function or static method.
Returns:
Fully qualified path to the object.
Raises:
TypeError: when argument obj has unsupported type.
ValueError: when obj can't be discovered on the top level.
"""
if obj is None:
return obj
if inspect.isclass(obj) or inspect.isfunction(obj):
fetched = getattr(sys.modules[obj.__module__], obj.__name__, None)
if fetched is None:
raise ValueError(
"Object %r must be defined on the top level of a module." % obj)
return "%s.%s" % (obj.__module__, obj.__name__)
raise TypeError("Unexpected type %s." % type(obj))
def strip_prefix_from_items(prefix, items):
"""Strips out the prefix from each of the items if it is present.
Args:
prefix: the string for that you wish to strip from the beginning of each
of the items.
items: a list of strings that may or may not contain the prefix you want
to strip out.
Returns:
items_no_prefix: a copy of the list of items (same order) without the
prefix (if present).
"""
items_no_prefix = []
for item in items:
if item.startswith(prefix):
items_no_prefix.append(item[len(prefix):])
else:
items_no_prefix.append(item)
return items_no_prefix
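if __name__ == "__main__":
    # Minimal usage sketch for the helpers above. Running this module directly still
    # requires the App Engine SDK on the import path (see the module-level imports),
    # which is an assumption of this example.
    import datetime
    print(parse_bool("Yes"))                                          # True
    print(parse_bool(0))                                              # False
    print(get_short_name("mapreduce.util.for_name"))                  # "for_name"
    print(total_seconds(datetime.timedelta(days=1, microseconds=5)))  # 86401 (rounded up)
    print(strip_prefix_from_items("/base/", ["/base/a.txt", "b.txt"]))  # ['a.txt', 'b.txt']
    print(for_name("math.sqrt")(16.0))                                # 4.0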
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_lag.js
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"TOO",
"MUU"
],
"DAY": [
"Jumap\u00ediri",
"Jumat\u00e1tu",
"Juma\u00edne",
"Jumat\u00e1ano",
"Alam\u00edisi",
"Ijum\u00e1a",
"Jumam\u00f3osi"
],
"MONTH": [
"K\u0289f\u00fangat\u0268",
"K\u0289naan\u0268",
"K\u0289keenda",
"Kwiikumi",
"Kwiinyamb\u00e1la",
"Kwiidwaata",
"K\u0289m\u0289\u0289nch\u0268",
"K\u0289v\u0268\u0268r\u0268",
"K\u0289saat\u0289",
"Kwiinyi",
"K\u0289saano",
"K\u0289sasat\u0289"
],
"SHORTDAY": [
"P\u00edili",
"T\u00e1atu",
"\u00cdne",
"T\u00e1ano",
"Alh",
"Ijm",
"M\u00f3osi"
],
"SHORTMONTH": [
"F\u00fangat\u0268",
"Naan\u0268",
"Keenda",
"Ik\u00fami",
"Inyambala",
"Idwaata",
"M\u0289\u0289nch\u0268",
"V\u0268\u0268r\u0268",
"Saat\u0289",
"Inyi",
"Saano",
"Sasat\u0289"
],
"fullDate": "EEEE, d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y h:mm:ss a",
"mediumDate": "d MMM y",
"mediumTime": "h:mm:ss a",
"short": "dd/MM/y h:mm a",
"shortDate": "dd/MM/y",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "TSh",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4\u00a0-",
"negSuf": "",
"posPre": "\u00a4\u00a0",
"posSuf": ""
}
]
},
"id": "lag",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/FeLS-1.4.0-py3-none-any.whl/fels/sentinel2.py
from __future__ import absolute_import, division, print_function
import csv
import datetime
import os
import sys
import shutil
import glob
import numpy as np
import xml.etree.ElementTree as ET
from tempfile import NamedTemporaryFile
try:
from urllib2 import urlopen
from urllib2 import HTTPError
from urllib2 import URLError
except ImportError:
from urllib.request import urlopen, HTTPError, URLError
try:
from osgeo import gdal
except ImportError:
raise ImportError("""Could not find the GDAL/OGR Python library bindings. Using conda \
(recommended) use: conda config --add channels conda-forge && conda install gdal""")
from fels.utils import *
def query_sentinel2_catalogue(collection_file, cc_limit, date_start, date_end, tile, latest=False):
"""Query the Sentinel-2 index catalogue and retrieve urls for the best images found."""
print("Searching for Sentinel-2 images in catalog...")
cc_values = []
all_urls = []
all_acqdates = []
with open(collection_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
year_acq = int(row['SENSING_TIME'][0:4])
month_acq = int(row['SENSING_TIME'][5:7])
day_acq = int(row['SENSING_TIME'][8:10])
acqdate = datetime.datetime(year_acq, month_acq, day_acq)
if row['MGRS_TILE'] == tile and float(row['CLOUD_COVER']) <= cc_limit \
and date_start < acqdate < date_end:
all_urls.append(row['BASE_URL'])
cc_values.append(float(row['CLOUD_COVER']))
all_acqdates.append(acqdate)
if latest and all_urls:
return [sort_url_list(cc_values, all_acqdates, all_urls).pop()]
return sort_url_list(cc_values, all_acqdates, all_urls)
def get_sentinel2_image(url, outputdir, overwrite=False, partial=False, noinspire=False, reject_old=False):
"""
Collect the entire dir structure of the image files from the
manifest.safe file and build the same structure in the output
location.
Returns:
True if image was downloaded
False if partial=True and the tile was only partially present (the incomplete download is removed)
or if reject_old=True and it is old-format
or if noinspire=False and INSPIRE file is missing
"""
img = os.path.basename(url)
target_path = os.path.join(outputdir, img)
target_manifest = os.path.join(target_path, "manifest.safe")
return_status = True
if not os.path.exists(target_path) or overwrite:
manifest_url = url + "/manifest.safe"
if reject_old:
# check contents of manifest before downloading the rest
content = urlopen(manifest_url)
with NamedTemporaryFile() as f:
shutil.copyfileobj(content, f)
if not is_new(f.name):
return False
os.makedirs(target_path, exist_ok=True)
content = urlopen(manifest_url)
with open(target_manifest, 'wb') as f:
shutil.copyfileobj(content, f)
with open(target_manifest, 'r') as manifest_file:
manifest_lines = manifest_file.read().split()
for line in manifest_lines:
if 'href' in line:
rel_path = line[line.find('href=".')+7:]
rel_path = rel_path[:rel_path.find('"')]
abs_path = os.path.join(target_path, *rel_path.split('/')[1:])
if not os.path.exists(os.path.dirname(abs_path)):
os.makedirs(os.path.dirname(abs_path))
try:
download_file(url + rel_path, abs_path)
except HTTPError as error:
print("Error downloading {} [{}]".format(url + rel_path, error))
continue
granule = os.path.dirname(os.path.dirname(get_S2_image_bands(target_path, "B01")))
for extra_dir in ("AUX_DATA", "HTML"):
if not os.path.exists(os.path.join(target_path, extra_dir)):
os.makedirs(os.path.join(target_path, extra_dir))
if not os.path.exists(os.path.join(granule, extra_dir)):
os.makedirs(os.path.join(granule, extra_dir))
if not manifest_lines:
print("Warning: empty manifest.safe for image %s" % img)
elif reject_old and not is_new(target_manifest):
print(f'Warning: old-format image {outputdir} exists')
return_status = False
if partial:
tile_chk = check_full_tile(get_S2_image_bands(target_path, "B01"))
if tile_chk == 'Partial':
print("Removing partial tile image files...")
shutil.rmtree(target_path)
return_status = False
if not noinspire:
inspire_file = os.path.join(target_path, "INSPIRE.xml")
if os.path.isfile(inspire_file):
inspire_path = get_S2_INSPIRE_title(inspire_file)
if os.path.basename(target_path) != inspire_path:
os.rename(target_path, inspire_path)
else:
print(f"File {inspire_file} could not be found.")
return_status = False
return return_status
def get_S2_image_bands(image_path, band):
image_name = os.path.basename(image_path)
tile = image_name.split("_")[5]
list_dirs = os.listdir(os.path.join(image_path, 'GRANULE'))
match = [x for x in list_dirs if x.find(tile) > 0][0]
list_files = os.path.join(image_path, 'GRANULE', match, 'IMG_DATA')
files = glob.glob(list_files + "/*.jp2")
match_band = [x for x in files if x.find(band) > 0][0]
return match_band
def get_S2_INSPIRE_title(image_inspire_xml):
tree = ET.parse(image_inspire_xml)
chartstring_element = tree.findall(
".//{http://www.isotc211.org/2005/gmd}identificationInfo/{http://www.isotc211.org/2005/gmd}MD_DataIdentification/{http://www.isotc211.org/2005/gmd}citation/{http://www.isotc211.org/2005/gmd}CI_Citation/{http://www.isotc211.org/2005/gmd}title/{http://www.isotc211.org/2005/gco}CharacterString")
s2_file_inspire_title = chartstring_element[0].text
return s2_file_inspire_title
def check_full_tile(image):
gdalData = gdal.Open(image)
if gdalData is None:
sys.exit("ERROR: can't open raster")
# get width and heights of the raster
xsize = gdalData.RasterXSize
ysize = gdalData.RasterYSize
# process the raster
band_i = gdalData.GetRasterBand(1)
raster = band_i.ReadAsArray()
# create dictionary for unique values count
count = {}
# count unique values for the given band
for col in range(xsize):
for row in range(ysize):
cell_value = raster[row, col]
# check if cell_value is NaN
if cell_value == 0:
# add cell_value to dictionary
if cell_value in count:
count[cell_value] += 1
else:
count[cell_value] = 1
break
for key in sorted(count.keys()):
if count[key] is not None:
return "Partial"
def is_new(safedir_or_manifest):
'''
Check if a S2 scene is in the new (after Nov 2016) format.
If the scene is already downloaded, the safedir directory structure can be crawled to determine this.
If not, download the manifest.safe first for an equivalent check.
Example:
>>> safedir = 'S2A_MSIL1C_20160106T021717_N0201_R103_T52SDG_20160106T094733.SAFE/'
>>> manifest = os.path.join(safedir, 'manifest.safe')
>>> assert is_new(safedir) == False
>>> assert is_new(manifest) == False
'''
if os.path.isdir(safedir_or_manifest):
safedir = safedir_or_manifest
# if this file does not have the standard name (len==0), the scene is old format.
# if it is duplicated (len>1), there are multiple granuledirs and we don't want that.
return len(glob.glob(os.path.join(safedir, 'GRANULE', '*', 'MTD_TL.xml'))) == 1
elif os.path.isfile(safedir_or_manifest):
manifest = safedir_or_manifest
with open(manifest, 'r') as f:
lines = f.read().split()
return len([l for l in lines if 'MTD_TL.xml' in l]) == 1
else:
raise ValueError(f'{safedir_or_manifest} is not a safedir or manifest')
def _dedupe(safedirs, to_return=None):
'''
Remove old-format scenes from a list of Google Cloud S2 safedirs
WARNING: this heuristic is usually, but not always, true.
Therefore, it is deprecated in favor of is_new, which requires parsing the actual content of the image.
A failure case:
https://console.cloud.google.com/storage/browser/gcp-public-data-sentinel-2/tiles/52/S/DG/S2A_MSIL1C_20160106T021702_N0201_R103_T52SDG_20160106T021659.SAFE
https://console.cloud.google.com/storage/browser/gcp-public-data-sentinel-2/tiles/52/S/DG/S2A_MSIL1C_20160106T021717_N0201_R103_T52SDG_20160106T094733.SAFE
These are the same scene. The first link is new-format. They *should* have the same sensing time, but the second one is offset by 15 ms for unknown reasons.
Args:
to_return: a list of other products (eg urls) indexed to safedirs.
if provided, dedupe this as well.
'''
_safedirs = np.array(sorted(safedirs))
datetimes = [safedir_to_datetime(s) for s in _safedirs]
prods = [safedir_to_datetime(s, product=True) for s in _safedirs]
# first sorted occurrence should be the earliest product discriminator
_, idxs = np.unique(datetimes, return_index=True)
if to_return is None:
return _safedirs[idxs]
else:
return _safedirs[idxs], np.array(sorted(to_return))[idxs]
def safedir_to_datetime(string, product=False):
'''
Example:
>>> from datetime import datetime
>>> s = 'S2B_MSIL1C_20181010T021649_N0206_R003_T52SDG_20181010T064007.SAFE'
>>> dt = safedir_to_datetime(s)
>>> assert dt == datetime(2018, 10, 10, 2, 16, 49)
References:
https://sentinel.esa.int/web/sentinel/user-guides/sentinel-2-msi/naming-convention
'''
if not product:
dt_str = string.split('_')[2] # this is the "datatake sensing time"
else:
dt_str = string.split('_')[6].strip(
'.SAFE') # this is the "Product Discriminator"
d_str, t_str = dt_str.split('T')
d = list(map(int, [d_str[:4], d_str[4:6], d_str[6:]]))
t = list(map(int, [t_str[:2], t_str[2:4], t_str[4:]]))
return datetime.datetime(*d, *t)
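if __name__ == "__main__":
    # Minimal sketch: query a locally downloaded copy of the Google Cloud Sentinel-2
    # index CSV and fetch one matching scene. "index.csv" and "./scenes" are
    # placeholders; the index must provide the SENSING_TIME, MGRS_TILE, CLOUD_COVER
    # and BASE_URL columns read above.
    urls = query_sentinel2_catalogue(
        "index.csv",
        cc_limit=20.0,
        date_start=datetime.datetime(2018, 1, 1),
        date_end=datetime.datetime(2018, 12, 31),
        tile="52SDG",
        latest=True,
    )
    for url in urls:
        downloaded = get_sentinel2_image(url, "./scenes", partial=True, reject_old=True)
        print(url, "->", "ok" if downloaded else "skipped")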
/DeepSpectrumLite-1.0.2.tar.gz/DeepSpectrumLite-1.0.2/src/deepspectrumlite/lib/hyperparameter.py
import itertools
import json
from tensorboard.plugins.hparams import api as hp
class HyperParameterList:
def __init__(self, config_file_name: str):
f = open(config_file_name)
data = json.load(f)
f.close()
self._config = data
self._param_list = {}
self.load_configuration()
def get_hparams(self):
hparams = []
for key in self._config:
hparams.append(self._param_list[key])
return hparams
def load_configuration(self):
self._param_list = {}
for key in self._config:
self._param_list[key] = hp.HParam(key, hp.Discrete(self._config[key]))
def get_max_iteration(self):
count = 1
for key in self._config:
count = count * len(self._config[key])
return count
def get_values_tensorboard(self, iteration_no: int):
if iteration_no >= self.get_max_iteration():
raise ValueError('iteration_no must satisfy 0 <= iteration_no < ' + str(self.get_max_iteration()))
configuration_space = []
for key in self._config:
configurations = []
for v in self._config[key]:
configurations.append({key: v})
configuration_space.append(configurations)
perturbations = list(itertools.product(*configuration_space))
perturbation = perturbations[iteration_no]
hparams = {}
for param in perturbation:
for key in param:
k = self._param_list[key]
hparams[k] = param[key]
return hparams
def get_values(self, iteration_no: int):
if iteration_no >= self.get_max_iteration():
raise ValueError('iteration_no must satisfy 0 <= iteration_no < ' + str(self.get_max_iteration()))
configuration_space = []
for key in self._config:
configurations = []
for v in self._config[key]:
configurations.append({key: v})
configuration_space.append(configurations)
perturbations = list(itertools.product(*configuration_space))
perturbation = perturbations[iteration_no]
hparams = {}
for param in perturbation:
for key in param:
hparams[key] = param[key]
return hparams
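if __name__ == "__main__":
    # Minimal sketch: build a small hyper-parameter grid, write it to a temporary
    # JSON file (the class reads its configuration from disk), and enumerate every
    # combination. The parameter names below are illustrative, not keys required by
    # DeepSpectrumLite.
    import tempfile
    grid = {"learning_rate": [0.001, 0.0001], "batch_size": [16, 32]}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
        json.dump(grid, tmp)
        config_path = tmp.name
    hparam_list = HyperParameterList(config_path)
    print("total combinations:", hparam_list.get_max_iteration())  # 4
    for i in range(hparam_list.get_max_iteration()):
        print(i, hparam_list.get_values(i))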
/HDF5eis-0.1.1rc0.tar.gz/HDF5eis-0.1.1rc0/hdf5eis/core.py
import pathlib
import re
import sys
import warnings
# Third party imports
import h5py
import numpy as np
import pandas as pd
# Local imports
from ._version import __version__
from . import exceptions
from . import gather as gm
NULL_UTF8_FORMAT = 'NULL'
NULL_TABLE_FORMAT = 'NULL'
STRING_DTYPE = h5py.string_dtype(encoding='utf-8')
TS_INDEX_COLUMNS = ['tag', 'start_time', 'end_time', 'sampling_rate', 'npts']
TS_INDEX_DTYPES = {
'tag': STRING_DTYPE,
'start_time': pd.api.types.DatetimeTZDtype(tz='UTC'),
'end_time': pd.api.types.DatetimeTZDtype(tz='UTC'),
'sampling_rate': np.float32,
'npts': np.int64,
}
COMPATIBLE_VERSIONS = [
'0.1.0',
'0.1.1pre'
]
if sys.platform in ('darwin', 'linux'):
UTF8_DECODER = np.vectorize(lambda x: x.decode('UTF-8'))
elif sys.platform == 'win32':
UTF8_DECODER = lambda x: x
else:
raise NotImplementedError
class File(h5py.File):
'''
An h5py.File subclass for convenient I/O of big, multidimensional
timeseries data from environmental sensors. This class provides
the core functionality for building and processing HDF5eis files.
'''
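    # Usage sketch (added comment, not part of the original source; the file
    # name and tag are hypothetical, and it is assumed that File is re-exported
    # at the package level as hdf5eis.File):
    #
    #     import numpy as np
    #     import hdf5eis
    #
    #     with hdf5eis.File('demo.hdf5eis', mode='w') as f:
    #         data = np.random.rand(8, 1000)   # 8 channels, 1000 samples
    #         f.timeseries.add(data, '2022-01-01T00:00:00Z', 100.0, tag='demo/raw')
    #         f.metadata.add('{"network": "XX"}', 'network_info', fmt='JSON')
    #
    # The accessors exposed by the properties below (metadata, products,
    # timeseries) handle all reads and writes beneath their respective groups.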
def __init__(self, *args, overwrite=False, validate=True, **kwargs):
'''
Initialize hdf5eis.File object.
Parameters
----------
*args :
These are passed directly to the super class initializer.
Must contain a file path (str or bytes) or a file-like
object.
overwrite : bool, optional
Whether or not to overwrite an existing file if one exists
at the given location. The default is False.
validate : bool, optional
Whether or not to validate the file structure. This may be
slow for very large files and can be turned off. The default
is True.
**kwargs :
These are passed directly to super class initializer.
Raises
------
ValueError if mode='w' and file already exists.
Returns
-------
None.
'''
if 'mode' not in kwargs:
kwargs['mode'] = 'r'
if (
kwargs['mode'] == 'w'
and overwrite is False
and pathlib.Path(args[0]).exists()
):
raise (
ValueError(
'File already exists! If you are sure you want to open it'
' in write mode, use `overwrite=True`.'
)
)
super().__init__(*args, **kwargs)
self._metadata = AuxiliaryAccessor(self, '/metadata')
self._products = AuxiliaryAccessor(self, '/products')
self._timeseries = TimeseriesAccessor(self, '/timeseries')
if validate is True:
self.validate()
@property
def metadata(self):
'''
Provides functionality to manipulate the '/metadata'
group.
Returns
-------
hdf5eis.AuxiliaryAccessor
Provides functionality to manipulate the '/metadata'
group.
'''
return self._metadata
@property
def products(self):
'''
Provides functionality to manipulate the '/products'
group.
Returns
-------
hdf5eis.AuxiliaryAccessor
Provides functionality to manipulate the '/products'
group.
'''
return self._products
@property
def timeseries(self):
'''
Provides functionality to manipulate the '/timeseries'
group.
Returns
-------
hdf5eis.TimeseriesAccessor
Provides functionality to manipulate the '/timeseries'
group.
'''
return self._timeseries
@property
def version(self):
'''
Return the schema version of this file.
Returns
-------
None.
'''
return self.attrs['__VERSION']
def validate(self):
'''
Validate file structure and raise error
'''
self._validate_version()
self._validate_keys()
self._validate_accessors()
def _validate_keys(self):
'''
Check that the file only contains /metadata, /products, and
/timeseries keys.
Raises
------
KeyError
Raises KeyError if an Group other than /metadata, /products,
or /timeseries is found in file.
Returns
-------
None.
'''
for key in self:
if key not in ('metadata', 'products', 'timeseries'):
raise KeyError(f'Invalid Group(={key}) found in file.')
def _validate_accessors(self):
'''
Validate /metadata and /products Groups.
Returns
-------
None.
'''
for accessor in (self.metadata, self.products, self.timeseries):
accessor.validate()
def _validate_version(self):
'''
Check that the version of this file is compatible against
the current code base.
Raises
------
VersionError
Raises VersionError if the file version is incompatible
with the current code version.
Returns
-------
None.
'''
# If __VERSION is not set, try to set it.
if '__VERSION' not in self.attrs:
self.attrs['__VERSION'] = __version__
if self.attrs['__VERSION'] not in COMPATIBLE_VERSIONS:
version = self.attrs['__VERSION']
raise exceptions.VersionError(
f'File version(={version}) is not compatible with code '
f'version(={__version__}).\n'
f'Compatible versions: {", ".join(COMPATIBLE_VERSIONS)}.'
)
class AccessorBase:
'''
Abstract base class of Accessor classes.
'''
def __init__(self, parent, root):
'''
Initializer.
Parameters
----------
parent : h5py.File or h5py.Group
The parent of the group this accessor provides access to.
root : str
The name of group nested within parent this accessor
provides access to.
Returns
-------
None.
'''
self._parent = parent
self._root = self._parent.require_group(root)
@property
def parent(self):
'''
The parent of the group to which this accessor provides access.
Returns
-------
h5py.File or h5py.Group
The parent of the group to which this accessor provides
access.
'''
return self._parent
@property
def root(self):
'''
The group to which this accessor provides access.
Returns
-------
h5py.Group
The group to which this accessor provides access.
'''
return self._root
def add_table(self, dataf, key, fmt=NULL_TABLE_FORMAT):
'''
Add DataFrame `dataf` to root group under `key`.
Parameters
----------
dataf : pandas.DataFrame
DataFrame to add to root group.
key : str
Key value under which to add table.
Returns
-------
None.
'''
self.root.create_group(key)
self.write_table(dataf, key, fmt=fmt)
def list_tables(self):
'''
Return a list of all tables below root.
Returns
-------
names : list
A list of names of tables beneath root.
'''
names = list()
def is_table(name, obj):
if '__TYPE' in obj.attrs and obj.attrs['__TYPE'] == 'TABLE':
names.append(name)
self.root.visititems(is_table)
return names
def read_table(self, key):
'''
Read data table stored in root group under key.
Parameters
----------
key : str
Key of data table in root group to read.
Returns
-------
dataf : pandas.DataFrame
The table data stored under key in root group.
'''
group = self.root[key]
dataf = pd.DataFrame(index=group['__INDEX'][:])
for column in filter(lambda key: key != '__INDEX', group.keys()):
dataf[column] = group[column][:]
if group[column].attrs['__IS_UTC_DATETIME64'] is np.bool_(True):
dataf[column] = pd.to_datetime(dataf[column], utc=True)
elif group[column].attrs['__IS_UTF8'] is np.bool_(True):
dataf[column] = UTF8_DECODER(dataf[column])
fmt = group.attrs['__FORMAT']
return dataf, fmt
def validate(self):
self.validate_tables()
def validate_tables(self):
for name in self.list_tables():
_validate_table(self.root[name])
def write_table(self, dataf, key, fmt=NULL_TABLE_FORMAT):
'''
Write data table to the parent group under key.
Parameters
----------
dataf : pandas.DataFrame
DataFrame to write to disk.
key : str
Key under which to write data.
Returns
-------
None.
'''
group = self.root[key]
group.attrs['__TYPE'] = 'TABLE'
        group.attrs['__FORMAT'] = fmt  # honor the caller-supplied format (e.g. 'TIMESERIES_INDEX')
for column in dataf.columns:
self.write_column(dataf[column], key)
if '__INDEX' in self.root[key]:
del self.root[f'{key}/__INDEX']
self.root[key].create_dataset('__INDEX', data=dataf.index.values)
def write_column(self, column, key):
'''
Write a single column of data to the self.root Group of
self.parent.
Parameters
----------
column : pandas.Series
Column of data to write.
key : str
Key value under which to store data.
Returns
-------
None.
'''
is_utc_datetime64 = pd.api.types.is_datetime64_any_dtype(column)
if (
(
hasattr(column.dtype, 'char')
and
column.dtype.char == np.dtype('S')
)
or
column.dtype == np.dtype('O')
):
is_utf8 = True
else:
is_utf8 = False
if is_utc_datetime64:
if column.dt.tz is None:
                warnings.warn(f'Time zone of \'{column.name}\' is not set. Assuming UTC.')
column = column.dt.tz_localize('UTC')
column = column.dt.tz_convert('UTC')
column = column.astype(np.int64)
if f'{key}/{column.name}' in self.root:
del self.root[f'{key}/{column.name}']
values = column.values
datas = self.root[key].create_dataset(
column.name,
data=values.astype(STRING_DTYPE) if is_utf8 else values
)
datas.attrs['__IS_UTC_DATETIME64'] = is_utc_datetime64
datas.attrs['__IS_UTF8'] = is_utf8
class HDF5eisFileFormatError(Exception):
'''
An Exception indicating that the current file is improperly
formatted.
'''
class AuxiliaryAccessor(AccessorBase):
'''
Accessor class for auxiliary (i.e., /metadata and /products groups)
data.
'''
def __getitem__(self, key):
'''
Read the item under `key` from this group.
'''
dtype = self.root[key].attrs['__TYPE']
if dtype == 'TABLE':
return self.read_table(key)
if dtype == 'UTF-8':
return UTF8_DECODER(self.root[key][0]), self.root[key].attrs['__FORMAT']
raise HDF5eisFileFormatError(f'Unknown data type {dtype} for key {key}.')
def add(self, obj, key, fmt=None):
'''
Add a table or UTF-8 encoded byte stream to the root group.
Parameters
----------
obj : pandas.DataFrame, str, or bytes
DataFrame, string or stream of UTF-8 encoded bytes to add
to root group under key.
key : str
Key under which to add the data object.
Returns
-------
None.
'''
if isinstance(obj, pd.DataFrame):
fmt = fmt if fmt is not None else NULL_TABLE_FORMAT
self.add_table(obj, key, fmt=fmt)
elif isinstance(obj, (str, bytes)):
fmt = fmt if fmt is not None else NULL_UTF8_FORMAT
self.add_utf8(obj, key, fmt=fmt)
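    # Usage sketch (added comment, not in the original source; names are
    # hypothetical). With `f` an open hdf5eis.File, a DataFrame and a UTF-8
    # string are stored and retrieved through the same accessor:
    #
    #     f.metadata.add(stations_dataf, 'stations', fmt='STATION_TABLE')
    #     dataf, fmt = f.metadata['stations']      # table path (read_table)
    #     f.products.add('{"model": "v1"}', 'model_info', fmt='JSON')
    #     text, fmt = f.products['model_info']     # UTF-8 path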
def add_utf8(self, data, key, fmt=NULL_UTF8_FORMAT):
'''
Add UTF-8 encoded data to root group under key.
Parameters
----------
data : str or bytes
Data to add to root group under key.
key : str
Key under which to add the data object.
Returns
-------
None.
'''
        if isinstance(data, str):
            data = data.encode('utf-8')
        self.root.create_dataset(
            key, data=[data], dtype=STRING_DTYPE
        )
self.root[key].attrs['__TYPE'] = 'UTF-8'
self.root[key].attrs['__FORMAT'] = fmt
def link(self, src_file, src_path, key):
'''
Create and external link a data table or UTF-8 encoded byte
stream in the root group under key.
Parameters
----------
src_file : path-like
Path to source file containing data to be externally
linked to root group.
src_path : str
Path within source file to dataset or group to be linked.
key : str
Key within the root group under which to link external
data.
Returns
-------
None.
'''
self.root[key] = h5py.ExternalLink(src_file, src_path)
def list_utf8(self):
'''
Return a list of all UTF-8 encoded strings below root.
Returns
-------
names : list
A list of names of UTF-8 encoded strings beneath root.
'''
names = list()
def is_utf8(name, obj):
if '__TYPE' in obj.attrs and obj.attrs['__TYPE'] == 'UTF-8':
names.append(name)
self.root.visititems(is_utf8)
return names
def validate(self):
self.validate_tables()
self.validate_utf8()
def validate_utf8(self):
for name in self.list_utf8():
dtype = self.root[name].dtype
if dtype != STRING_DTYPE:
raise(exceptions.UTF8FormatError(
f'UTF-8 encoded string has the wrong dtype(={dtype}).'
))
            if '__FORMAT' not in self.root[name].attrs:
raise(exceptions.UTF8FormatError(
'UTF-8 encoded string has no __FORMAT specified.'
))
class TimeseriesAccessor(AccessorBase):
'''
Accessor class for timeseries data.
'''
@property
def index(self):
'''
Tabular index of contents in /timeseries group.
Returns
-------
pandas.DataFrame
Index of contents in /timeseries group.
'''
if not hasattr(self, '_index'):
if '__TS_INDEX' in self.root:
self._index, fmt = self.read_table('__TS_INDEX')
else:
self._index = pd.DataFrame(columns=TS_INDEX_COLUMNS)
self._index = self._index.astype(TS_INDEX_DTYPES)
return self._index
@index.setter
def index(self, value):
self._index = value
def add(self, data, start_time, sampling_rate, tag='', **kwargs):
'''
Add timeseries data to the parent HDF5eis file.
Parameters
----------
data : array-like
Data array of any shape to add to file.
start_time : str, int, float, or pandas.Timestamp
The UTC time of the first sample in data. This value is
internally converted to a pandas.Timestamp by
pandas.to_datetime().
sampling_rate : int, float
The temporal sampling rate of data in units of samples per
second.
tag : str, optional
Tag to associate with data. The default is ''.
**kwargs :
            Additional keyword arguments are passed directly to the
            h5py.Group.create_dataset() method and can be used, for
example, to choose the chunk layout and compression options.
Returns
-------
None.
'''
if 'dtype' not in kwargs:
kwargs['dtype'] = data.dtype
datas = self.create_dataset(
data.shape,
start_time,
sampling_rate,
tag=tag,
**kwargs,
)
datas[:] = data
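    # Usage sketch (added comment, not part of the original source; the tag and
    # chunk shape are hypothetical). Chunking/compression kwargs are forwarded
    # to h5py.Group.create_dataset():
    #
    #     f.timeseries.add(
    #         data,                        # e.g. shape (n_channels, n_samples)
    #         '2022-01-01T00:00:00Z',
    #         250.0,
    #         tag='array1/raw',
    #         chunks=(1, 25000),
    #         compression='gzip',
    #     )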
def create_dataset(
self, shape, start_time, sampling_rate, tag='', **kwargs
):
'''
Create and return an empty data set.
Parameters
----------
shape : tuple
The shape of the empty data set.
start_time : str, int, float, or pandas.Timestamp
The UTC time of the first sample in data. This value is
internally converted to a pandas.Timestamp by
pandas.to_datetime().
sampling_rate : int, float
The temporal sampling rate of data in units of samples per
second.
tag : str, optional
Tag to associate with data. The default is ''.
        **kwargs :
            Additional keyword arguments are passed directly to the
            h5py.Group.create_dataset() method and can be used, for
            example, to choose the chunk layout and compression options.
        Returns
        -------
        h5py.Dataset
            The newly created, empty dataset stored under the handle built
            from tag, start_time, and end_time.
'''
sampling_interval = pd.to_timedelta(1 / sampling_rate, unit='S')
nsamples = shape[-1]
start_time = pd.to_datetime(start_time)
end_time = start_time + sampling_interval * (nsamples - 1)
handle = build_handle(tag, start_time, end_time)
datas = self.root.create_dataset(handle, shape=shape, **kwargs)
datas.attrs['sampling_rate'] = sampling_rate
row = pd.DataFrame(
[[tag, start_time, end_time, sampling_rate, shape[-1]]],
columns=TS_INDEX_COLUMNS,
)
self.index = pd.concat([self.index, row], ignore_index=True)
self.flush_index()
return self.root[handle]
def flush_index(self):
'''
Flush the self.index attribute to disk.
Returns
-------
None.
'''
if '__TS_INDEX' not in self.root:
self.add_table(
self.index.astype(TS_INDEX_DTYPES),
'__TS_INDEX',
fmt='TIMESERIES_INDEX'
)
else:
self.write_table(
self.index.astype(TS_INDEX_DTYPES),
'__TS_INDEX',
fmt='TIMESERIES_INDEX'
)
def link_tag(
self,
src_file,
src_tag,
prefix=None,
suffix=None,
new_tag=None
):
'''
Links timeseries data from an external file to the current file.
Parameters
----------
src_file : str or pathlib.Path
Path to external file to be linked.
src_tag : str
Tag in external file to be linked.
prefix : str, optional
Prefix for new tag. The default is None.
suffix : str, optional
Suffix for new tag. The default is None.
new_tag : str, optional
New tag. The default is None.
Returns
-------
None.
'''
assert prefix is None or isinstance(prefix, str)
assert suffix is None or isinstance(suffix, str)
assert new_tag is None or isinstance(new_tag, str)
if new_tag is None:
new_tag = '' if prefix is None else prefix
new_tag = '/'.join((new_tag, src_tag))
new_tag = new_tag if suffix is None else '/'.join((new_tag, suffix))
new_tag = new_tag.lstrip('/')
new_index = list()
with h5py.File(src_file, mode='r') as file:
accessor = TimeseriesAccessor(file, '/timeseries')
index = accessor.index
index = index[index['tag'].str.match(src_tag)]
for _, row in index.iterrows():
src_handle = '/'.join((
'/timeseries',
build_handle(
row['tag'],
row['start_time'],
row['end_time']
),
))
new_handle = '/'.join(
(new_tag, src_handle.rsplit('/', maxsplit=1)[-1])
)
self.root[new_handle] = h5py.ExternalLink(src_file, src_handle)
row['tag'] = '/'.join(
(new_tag, row['tag'].lstrip(src_tag))
).strip('/')
new_index.append(row)
self.index = pd.concat(
[self.index, pd.DataFrame(new_index)],
ignore_index=True
)
self.flush_index()
def __getitem__(self, key):
'''
Return a list of hdf5eis.gather.Gather objects.
The first element of `key` must be a `str` tag. The last
element must be a time slice.
Assumes data are regularly sampled between requested start and end times.
'''
assert isinstance(key[0], str)
assert key[-1].start is not None and key[-1].stop is not None
# key is a tuple
tag = key[0]
start_time = pd.to_datetime(key[-1].start, utc=True)
end_time = pd.to_datetime(key[-1].stop, utc=True)
index = self.reduce_index(tag, start_time, end_time)
# Read data for each tag.
gathers = dict()
for tag in index['tag'].unique():
gathers[tag] = list()
gathers[tag] = self.read_data(
index[index['tag'] == tag],
start_time,
end_time,
key
)
return gathers
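    # Usage sketch (added comment, not part of the original source; the tag and
    # times are hypothetical). The key is (tag, [extra slices, ] time slice) and
    # the result maps each matching tag to a list of hdf5eis.Gather objects, one
    # per continuous segment:
    #
    #     gathers = f.timeseries[
    #         'array1/raw',
    #         ...,
    #         '2022-01-01T00:00:00Z':'2022-01-01T01:00:00Z'
    #     ]
    #     first_segment = gathers['array1/raw'][0]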
def read_data(self, index, start_time, end_time, key):
'''
Read data between start and end time with respect to the given
index and key.
Parameters
----------
index : pandas.DataFrame
A subset of self.index corresponding to the requested data.
This DataFrame is the return value of self.reduce_index().
Every continuous block of data associated with a single tag
needs to have a unique segment ID. The index of this
DataFrame must be the segment ID.
start_time : pandas.Timestamp
The time of the earliest sample requested.
end_time : pandas.Timestamp
The time of the latest sample requested.
key : tuple
A tuple of slice indexes with which to slice the data along
each axis except the last (time) axis.
Returns
-------
gathers : list
A list of hdf5eis.Gather objects containing requested data.
'''
gathers = list()
for segment_id in index.index.unique():
gathers.append(
self.read_segment(
index.loc[[segment_id]],
start_time,
end_time,
key
)
)
return gathers
def read_segment(self, rows, start_time, end_time, key):
'''
Read the continuous segment of data corresponding to the given
set of index rows and between given start and end times.
Parameters
----------
rows : pandas.DataFrame
A set of index rows for a continuous segment of data. These
should be the rows corresponding to a single segment ID as
determined by self.reduce_index().
start_time : pandas.Timestamp
Time of the first requested sample.
end_time : pandas.Timestamp
Time of the last requested sample.
key : tuple
Slice index with which to slice the data being read. These
slices will be applied along every storage axis except the
last (time) axis.
Returns
-------
hdf5eis.Gather
An hdf5eis.Gather object containing the requested continuous
segment of data.
'''
# Make sure the sampling rate doesn't change mid stream.
assert len(rows['sampling_rate'].unique()) == 1
nsamples, first_sample = determine_segment_sample_range(
rows,
start_time,
end_time
)
sampling_rate = rows.iloc[0]['sampling_rate']
data, jstart = None, 0
for _, row in rows.iterrows():
sample_range = (
_sample_idx(
start_time,
row['start_time'],
row['sampling_rate']
),
(
_sample_idx(
min(end_time, row['end_time']),
row['start_time'],
row['sampling_rate'],
right=True
)
+ 1
)
)
handle_key = (
build_handle(
row['tag'],
row['start_time'],
row['end_time']
),
(*key[1:-1], slice(*sample_range))
)
assert (
len(handle_key[1]) == self.root[handle_key[0]].ndim
or
Ellipsis in handle_key[1]
)
if data is None:
shape = (
*get_shape(
self.root[handle_key[0]].shape[:-1],
handle_key[1][:-1]
),
nsamples,
)
data = np.empty(shape, dtype=self.root[handle_key[0]].dtype)
data[
...,
jstart: jstart+sample_range[1]-sample_range[0]
] = self.root[handle_key[0]][handle_key[1]]
jstart += sample_range[1]-sample_range[0]
return gm.Gather(
data,
first_sample,
sampling_rate
)
def reduce_index(self, tag, start_time, end_time):
'''
Reduce the index to the set of rows referring to data between
        the given start and end times and matching the given tag.
Parameters
----------
tag : str
Tag of requested data. Regular expressions are valid here.
start_time : pandas.Timestamp
The start time of the requested data.
end_time : pandas.Timestamp
The end time of the requested data.
Returns
-------
index : pandas.DataFrame
The set of rows from self.index matching the given tag and
time range.
'''
index = self.index
if index is None:
warnings.warn('The timeseries index is empty.')
return None
# Find matching tags.
index = index[index['tag'].str.fullmatch(tag)]
# Find datasets within requested time range.
index = index[
(index['start_time'] < end_time)
            & (index['end_time'] > start_time)
]
if len(index) == 0:
warnings.warn('No data found for specified tag and time range.')
return None
# Sort values by time.
index = index.sort_values('start_time')
sampling_interval = pd.to_timedelta(
1 / index['sampling_rate'],
unit='S'
)
delta = index['start_time'] - index.shift(1)['end_time']
index['segment_id'] = (delta != sampling_interval).cumsum()
index = index.set_index('segment_id')
return index
def validate(self):
self.validate_tables()
self._validate_ts_index()
def _validate_ts_index(self):
'''
Validate the __TS_INDEX table.
Raises
------
exceptions.TSIndexError
            Raises TSIndexError if there is a mismatch between
the __TS_INDEX table and timeseries data.
Returns
-------
None.
'''
for _, row in self.index.iterrows():
handle = build_handle(row['tag'], row['start_time'], row['end_time'])
if handle not in self.root:
raise exceptions.TSIndexError(
f'Expected DataSet at {handle} not found.'
)
elif self.root[handle].shape[-1] != row['npts']:
raise exceptions.TSIndexError(
f'Unexpected number of samples in DataSet at {handle}.'
)
def determine_segment_sample_range(rows, start_time, end_time):
'''
Determine the number of samples and time of the first sample
for data available in given rows between given start and times.
Parameters
----------
rows : pandas.DataFrame
Index rows corresponding to a single continuous data segment.
This should be a subset of index rows returned by
        hdf5eis.TimeseriesAccessor.reduce_index() with a single, unique
segment ID.
start_time : pandas.Timestamp
The time of the first requested sample.
end_time : pandas.Timestamp
The time of the last requested sample.
Returns
-------
nsamples : int
The number of available samples in the specified time range.
first_sample : pandas.Timestamp
The time of the first available sample in the specified time
range.
|-----|-----|-----|-----|-----|-----|
A B
|-----X-----X-----X-----X-----X-----|
Samples X are returned for start_time=A, end_time=B.
'''
sampling_rate = rows.iloc[0]['sampling_rate']
data_start_time = rows.iloc[0]['start_time']
data_end_time = rows.iloc[-1]['end_time']
istart = _sample_idx(start_time, data_start_time, sampling_rate)
iend = (
_sample_idx(
min(end_time, data_end_time),
data_start_time,
sampling_rate,
right=True
)
+ 1
)
nsamples = iend - istart
offset = pd.to_timedelta(istart / sampling_rate, unit='S')
first_sample = data_start_time + offset
return (nsamples, first_sample)
def get_shape(shape, key):
'''
Determine the shape of a sliced array.
Parameters
----------
shape : tuple
The shape of the array before being sliced.
key : tuple
The slice indices.
Returns
-------
tuple
The shape of the sliced array.
'''
new_shape = tuple()
try:
imax = key.index(Ellipsis)
except ValueError:
imax = len(shape)
new_shape = tuple((get_slice_length(key[i], shape[i]) for i in range(imax)))
if imax < len(shape):
new_shape += shape[imax : imax + len(shape) - len(key) + 1]
new_shape += tuple((
get_slice_length(key[i], shape[i])
for i in range(len(key) - 1, imax, -1)
))
return tuple(filter(lambda k: k != 0, new_shape))
def get_slice_length(obj, max_len):
'''
Determine the length of a slice along a single axis.
Parameters
----------
obj : slice, int
The slice index.
max_len : int
The maximum possible length of the slice. The length of the
axis.
Raises
------
ValueError
Raises ValueError if the slice index is not a slice or int object.
Returns
-------
int
The length of the slice.
'''
if isinstance(obj, slice):
if obj == slice(None):
return max_len
istart = 0 if obj.start is None else obj.start
iend = max_len if obj.stop is None else obj.stop
return iend - istart
if isinstance(obj, int):
return 0
if isinstance(obj, (list, tuple, np.ndarray)):
return len(obj)
raise ValueError
def build_handle(tag, start_time, end_time):
'''
Build a properly formatted address to the data referred to by
tag, start_time, and end_time.
Parameters
----------
tag : str
The tag associated with the data.
start_time : pandas.Timestamp
The time of the first sample in the array for which the handle
is being built.
end_time : pandas.Timestamp
The time of the last sample in the array for which the handle
is being built.
Returns
-------
handle : str
The address of the data relative to the accessor root.
'''
tstart = strftime(start_time)
tend = strftime(end_time)
handle = '/'.join((tag, f'__{tstart}__{tend}'))
handle = re.sub('//+', '/', handle)
handle = handle.lstrip('/')
return handle
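# Illustration (added comment, not part of the original source): for
# tag='array1/raw' with start/end times 2022-01-01T00:00:00 UTC and
# 2022-01-01T00:00:01 UTC, build_handle() yields
# 'array1/raw/__20220101T00:00:00.000000000__20220101T00:00:01.000000000',
# i.e. the Dataset path used below /timeseries for that block of samples.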
def _sample_idx(time, start_time, sampling_rate, right=False):
'''
Get the index of a sample at a given time, relative to start_time.
Parameters
----------
time : pandas.Timestamp
Time of sample to index.
start_time : pandas.Timestamp
Time of first sample in the array being indexed.
sampling_rate : int, float
Sampling rate (in units of samples per second) of array being
indexed.
right : bool, optional
Return the index of the sample to the immediate right of the
provided time if it falls in between samples. The default is
False.
Returns
-------
int
The index of the sample corresponding to the provided time.
'''
time = pd.to_datetime(time, utc=True)
delta = (time - start_time).total_seconds()
if delta < 0:
return 0
if right is True:
idx = int(np.ceil(delta * sampling_rate))
else:
idx = int(np.floor(delta * sampling_rate))
return idx
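# Illustration (added comment, not part of the original source): with
# start_time=2022-01-01T00:00:00Z and sampling_rate=100.0, a requested time of
# 2022-01-01T00:00:01.255Z gives delta=1.255 s, so _sample_idx() returns
# floor(125.5)=125 by default and ceil(125.5)=126 with right=True; times before
# start_time clamp to index 0.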
def _get_time_fields(dataf):
'''
Return list of column names with datetime-like dtype.
Parameters
----------
dataf : pandas.DataFrame
DataFrame from which to extract names of columns with
datetime-like dtype.
Returns
-------
list
List of column names with datetime-like dtype.
'''
is_datetime = pd.api.types.is_datetime64_any_dtype
return list(filter(lambda key: is_datetime(dataf[key]), dataf.columns))
def _validate_table(group):
'''
Validate table structure.
Parameters
----------
group : h5py.Group
The parent h5py.Group of the table to validate.
Raises
------
exceptions.TableFormatError
Raises TableFormatError if any format errors are detected.
Returns
-------
None.
'''
# Validate column shapes.
lengths = list()
columns = list(group.keys())
for column in columns:
shape = group[column].shape
if len(shape) != 1:
raise exceptions.TableFormatError(
f'Columns must have shape=(N,) but shape={shape} found for '
f'\'{column}\' column in \'{group.name}\' table.'
)
lengths.append(shape[-1])
if len(np.unique(lengths)) != 1:
msg = f'Columns in \'{group.name}\' table are not all the same length.'
msg += '\n\nColumn: Length\n--------------'
for i in range(len(columns)):
msg += f'\n{columns[i]}: {lengths[i]}'
raise exceptions.TableFormatError(msg)
# Check for existence and compatibility of __IS_UTC_DATETIME64 and
# __IS_UTF8 tags.
for column in columns:
# Don't check the __INDEX column because it has different requirements.
if column == '__INDEX':
continue
if '__IS_UTC_DATETIME64' not in group[column].attrs:
raise(exceptions.TableFormatError(
f'__IS_UTC_DATETIME64 Attribute missing for \'{column}\' '
f'column of \'{group.name}\' table.'
))
if '__IS_UTF8' not in group[column].attrs:
raise(exceptions.TableFormatError(
f'__IS_UTF8 Attribute missing for \'{column}\' '
f'column of \'{group.name}\' table.'
))
if (
group[column].attrs['__IS_UTC_DATETIME64']
and
group[column].attrs['__IS_UTF8']
):
raise(exceptions.TableFormatError(
f'__IS_UTC_DATETIME64 and __IS_UTF8 Attributes are both True '
f'for \'{column}\' column of \'{group.name}\' table.'
))
# Verify that __IS_UTC_DATETIME64 columns are 64-bit integers.
for column in columns:
# Don't check the __INDEX column because it has different requirements.
if column == '__INDEX':
continue
if group[column].attrs['__IS_UTC_DATETIME64']:
dtype = group[column].dtype
if dtype != np.int64:
raise(exceptions.TableFormatError(
f'__IS_UTC_DATETIME64 is True for \'{column}\' column '
f'of \'{group.name}\' table, but dtype(={dtype}) is not '
f'a 64-bit integer.'
))
elif group[column].attrs['__IS_UTF8']:
dtype = group[column].dtype
if not dtype == STRING_DTYPE:
raise(exceptions.TableFormatError(
f'__IS_UTF8 is True for \'{column}\' column of '
f'\'{group.name}\' table, but dtype(={dtype}) does not '
f'appear to be a byte string.'
))
def strftime(time):
'''
Return a formatted string representation of `time`.
Parameters
----------
time : pandas.Timestamp
Time to convert to string representation.
Returns
-------
str
        String representation of time formatted like
        '%Y%m%dT%H:%M:%S.%f' with three additional nanosecond digits
        appended (e.g. '20220101T00:00:00.000000000').
'''
return ''.join((
f'{time.year:4d}',
f'{time.month:02d}',
f'{time.day:02d}',
f'T{time.hour:02d}',
f':{time.minute:02d}',
f':{time.second:02d}',
f'.{time.microsecond:06d}',
f'{time.nanosecond:03d}'
)) | PypiClean |
/FP-SMC-ALS-test1-0.0.1.tar.gz/FP-SMC-ALS-test1-0.0.1/smc/core/lldp.py | from smc.base.model import Element, ElementCreator
class LLDPProfile(Element):
"""
LLDP Profile represents a set of attributes used for configuring LLDP(Link Layer Discovery
Protocol). LLDP information is advertised by devices at a fixed interval in the form of
LLDP data units represented by TLV structures.
"""
typeof = "lldp_profile"
@property
def transmit_delay(self):
"""
The transmit delay determines the delay between
any two consecutive LLDP advertisement frames.
"""
return self.data.get("transmit_delay")
@transmit_delay.setter
def transmit_delay(self, value):
self.data.update(transmit_delay=value)
@property
def hold_time_multiplier(self):
"""
Represents the multiplier to apply to the advertisement interval.
The product of the advertisement interval and the hold time multiplier
gives cache life time for the learned LLDP information, after which it is discarded.
"""
return self.data.get("hold_time_multiplier")
@hold_time_multiplier.setter
def hold_time_multiplier(self, value):
self.data.update(hold_time_multiplier=value)
@property
def chassis_id(self):
"""
TLV field: Chassis ID. The MAC address of the first Ethernet port (Always enabled)
"""
return self.data.get("chassis_id")
@property
def port_id(self):
"""
TLV field: Port ID. The name that the SNMP Agent uses for the interface
(Always enabled)
"""
return self.data.get("port_id")
@property
def time_to_live(self):
"""
TLV field: Time to Live. Automatically calculated based on transmit delay
and hold time multiplier (Always enabled)
"""
return self.data.get("time_to_live")
@property
def port_description(self):
"""
TLV field: Port Description. The description that the SNMP Agent uses for the interface
(Always enabled)
"""
return self.data.get("port_description")
@property
def system_name(self):
"""
TLV field: System Name. The system name that the SNMP Agent uses
"""
return self.data.get("system_name")
@system_name.setter
def system_name(self, value):
self.data.update(system_name=value)
@property
def system_description(self):
"""
TLV field: System Description. The system description that the SNMP Agent uses
"""
return self.data.get("system_description")
@system_description.setter
def system_description(self, value):
self.data.update(system_description=value)
@property
def system_capabilities(self):
"""
TLV field: System Capabilities. Capability bit-map. Depends on the interface type
"""
return self.data.get("system_capabilities")
@system_capabilities.setter
def system_capabilities(self, value):
self.data.update(system_capabilities=value)
@property
def management_address(self):
"""
TLV Field: Management Address IP addresses of the control interfaces
"""
return self.data.get("management_address")
@management_address.setter
def management_address(self, value):
self.data.update(management_address=value)
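    # Usage sketch for the create() classmethod defined below (added comment,
    # not part of the original source; values are hypothetical and an active
    # SMC session is assumed):
    #
    #     profile = LLDPProfile.create(
    #         name='lldp-default',
    #         transmit_delay=30,
    #         hold_time_multiplier=4,
    #         system_name=True,
    #         system_description=True,
    #         system_capabilities=True,
    #         management_address=True,
    #         comment='advertise every 30 s, cache for 120 s',
    #     )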
@classmethod
def create(
cls,
name,
transmit_delay,
hold_time_multiplier,
system_name,
system_description,
system_capabilities,
management_address,
comment=None
):
"""
Create a LLDPProfile.
        :param str name: name of LLDP Profile
:param int transmit_delay: The transmit delay determines the delay between
any two consecutive LLDP advertisement frames.
:param int hold_time_multiplier: Represents the multiplier to apply to
the advertisement interval.
:param bool system_name: The system name that the SNMP Agent uses
:param bool system_description: The system description that the SNMP Agent uses
:param bool system_capabilities: Capability bit-map. Depends on the interface type
:param bool management_address: Management Address IP addresses of the control interfaces
:param str comment: optional comment
:raises CreateElementFailed: failed to create element with reason
:raises ElementNotFound: specified element reference was not found
        :rtype: LLDPProfile
"""
json = {
"name": name,
"transmit_delay": transmit_delay,
"hold_time_multiplier": hold_time_multiplier,
"chassis_id": True,
"port_id": True,
"time_to_live": True,
"port_description": True,
"system_name": system_name,
"system_description": system_description,
"system_capabilities": system_capabilities,
"management_address": management_address,
"comment": comment,
}
return ElementCreator(cls, json) | PypiClean |