# midgetspy_Sick-Beard/lib/hachoir_parser/image/exif.py
"""
EXIF metadata parser (can be found in a JPEG picture for example)
Author: Victor Stinner
"""
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32,
Int32, Enum, String,
Bytes, SubFile,
NullBytes, createPaddingField)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN, NETWORK_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.tools import createDict
MAX_COUNT = 1000
def rationalFactory(class_name, size, field_class):
class Rational(FieldSet):
static_size = size
def createFields(self):
yield field_class(self, "numerator")
yield field_class(self, "denominator")
def createValue(self):
return float(self["numerator"].value) / self["denominator"].value
cls = Rational
cls.__name__ = class_name
return cls
RationalInt32 = rationalFactory("RationalInt32", 64, Int32)
RationalUInt32 = rationalFactory("RationalUInt32", 64, UInt32)
class BasicIFDEntry(FieldSet):
TYPE_BYTE = 0
TYPE_UNDEFINED = 7
TYPE_RATIONAL = 5
TYPE_SIGNED_RATIONAL = 10
TYPE_INFO = {
1: (UInt8, "BYTE (8 bits)"),
2: (String, "ASCII (8 bits)"),
3: (UInt16, "SHORT (16 bits)"),
4: (UInt32, "LONG (32 bits)"),
5: (RationalUInt32, "RATIONAL (2x LONG, 64 bits)"),
7: (Bytes, "UNDEFINED (8 bits)"),
9: (Int32, "SIGNED LONG (32 bits)"),
10: (RationalInt32, "SRATIONAL (2x SIGNED LONGs, 64 bits)"),
}
ENTRY_FORMAT = createDict(TYPE_INFO, 0)
TYPE_NAME = createDict(TYPE_INFO, 1)
def createFields(self):
yield Enum(textHandler(UInt16(self, "tag", "Tag"), hexadecimal), self.TAG_NAME)
yield Enum(textHandler(UInt16(self, "type", "Type"), hexadecimal), self.TYPE_NAME)
yield UInt32(self, "count", "Count")
if self["type"].value not in (self.TYPE_BYTE, self.TYPE_UNDEFINED) \
and MAX_COUNT < self["count"].value:
raise ParserError("EXIF: Invalid count value (%s)" % self["count"].value)
value_size, array_size = self.getSizes()
# Get offset/value
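        # TIFF/EXIF entries store values of at most 4 bytes (32 bits)
        # inline in the entry itself; anything larger is replaced by a
        # 32-bit offset to the data stored elsewhere in the IFD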
if not value_size:
yield NullBytes(self, "padding", 4)
elif value_size <= 32:
if 1 < array_size:
name = "value[]"
else:
name = "value"
kw = {}
cls = self.value_cls
if cls is String:
args = (self, name, value_size/8, "Value")
kw["strip"] = " \0"
kw["charset"] = "ISO-8859-1"
elif cls is Bytes:
args = (self, name, value_size/8, "Value")
else:
args = (self, name, "Value")
for index in xrange(array_size):
yield cls(*args, **kw)
size = array_size * value_size
if size < 32:
yield NullBytes(self, "padding", (32-size)//8)
else:
yield UInt32(self, "offset", "Value offset")
def getSizes(self):
"""
Returns (value_size, array_size): value_size in bits and
array_size in number of items.
"""
# Create format
self.value_cls = self.ENTRY_FORMAT.get(self["type"].value, Bytes)
# Set size
count = self["count"].value
if self.value_cls in (String, Bytes):
return 8 * count, 1
else:
return self.value_cls.static_size * count, count
class ExifEntry(BasicIFDEntry):
OFFSET_JPEG_SOI = 0x0201
EXIF_IFD_POINTER = 0x8769
TAG_WIDTH = 0xA002
TAG_HEIGHT = 0xA003
TAG_GPS_LATITUDE_REF = 0x0001
TAG_GPS_LATITUDE = 0x0002
TAG_GPS_LONGITUDE_REF = 0x0003
TAG_GPS_LONGITUDE = 0x0004
TAG_GPS_ALTITUDE_REF = 0x0005
TAG_GPS_ALTITUDE = 0x0006
TAG_GPS_TIMESTAMP = 0x0007
TAG_GPS_DATESTAMP = 0x001d
TAG_IMG_TITLE = 0x010e
TAG_FILE_TIMESTAMP = 0x0132
TAG_SOFTWARE = 0x0131
TAG_CAMERA_MODEL = 0x0110
TAG_CAMERA_MANUFACTURER = 0x010f
TAG_ORIENTATION = 0x0112
TAG_EXPOSURE = 0x829A
TAG_FOCAL = 0x829D
TAG_BRIGHTNESS = 0x9203
TAG_APERTURE = 0x9205
TAG_USER_COMMENT = 0x9286
TAG_NAME = {
# GPS
0x0000: "GPS version ID",
0x0001: "GPS latitude ref",
0x0002: "GPS latitude",
0x0003: "GPS longitude ref",
0x0004: "GPS longitude",
0x0005: "GPS altitude ref",
0x0006: "GPS altitude",
0x0007: "GPS timestamp",
0x0008: "GPS satellites",
0x0009: "GPS status",
0x000a: "GPS measure mode",
0x000b: "GPS DOP",
0x000c: "GPS speed ref",
0x000d: "GPS speed",
0x000e: "GPS track ref",
0x000f: "GPS track",
0x0010: "GPS img direction ref",
0x0011: "GPS img direction",
0x0012: "GPS map datum",
0x0013: "GPS dest latitude ref",
0x0014: "GPS dest latitude",
0x0015: "GPS dest longitude ref",
0x0016: "GPS dest longitude",
0x0017: "GPS dest bearing ref",
0x0018: "GPS dest bearing",
0x0019: "GPS dest distance ref",
0x001a: "GPS dest distance",
0x001b: "GPS processing method",
0x001c: "GPS area information",
0x001d: "GPS datestamp",
0x001e: "GPS differential",
0x0100: "Image width",
0x0101: "Image height",
0x0102: "Number of bits per component",
0x0103: "Compression scheme",
0x0106: "Pixel composition",
TAG_ORIENTATION: "Orientation of image",
0x0115: "Number of components",
0x011C: "Image data arrangement",
0x0212: "Subsampling ratio Y to C",
0x0213: "Y and C positioning",
        0x011A: "Image resolution in width direction",
0x011B: "Image resolution in height direction",
0x0128: "Unit of X and Y resolution",
0x0111: "Image data location",
0x0116: "Number of rows per strip",
0x0117: "Bytes per compressed strip",
0x0201: "Offset to JPEG SOI",
0x0202: "Bytes of JPEG data",
0x012D: "Transfer function",
0x013E: "White point chromaticity",
0x013F: "Chromaticities of primaries",
0x0211: "Color space transformation matrix coefficients",
        0x0214: "Pair of black and white reference values",
TAG_FILE_TIMESTAMP: "File change date and time",
TAG_IMG_TITLE: "Image title",
TAG_CAMERA_MANUFACTURER: "Camera (Image input equipment) manufacturer",
        TAG_CAMERA_MODEL: "Camera (Image input equipment) model",
TAG_SOFTWARE: "Software",
        0x013B: "Person who created the image",
0x8298: "Copyright holder",
0x8769: "Exif IFD Pointer",
TAG_EXPOSURE: "Exposure time",
TAG_FOCAL: "F number",
0x8822: "Exposure program",
0x8824: "Spectral sensitivity",
0x8827: "ISO speed rating",
0x8828: "Optoelectric conversion factor OECF",
0x9201: "Shutter speed",
0x9202: "Aperture",
TAG_BRIGHTNESS: "Brightness",
0x9204: "Exposure bias",
TAG_APERTURE: "Maximum lens aperture",
0x9206: "Subject distance",
0x9207: "Metering mode",
0x9208: "Light source",
0x9209: "Flash",
0x920A: "Lens focal length",
0x9214: "Subject area",
0xA20B: "Flash energy",
0xA20C: "Spatial frequency response",
0xA20E: "Focal plane X resolution",
0xA20F: "Focal plane Y resolution",
0xA210: "Focal plane resolution unit",
0xA214: "Subject location",
0xA215: "Exposure index",
0xA217: "Sensing method",
0xA300: "File source",
0xA301: "Scene type",
0xA302: "CFA pattern",
0xA401: "Custom image processing",
0xA402: "Exposure mode",
0xA403: "White balance",
0xA404: "Digital zoom ratio",
0xA405: "Focal length in 35 mm film",
0xA406: "Scene capture type",
0xA407: "Gain control",
0xA408: "Contrast",
0x9000: "Exif version",
0xA000: "Supported Flashpix version",
0xA001: "Color space information",
0x9101: "Meaning of each component",
0x9102: "Image compression mode",
TAG_WIDTH: "Valid image width",
TAG_HEIGHT: "Valid image height",
0x927C: "Manufacturer notes",
TAG_USER_COMMENT: "User comments",
0xA004: "Related audio file",
0x9003: "Date and time of original data generation",
0x9004: "Date and time of digital data generation",
0x9290: "DateTime subseconds",
0x9291: "DateTimeOriginal subseconds",
0x9292: "DateTimeDigitized subseconds",
0xA420: "Unique image ID",
0xA005: "Interoperability IFD Pointer"
}
def createDescription(self):
return "Entry: %s" % self["tag"].display
def sortExifEntry(a,b):
return int( a["offset"].value - b["offset"].value )
class ExifIFD(FieldSet):
def seek(self, offset):
"""
Seek to byte address relative to parent address.
"""
padding = offset - (self.address + self.current_size)/8
if 0 < padding:
return createPaddingField(self, padding*8)
else:
return None
def createFields(self):
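        # IFD offsets are relative to the TIFF header, which starts
        # after the 6-byte "Exif\0\0" signature, hence offset_diff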
offset_diff = 6
yield UInt16(self, "count", "Number of entries")
entries = []
next_chunk_offset = None
count = self["count"].value
if not count:
return
while count:
addr = self.absolute_address + self.current_size
next = self.stream.readBits(addr, 32, NETWORK_ENDIAN)
if next in (0, 0xF0000000):
break
entry = ExifEntry(self, "entry[]")
yield entry
if entry["tag"].value in (ExifEntry.EXIF_IFD_POINTER, ExifEntry.OFFSET_JPEG_SOI):
next_chunk_offset = entry["value"].value + offset_diff
if 32 < entry.getSizes()[0]:
entries.append(entry)
count -= 1
yield UInt32(self, "next", "Next IFD offset")
try:
entries.sort( sortExifEntry )
except TypeError:
raise ParserError("Unable to sort entries!")
value_index = 0
for entry in entries:
padding = self.seek(entry["offset"].value + offset_diff)
if padding is not None:
yield padding
value_size, array_size = entry.getSizes()
if not array_size:
continue
cls = entry.value_cls
if 1 < array_size:
name = "value_%s[]" % entry.name
else:
name = "value_%s" % entry.name
desc = "Value of \"%s\"" % entry["tag"].display
if cls is String:
for index in xrange(array_size):
yield cls(self, name, value_size/8, desc, strip=" \0", charset="ISO-8859-1")
elif cls is Bytes:
for index in xrange(array_size):
yield cls(self, name, value_size/8, desc)
else:
for index in xrange(array_size):
yield cls(self, name, desc)
value_index += 1
if next_chunk_offset is not None:
padding = self.seek(next_chunk_offset)
if padding is not None:
yield padding
def createDescription(self):
return "Exif IFD (id %s)" % self["id"].value
class Exif(FieldSet):
def createFields(self):
# Headers
yield String(self, "header", 6, "Header (Exif\\0\\0)", charset="ASCII")
if self["header"].value != "Exif\0\0":
raise ParserError("Invalid EXIF signature!")
yield String(self, "byte_order", 2, "Byte order", charset="ASCII")
if self["byte_order"].value not in ("II", "MM"):
raise ParserError("Invalid endian!")
if self["byte_order"].value == "II":
self.endian = LITTLE_ENDIAN
else:
self.endian = BIG_ENDIAN
yield UInt16(self, "version", "TIFF version number")
yield UInt32(self, "img_dir_ofs", "Next image directory offset")
while not self.eof:
addr = self.absolute_address + self.current_size
tag = self.stream.readBits(addr, 16, NETWORK_ENDIAN)
if tag == 0xFFD8:
size = (self._size - self.current_size) // 8
yield SubFile(self, "thumbnail", size, "Thumbnail (JPEG file)", mime_type="image/jpeg")
break
elif tag == 0xFFFF:
break
yield ExifIFD(self, "ifd[]", "IFD")
padding = self.seekBit(self._size)
if padding is not None:
yield padding
# midgetspy_Sick-Beard/lib/hachoir_parser/image/jpeg.py
"""
JPEG picture parser.
Information:
- APP14 documents
http://partners.adobe.com/public/developer/en/ps/sdk/5116.DCT_Filter.pdf
http://java.sun.com/j2se/1.5.0/docs/api/javax/imageio/metadata/doc-files/jpeg_metadata.html#color
- APP12:
http://search.cpan.org/~exiftool/Image-ExifTool/lib/Image/ExifTool/TagNames.pod
Author: Victor Stinner
"""
from lib.hachoir_core.error import HachoirError
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, Enum,
Bit, Bits, NullBits, NullBytes,
String, RawBytes)
from lib.hachoir_parser.image.common import PaletteRGB
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_parser.image.exif import Exif
from lib.hachoir_parser.image.photoshop_metadata import PhotoshopMetadata
MAX_FILESIZE = 100 * 1024 * 1024
# The four tables (hash/sum for color/grayscale JPEG) come
# from the ImageMagick project
QUALITY_HASH_COLOR = (
1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645,
632, 623, 613, 607, 600, 594, 589, 585, 581, 571,
555, 542, 529, 514, 494, 474, 457, 439, 424, 410,
397, 386, 373, 364, 351, 341, 334, 324, 317, 309,
299, 294, 287, 279, 274, 267, 262, 257, 251, 247,
243, 237, 232, 227, 222, 217, 213, 207, 202, 198,
192, 188, 183, 177, 173, 168, 163, 157, 153, 148,
143, 139, 132, 128, 125, 119, 115, 108, 104, 99,
94, 90, 84, 79, 74, 70, 64, 59, 55, 49,
45, 40, 34, 30, 25, 20, 15, 11, 6, 4,
0)
QUALITY_SUM_COLOR = (
32640,32635,32266,31495,30665,29804,29146,28599,28104,27670,
27225,26725,26210,25716,25240,24789,24373,23946,23572,22846,
21801,20842,19949,19121,18386,17651,16998,16349,15800,15247,
14783,14321,13859,13535,13081,12702,12423,12056,11779,11513,
11135,10955,10676,10392,10208, 9928, 9747, 9564, 9369, 9193,
9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347,
7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495,
5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698,
3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846,
1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128,
0)
QUALITY_HASH_GRAY = (
510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
102, 100, 97, 94, 92, 89, 87, 83, 81, 79,
76, 74, 70, 68, 66, 63, 61, 57, 55, 52,
50, 48, 44, 42, 39, 37, 34, 31, 29, 26,
24, 21, 18, 16, 13, 11, 8, 6, 3, 2,
0)
QUALITY_SUM_GRAY = (
16320,16315,15946,15277,14655,14073,13623,13230,12859,12560,
12240,11861,11456,11081,10714,10360,10027, 9679, 9368, 9056,
8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125,
5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616,
4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688,
3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952,
2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211,
2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477,
1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736,
667, 592, 518, 441, 369, 292, 221, 151, 86, 64,
0)
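# --- Illustrative sketch, not part of the original parser ---
# One plausible use of the tables above, loosely following the
# ImageMagick code they come from: sum the coefficients of a decoded
# quantization table, then return the first quality level whose
# reference sum is reached. The sums shrink as quality rises (coarse
# quantization means large coefficients), so index 0 maps to quality 1.
# This helper is an assumption about usage, not hachoir's actual code.
def approximateQuality(coeff_sum, sums=QUALITY_SUM_COLOR):
    for index, reference in enumerate(sums):
        if coeff_sum >= reference:
            return min(index + 1, 100)
    return 100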
JPEG_NATURAL_ORDER = (
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63)
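# The DQT segment stores coefficients in zigzag order;
# JPEG_NATURAL_ORDER[i] gives the natural (row-major) index of the
# i-th zigzag coefficient, e.g. zigzag index 2 maps to natural index 8
# (row 1, column 0 of the 8x8 block).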
class JpegChunkApp0(FieldSet):
UNIT_NAME = {
0: "pixels",
1: "dots per inch",
2: "dots per cm",
}
def createFields(self):
yield String(self, "jfif", 5, "JFIF string", charset="ASCII")
if self["jfif"].value != "JFIF\0":
raise ParserError(
"Stream doesn't look like JPEG chunk (wrong JFIF signature)")
yield UInt8(self, "ver_maj", "Major version")
yield UInt8(self, "ver_min", "Minor version")
yield Enum(UInt8(self, "units", "Units"), self.UNIT_NAME)
if self["units"].value == 0:
yield UInt16(self, "aspect_x", "Aspect ratio (X)")
yield UInt16(self, "aspect_y", "Aspect ratio (Y)")
else:
yield UInt16(self, "x_density", "X density")
yield UInt16(self, "y_density", "Y density")
yield UInt8(self, "thumb_w", "Thumbnail width")
yield UInt8(self, "thumb_h", "Thumbnail height")
thumb_size = self["thumb_w"].value * self["thumb_h"].value
if thumb_size != 0:
yield PaletteRGB(self, "thumb_palette", 256)
yield RawBytes(self, "thumb_data", thumb_size, "Thumbnail data")
class Ducky(FieldSet):
BLOCK_TYPE = {
0: "end",
1: "Quality",
2: "Comment",
3: "Copyright",
}
def createFields(self):
yield Enum(UInt16(self, "type"), self.BLOCK_TYPE)
if self["type"].value == 0:
return
yield UInt16(self, "size")
size = self["size"].value
if size:
yield RawBytes(self, "data", size)
class APP12(FieldSet):
"""
The JPEG APP12 "Picture Info" segment was used by some older cameras, and
contains ASCII-based meta information.
"""
def createFields(self):
yield String(self, "ducky", 5, '"Ducky" string', charset="ASCII")
while not self.eof:
yield Ducky(self, "item[]")
class StartOfFrame(FieldSet):
def createFields(self):
yield UInt8(self, "precision")
yield UInt16(self, "height")
yield UInt16(self, "width")
yield UInt8(self, "nr_components")
for index in range(self["nr_components"].value):
yield UInt8(self, "component_id[]")
yield UInt8(self, "high[]")
yield UInt8(self, "low[]")
class Comment(FieldSet):
def createFields(self):
yield String(self, "comment", self.size//8, strip="\0")
class AdobeChunk(FieldSet):
COLORSPACE_TRANSFORMATION = {
1: "YCbCr (converted from RGB)",
2: "YCCK (converted from CMYK)",
}
def createFields(self):
if self.stream.readBytes(self.absolute_address, 5) != "Adobe":
yield RawBytes(self, "raw", self.size//8, "Raw data")
return
yield String(self, "adobe", 5, "\"Adobe\" string", charset="ASCII")
yield UInt16(self, "version", "DCT encoder version")
yield Enum(Bit(self, "flag00"),
{False: "Chop down or subsampling", True: "Blend"})
yield NullBits(self, "flags0_reserved", 15)
yield NullBytes(self, "flags1", 2)
yield Enum(UInt8(self, "color_transform", "Colorspace transformation code"), self.COLORSPACE_TRANSFORMATION)
class StartOfScan(FieldSet):
def createFields(self):
yield UInt8(self, "nr_components")
for index in range(self["nr_components"].value):
comp_id = UInt8(self, "component_id[]")
yield comp_id
if not(1 <= comp_id.value <= self["nr_components"].value):
raise ParserError("JPEG error: Invalid component-id")
yield UInt8(self, "value[]")
yield RawBytes(self, "raw", 3) # TODO: What's this???
class RestartInterval(FieldSet):
def createFields(self):
yield UInt16(self, "interval", "Restart interval")
class QuantizationTable(FieldSet):
def createFields(self):
# Code based on function get_dqt() (jdmarker.c from libjpeg62)
yield Bits(self, "is_16bit", 4)
yield Bits(self, "index", 4)
if self["index"].value >= 4:
raise ParserError("Invalid quantification index (%s)" % self["index"].value)
if self["is_16bit"].value:
coeff_type = UInt16
else:
coeff_type = UInt8
for index in xrange(64):
natural = JPEG_NATURAL_ORDER[index]
yield coeff_type(self, "coeff[%u]" % natural)
def createDescription(self):
return "Quantification table #%u" % self["index"].value
class DefineQuantizationTable(FieldSet):
def createFields(self):
while self.current_size < self.size:
yield QuantizationTable(self, "qt[]")
class JpegChunk(FieldSet):
TAG_SOI = 0xD8
TAG_EOI = 0xD9
TAG_SOS = 0xDA
TAG_DQT = 0xDB
TAG_DRI = 0xDD
TAG_INFO = {
0xC4: ("huffman[]", "Define Huffman Table (DHT)", None),
0xD8: ("start_image", "Start of image (SOI)", None),
0xD9: ("end_image", "End of image (EOI)", None),
0xDA: ("start_scan", "Start Of Scan (SOS)", StartOfScan),
0xDB: ("quantization[]", "Define Quantization Table (DQT)", DefineQuantizationTable),
0xDC: ("nb_line", "Define number of Lines (DNL)", None),
0xDD: ("restart_interval", "Define Restart Interval (DRI)", RestartInterval),
0xE0: ("app0", "APP0", JpegChunkApp0),
0xE1: ("exif", "Exif metadata", Exif),
0xE2: ("icc", "ICC profile", None),
0xEC: ("app12", "APP12", APP12),
0xED: ("photoshop", "Photoshop", PhotoshopMetadata),
0xEE: ("adobe", "Image encoding information for DCT filters (Adobe)", AdobeChunk),
0xFE: ("comment[]", "Comment", Comment),
}
START_OF_FRAME = {
0xC0: u"Baseline",
0xC1: u"Extended sequential",
0xC2: u"Progressive",
0xC3: u"Lossless",
0xC5: u"Differential sequential",
0xC6: u"Differential progressive",
0xC7: u"Differential lossless",
0xC9: u"Extended sequential, arithmetic coding",
0xCA: u"Progressive, arithmetic coding",
0xCB: u"Lossless, arithmetic coding",
0xCD: u"Differential sequential, arithmetic coding",
0xCE: u"Differential progressive, arithmetic coding",
0xCF: u"Differential lossless, arithmetic coding",
}
for key, text in START_OF_FRAME.iteritems():
TAG_INFO[key] = ("start_frame", "Start of frame (%s)" % text.lower(), StartOfFrame)
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
tag = self["type"].value
if tag == 0xE1:
            # APP1 carries either Exif metadata or Adobe XAP/XMP
            # metadata (XML): check the signature to tell them apart
bytes = self.stream.readBytes(self.absolute_address + 32, 6)
if bytes == "Exif\0\0":
self._name = "exif"
self._description = "EXIF"
self._parser = Exif
else:
self._parser = None
elif tag in self.TAG_INFO:
self._name, self._description, self._parser = self.TAG_INFO[tag]
else:
self._parser = None
def createFields(self):
yield textHandler(UInt8(self, "header", "Header"), hexadecimal)
if self["header"].value != 0xFF:
raise ParserError("JPEG: Invalid chunk header!")
yield textHandler(UInt8(self, "type", "Type"), hexadecimal)
tag = self["type"].value
if tag in (self.TAG_SOI, self.TAG_EOI):
return
yield UInt16(self, "size", "Size")
size = (self["size"].value - 2)
if 0 < size:
if self._parser:
yield self._parser(self, "content", "Chunk content", size=size*8)
else:
yield RawBytes(self, "data", size, "Data")
def createDescription(self):
return "Chunk: %s" % self["type"].display
class JpegFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "jpeg",
"category": "image",
"file_ext": ("jpg", "jpeg"),
"mime": (u"image/jpeg",),
"magic": (
("\xFF\xD8\xFF\xE0", 0), # (Start Of Image, APP0)
("\xFF\xD8\xFF\xE1", 0), # (Start Of Image, EXIF)
("\xFF\xD8\xFF\xEE", 0), # (Start Of Image, Adobe)
),
"min_size": 22*8,
"description": "JPEG picture",
"subfile": "skip",
}
def validate(self):
if self.stream.readBytes(0, 2) != "\xFF\xD8":
return "Invalid file signature"
try:
for index, field in enumerate(self):
chunk_type = field["type"].value
if chunk_type not in JpegChunk.TAG_INFO:
return "Unknown chunk type: 0x%02X (chunk #%s)" % (chunk_type, index)
if index == 2:
# Only check 3 fields
break
except HachoirError:
return "Unable to parse at least three chunks"
return True
def createFields(self):
while not self.eof:
chunk = JpegChunk(self, "chunk[]")
yield chunk
if chunk["type"].value == JpegChunk.TAG_SOS:
# TODO: Read JPEG image data...
break
# TODO: is it possible to handle piped input?
if self._size is None:
raise NotImplementedError
has_end = False
size = (self._size - self.current_size) // 8
if size:
if 2 < size \
and self.stream.readBytes(self._size - 16, 2) == "\xff\xd9":
has_end = True
size -= 2
yield RawBytes(self, "data", size, "JPEG data")
if has_end:
yield JpegChunk(self, "chunk[]")
def createDescription(self):
desc = "JPEG picture"
if "sof/content" in self:
header = self["sof/content"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
return desc
def createContentSize(self):
if "end" in self:
return self["end"].absolute_address + self["end"].size
if "data" not in self:
return None
start = self["data"].absolute_address
end = self.stream.searchBytes("\xff\xd9", start, MAX_FILESIZE*8)
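        # searchBytes() returns a bit address; add 16 bits to include
        # the two-byte EOI marker itself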
if end is not None:
return end + 16
return None
# midgetspy_Sick-Beard/lib/hachoir_parser/image/ico.py
"""
Microsoft Windows icon and cursor file format parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum, RawBytes)
from lib.hachoir_parser.image.common import PaletteRGBA
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.win32 import BitmapInfoHeader
class IconHeader(FieldSet):
def createFields(self):
yield UInt8(self, "width", "Width")
yield UInt8(self, "height", "Height")
yield UInt8(self, "nb_color", "Number of colors")
yield UInt8(self, "reserved", "(reserved)")
yield UInt16(self, "planes", "Color planes (=1)")
yield UInt16(self, "bpp", "Bits per pixel")
yield UInt32(self, "size", "Content size in bytes")
yield UInt32(self, "offset", "Data offset")
def createDescription(self):
return "Icon: %ux%u pixels, %u bits/pixel" % \
(self["width"].value, self["height"].value, self["bpp"].value)
def isValid(self):
if self["nb_color"].value == 0:
if self["bpp"].value in (8, 24, 32) and self["planes"].value == 1:
return True
if self["planes"].value == 4 and self["bpp"].value == 0:
return True
elif self["nb_color"].value == 16:
if self["bpp"].value in (4, 16) and self["planes"].value == 1:
return True
else:
return False
if self["bpp"].value == 0 and self["planes"].value == 0:
return True
return False
class IconData(FieldSet):
def __init__(self, parent, name, header):
FieldSet.__init__(self, parent, name, "Icon data")
self.header = header
def createFields(self):
yield BitmapInfoHeader(self, "header")
# Read palette if needed
nb_color = self.header["nb_color"].value
if self.header["bpp"].value == 8:
nb_color = 256
if nb_color != 0:
yield PaletteRGBA(self, "palette", nb_color)
# Read pixels
size = self.header["size"].value - self.current_size/8
yield RawBytes(self, "pixels", size, "Image pixels")
class IcoFile(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "ico",
"category": "image",
"file_ext": ("ico", "cur"),
"mime": (u"image/x-ico",),
"min_size": (22 + 40)*8,
# "magic": (
# ("\0\0\1\0", 0), # Icon
# ("\0\0\2\0", 0), # Cursor
# ),
"magic_regex": ((
# signature=0, type=(1|2), count in 1..20,
"\0\0[\1\2]\0[\x01-\x14]."
# size=(16x16|32x32|48x48|64x64),
"(\x10\x10|\x20\x20|\x30\x30|\x40\x40)"
# nb_color=0 or 16; nb_plane=(0|1|4), bpp=(0|8|24|32)
"[\x00\x10]\0[\0\1\4][\0\x08\x18\x20]\0",
0),),
"description": "Microsoft Windows icon or cursor",
}
TYPE_NAME = {
1: "icon",
2: "cursor"
}
def validate(self):
# Check signature and type
if self["signature"].value != 0:
return "Wrong file signature"
if self["type"].value not in self.TYPE_NAME:
return "Unknown picture type"
# Check all icon headers
index = -1
for field in self:
if field.name.startswith("icon_header"):
index += 1
if not field.isValid():
return "Invalid header #%u" % index
elif 0 <= index:
break
return True
def createFields(self):
yield UInt16(self, "signature", "Signature (0x0000)")
yield Enum(UInt16(self, "type", "Resource type"), self.TYPE_NAME)
yield UInt16(self, "nb_items", "Number of items")
items = []
for index in xrange(self["nb_items"].value):
item = IconHeader(self, "icon_header[]")
yield item
items.append(item)
for header in items:
if header["offset"].value*8 != self.current_size:
raise ParserError("Icon: Problem with icon data offset.")
yield IconData(self, "icon_data[]", header)
def createDescription(self):
desc = "Microsoft Windows %s" % self["type"].display
size = []
for header in self.array("icon_header"):
size.append("%ux%ux%u" % (header["width"].value,
header["height"].value, header["bpp"].value))
if size:
return "%s: %s" % (desc, ", ".join(size))
else:
return desc
def createContentSize(self):
count = self["nb_items"].value
if not count:
return None
field = self["icon_data[%u]" % (count-1)]
return field.absolute_address + field.size
# midgetspy_Sick-Beard/lib/hachoir_parser/image/png.py
"""
PNG picture file parser.
Documents:
- RFC 2083
http://www.faqs.org/rfcs/rfc2083.html
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, Fragment,
ParserError, MissingField,
UInt8, UInt16, UInt32,
String, CString,
Bytes, RawBytes,
Bit, NullBits,
Enum, CompressedField)
from lib.hachoir_parser.image.common import RGB
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.endian import NETWORK_ENDIAN
from lib.hachoir_core.tools import humanFilesize
from datetime import datetime
MAX_FILESIZE = 500 * 1024 * 1024
try:
from zlib import decompressobj
class Gunzip:
def __init__(self, stream):
self.gzip = decompressobj()
def __call__(self, size, data=None):
if data is None:
data = self.gzip.unconsumed_tail
return self.gzip.decompress(data, size)
has_deflate = True
except ImportError:
has_deflate = False
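# When zlib is available, the ImageData fragments below are wrapped
# with CompressedField(self, Gunzip) so the inflated IDAT stream can be
# exposed; without zlib the raw deflate data is still parsed, just not
# decompressed.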
UNIT_NAME = {1: "Meter"}
COMPRESSION_NAME = {
0: u"deflate" # with 32K sliding window
}
MAX_CHUNK_SIZE = 500 * 1024 # Maximum chunk size (500 KB)
def headerParse(parent):
yield UInt32(parent, "width", "Width (pixels)")
yield UInt32(parent, "height", "Height (pixels)")
yield UInt8(parent, "bit_depth", "Bit depth")
yield NullBits(parent, "reserved", 5)
yield Bit(parent, "has_alpha", "Has alpha channel?")
yield Bit(parent, "color", "Color used?")
yield Bit(parent, "has_palette", "Has a color palette?")
yield Enum(UInt8(parent, "compression", "Compression method"), COMPRESSION_NAME)
yield UInt8(parent, "filter", "Filter method")
yield UInt8(parent, "interlace", "Interlace method")
def headerDescription(parent):
return "Header: %ux%u pixels and %u bits/pixel" % \
(parent["width"].value, parent["height"].value, getBitsPerPixel(parent))
def paletteParse(parent):
size = parent["size"].value
if (size % 3) != 0:
raise ParserError("Palette have invalid size (%s), should be 3*n!" % size)
nb_colors = size // 3
for index in xrange(nb_colors):
yield RGB(parent, "color[]")
def paletteDescription(parent):
return "Palette: %u colors" % (parent["size"].value // 3)
def gammaParse(parent):
yield UInt32(parent, "gamma", "Gamma (x100,000)")
def gammaValue(parent):
return float(parent["gamma"].value) / 100000
def gammaDescription(parent):
return "Gamma: %.3f" % parent.value
def textParse(parent):
yield CString(parent, "keyword", "Keyword", charset="ISO-8859-1")
length = parent["size"].value - parent["keyword"].size/8
if length:
yield String(parent, "text", length, "Text", charset="ISO-8859-1")
def textDescription(parent):
if "text" in parent:
return u'Text: %s' % parent["text"].display
else:
return u'Text'
def timestampParse(parent):
yield UInt16(parent, "year", "Year")
yield UInt8(parent, "month", "Month")
yield UInt8(parent, "day", "Day")
yield UInt8(parent, "hour", "Hour")
yield UInt8(parent, "minute", "Minute")
yield UInt8(parent, "second", "Second")
def timestampValue(parent):
value = datetime(
parent["year"].value, parent["month"].value, parent["day"].value,
parent["hour"].value, parent["minute"].value, parent["second"].value)
return value
def physicalParse(parent):
yield UInt32(parent, "pixel_per_unit_x", "Pixel per unit, X axis")
yield UInt32(parent, "pixel_per_unit_y", "Pixel per unit, Y axis")
yield Enum(UInt8(parent, "unit", "Unit type"), UNIT_NAME)
def physicalDescription(parent):
x = parent["pixel_per_unit_x"].value
y = parent["pixel_per_unit_y"].value
desc = "Physical: %ux%u pixels" % (x,y)
if parent["unit"].value == 1:
desc += " per meter"
return desc
def parseBackgroundColor(parent):
yield UInt16(parent, "red")
yield UInt16(parent, "green")
yield UInt16(parent, "blue")
def backgroundColorDesc(parent):
rgb = parent["red"].value, parent["green"].value, parent["blue"].value
name = RGB.color_name.get(rgb)
if not name:
name = "#%02X%02X%02X" % rgb
return "Background color: %s" % name
class ImageData(Fragment):
def __init__(self, parent, name="compressed_data"):
Fragment.__init__(self, parent, name, None, 8*parent["size"].value)
data = parent.name.split('[')
data, next = "../%s[%%u]" % data[0], int(data[1][:-1]) + 1
first = parent.getField(data % 0)
        if first is parent:
            first = None
            if has_deflate:
                CompressedField(self, Gunzip)
        else:
            first = first[name]
try:
_next = parent[data % next]
next = lambda: _next[name]
except MissingField:
next = None
self.setLinks(first, next)
def parseTransparency(parent):
for i in range(parent["size"].value):
yield UInt8(parent, "alpha_value[]", "Alpha value for palette entry %i"%i)
def getBitsPerPixel(header):
nr_component = 1
if header["has_alpha"].value:
nr_component += 1
if header["color"].value and not header["has_palette"].value:
nr_component += 2
return nr_component * header["bit_depth"].value
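# Example (illustrative): a truecolor image with an alpha channel
# (color=1, has_alpha=1, has_palette=0) and bit_depth=8 has four
# components, so getBitsPerPixel() returns 32.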
class Chunk(FieldSet):
TAG_INFO = {
"tIME": ("time", timestampParse, "Timestamp", timestampValue),
"pHYs": ("physical", physicalParse, physicalDescription, None),
"IHDR": ("header", headerParse, headerDescription, None),
"PLTE": ("palette", paletteParse, paletteDescription, None),
"gAMA": ("gamma", gammaParse, gammaDescription, gammaValue),
"tEXt": ("text[]", textParse, textDescription, None),
"tRNS": ("transparency", parseTransparency, "Transparency Info", None),
"bKGD": ("background", parseBackgroundColor, backgroundColorDesc, None),
"IDAT": ("data[]", lambda parent: (ImageData(parent),), "Image data", None),
"iTXt": ("utf8_text[]", None, "International text (encoded in UTF-8)", None),
"zTXt": ("comp_text[]", None, "Compressed text", None),
"IEND": ("end", None, "End", None)
}
def createValueFunc(self):
return self.value_func(self)
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
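        # total chunk size: data size plus 4 bytes each for the length,
        # tag and CRC32 fields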
self._size = (self["size"].value + 3*4) * 8
if MAX_CHUNK_SIZE < (self._size//8):
raise ParserError("PNG: Chunk is too big (%s)"
% humanFilesize(self._size//8))
tag = self["tag"].value
self.desc_func = None
self.value_func = None
if tag in self.TAG_INFO:
self._name, self.parse_func, desc, value_func = self.TAG_INFO[tag]
if value_func:
self.value_func = value_func
self.createValue = self.createValueFunc
if desc:
if isinstance(desc, str):
self._description = desc
else:
self.desc_func = desc
else:
self._description = ""
self.parse_func = None
def createFields(self):
yield UInt32(self, "size", "Size")
yield String(self, "tag", 4, "Tag", charset="ASCII")
size = self["size"].value
if size != 0:
if self.parse_func:
for field in self.parse_func(self):
yield field
else:
yield RawBytes(self, "content", size, "Data")
yield textHandler(UInt32(self, "crc32", "CRC32"), hexadecimal)
def createDescription(self):
if self.desc_func:
return self.desc_func(self)
else:
return "Chunk: %s" % self["tag"].display
class PngFile(Parser):
PARSER_TAGS = {
"id": "png",
"category": "image",
"file_ext": ("png",),
"mime": (u"image/png", u"image/x-png"),
"min_size": 8*8, # just the identifier
"magic": [('\x89PNG\r\n\x1A\n', 0)],
"description": "Portable Network Graphics (PNG) picture"
}
endian = NETWORK_ENDIAN
def validate(self):
if self["id"].value != '\x89PNG\r\n\x1A\n':
return "Invalid signature"
if self[1].name != "header":
return "First chunk is not header"
return True
def createFields(self):
yield Bytes(self, "id", 8, r"PNG identifier ('\x89PNG\r\n\x1A\n')")
while not self.eof:
yield Chunk(self, "chunk[]")
def createDescription(self):
header = self["header"]
desc = "PNG picture: %ux%ux%u" % (
header["width"].value, header["height"].value, getBitsPerPixel(header))
if header["has_alpha"].value:
desc += " (alpha layer)"
return desc
def createContentSize(self):
field = self["header"]
start = field.absolute_address + field.size
end = MAX_FILESIZE * 8
pos = self.stream.searchBytes("\0\0\0\0IEND\xae\x42\x60\x82", start, end)
if pos is not None:
return pos + 12*8
return None
# midgetspy_Sick-Beard/lib/hachoir_parser/image/psd.py
"""
Photoshop parser (.psd file).
Creation date: 8 january 2006
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt16, UInt32, String, NullBytes, Enum, RawBytes)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_parser.image.photoshop_metadata import Photoshop8BIM
class Config(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
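        # total size: the 32-bit "size" field itself plus its value,
        # converted to bits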
self._size = (4 + self["size"].value) * 8
def createFields(self):
yield UInt32(self, "size")
while not self.eof:
yield Photoshop8BIM(self, "item[]")
class PsdFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "psd",
"category": "image",
"file_ext": ("psd",),
"mime": (u"image/psd", u"image/photoshop", u"image/x-photoshop"),
"min_size": 4*8,
"magic": (("8BPS\0\1",0),),
"description": "Photoshop (PSD) picture",
}
COLOR_MODE = {
0: u"Bitmap",
1: u"Grayscale",
2: u"Indexed",
3: u"RGB color",
4: u"CMYK color",
7: u"Multichannel",
8: u"Duotone",
9: u"Lab Color",
}
COMPRESSION_NAME = {
0: "Raw data",
1: "RLE",
}
def validate(self):
if self.stream.readBytes(0, 4) != "8BPS":
return "Invalid signature"
return True
def createFields(self):
yield String(self, "signature", 4, "PSD signature (8BPS)", charset="ASCII")
yield UInt16(self, "version")
yield NullBytes(self, "reserved[]", 6)
yield UInt16(self, "nb_channels")
yield UInt32(self, "width")
yield UInt32(self, "height")
yield UInt16(self, "depth")
yield Enum(UInt16(self, "color_mode"), self.COLOR_MODE)
# Mode data
yield UInt32(self, "mode_data_size")
size = self["mode_data_size"].value
if size:
yield RawBytes(self, "mode_data", size)
# Resources
yield Config(self, "config")
# Reserved
yield UInt32(self, "reserved_data_size")
size = self["reserved_data_size"].value
if size:
yield RawBytes(self, "reserved_data", size)
yield Enum(UInt16(self, "compression"), self.COMPRESSION_NAME)
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "end", size)
# midgetspy_Sick-Beard/lib/hachoir_parser/image/tga.py
"""
Truevision Targa Graphic (TGA) picture parser.
Author: Victor Stinner
Creation: 18 december 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import FieldSet, UInt8, UInt16, Enum, RawBytes
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.image.common import PaletteRGB
class Line(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
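        # "/width" and "/bpp" are absolute paths resolved against the
        # root parser, so each line gets its size from the TGA header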
self._size = self["/width"].value * self["/bpp"].value
def createFields(self):
for x in xrange(self["/width"].value):
yield UInt8(self, "pixel[]")
class Pixels(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["/width"].value * self["/height"].value * self["/bpp"].value
def createFields(self):
if self["/options"].value == 0:
RANGE = xrange(self["/height"].value-1,-1,-1)
else:
RANGE = xrange(self["/height"].value)
for y in RANGE:
yield Line(self, "line[%u]" % y)
class TargaFile(Parser):
PARSER_TAGS = {
"id": "targa",
"category": "image",
"file_ext": ("tga",),
"mime": (u"image/targa", u"image/tga", u"image/x-tga"),
"min_size": 18*8,
"description": u"Truevision Targa Graphic (TGA)"
}
CODEC_NAME = {
1: u"8-bit uncompressed",
2: u"24-bit uncompressed",
9: u"8-bit RLE",
10: u"24-bit RLE",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["version"].value != 1:
return "Unknown version"
if self["codec"].value not in self.CODEC_NAME:
return "Unknown codec"
if self["x_min"].value != 0 or self["y_min"].value != 0:
return "(x_min, y_min) is not (0,0)"
if self["bpp"].value not in (8, 24):
return "Unknown bits/pixel value"
return True
def createFields(self):
yield UInt8(self, "hdr_size", "Header size in bytes")
yield UInt8(self, "version", "Targa version (always one)")
yield Enum(UInt8(self, "codec", "Pixels encoding"), self.CODEC_NAME)
yield UInt16(self, "palette_ofs", "Palette absolute file offset")
yield UInt16(self, "nb_color", "Number of color")
yield UInt8(self, "color_map_size", "Color map entry size")
yield UInt16(self, "x_min")
yield UInt16(self, "y_min")
yield UInt16(self, "width")
yield UInt16(self, "height")
yield UInt8(self, "bpp", "Bits per pixel")
yield UInt8(self, "options", "Options (0: vertical mirror)")
if self["bpp"].value == 8:
yield PaletteRGB(self, "palette", 256)
if self["codec"].value == 1:
yield Pixels(self, "pixels")
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw_pixels", size)
# midgetspy_Sick-Beard/lib/hachoir_parser/image/__init__.py
from lib.hachoir_parser.image.bmp import BmpFile
from lib.hachoir_parser.image.gif import GifFile
from lib.hachoir_parser.image.ico import IcoFile
from lib.hachoir_parser.image.jpeg import JpegFile
from lib.hachoir_parser.image.pcx import PcxFile
from lib.hachoir_parser.image.psd import PsdFile
from lib.hachoir_parser.image.png import PngFile
from lib.hachoir_parser.image.tga import TargaFile
from lib.hachoir_parser.image.tiff import TiffFile
from lib.hachoir_parser.image.wmf import WMF_File
from lib.hachoir_parser.image.xcf import XcfFile
# midgetspy_Sick-Beard/lib/hachoir_parser/image/iptc.py
"""
IPTC metadata parser (can be found in a JPEG picture for example)
Sources:
- Image-MetaData Perl module:
http://www.annocpan.org/~BETTELLI/Image-MetaData-JPEG-0.15/...
...lib/Image/MetaData/JPEG/TagLists.pod
- IPTC tag name and description:
http://peccatte.karefil.com/software/IPTCTableau.pdf
Author: Victor Stinner
"""
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, String, RawBytes, NullBytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal
def IPTC_String(parent, name, desc=None):
# Charset may be utf-8, ISO-8859-1, or ...
return String(parent, name, parent["size"].value, desc,
strip=" ")
dataset1 = {
}
dataset2 = {
0: ("record_version", "Record version (2 for JPEG)", UInt16),
5: ("obj_name", "Object name", None),
7: ("edit_stat", "Edit status", None),
10: ("urgency", "Urgency", UInt8),
15: ("category[]", "Category", None),
22: ("fixture", "Fixture identifier", IPTC_String),
25: ("keyword[]", "Keywords", IPTC_String),
30: ("release_date", "Release date", IPTC_String),
35: ("release_time", "Release time", IPTC_String),
40: ("instruction", "Special instructions", IPTC_String),
55: ("date_created", "Date created", IPTC_String),
60: ("time_created", "Time created (ISO 8601)", IPTC_String),
65: ("originating_prog", "Originating program", IPTC_String),
70: ("prog_ver", "Program version", IPTC_String),
80: ("author", "By-line (Author)", IPTC_String),
85: ("author_job", "By-line (Author precision)", IPTC_String),
90: ("city", "City", IPTC_String),
95: ("state", "Province / State", IPTC_String),
100: ("country_code", "Country / Primary location code", IPTC_String),
101: ("country_name", "Country / Primary location name", IPTC_String),
103: ("trans_ref", "Original transmission reference", IPTC_String),
105: ("headline", "Headline", IPTC_String),
110: ("credit", "Credit", IPTC_String),
115: ("source", "Source", IPTC_String),
116: ("copyright", "Copyright notice", IPTC_String),
120: ("caption", "Caption/Abstract", IPTC_String),
122: ("writer", "Writer/editor", IPTC_String),
231: ("history[]", "Document history (timestamp)", IPTC_String)
}
datasets = {1: dataset1, 2: dataset2}
class IPTC_Size(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
value = 0
for field in self:
value <<= 15
value += (field.value & 0x7fff)
self.createValue = lambda: value
def createFields(self):
while True:
field = UInt16(self, "value[]")
yield field
if field.value < 0x8000:
break
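# Worked example (illustrative): size words 0x8001 0x0002 decode as
# ((0x8001 & 0x7fff) << 15) + 0x0002 = 32770; the high bit of the first
# word marks a continuation, and a word below 0x8000 ends the size.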
class IPTC_Chunk(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
number = self["dataset_nb"].value
self.dataset_info = None
if number in datasets:
tag = self["tag"].value
if tag in datasets[number]:
self.dataset_info = datasets[number][tag]
self._name = self.dataset_info[0]
self._description = self.dataset_info[1]
size_chunk = self["size"]
self._size = 3*8 + size_chunk.size + size_chunk.value*8
def createFields(self):
yield textHandler(UInt8(self, "signature", "IPTC signature (0x1c)"), hexadecimal)
if self["signature"].value != 0x1C:
raise ParserError("Wrong IPTC signature")
yield textHandler(UInt8(self, "dataset_nb", "Dataset number"), hexadecimal)
yield UInt8(self, "tag", "Tag")
yield IPTC_Size(self, "size", "Content size")
size = self["size"].value
if 0 < size:
if self.dataset_info:
cls = self.dataset_info[2]
else:
cls = None
if cls:
yield cls(self, "content")
else:
yield RawBytes(self, "content", size)
class IPTC(FieldSet):
def createFields(self):
while 5 <= (self._size - self.current_size)/8:
yield IPTC_Chunk(self, "chunk[]")
size = (self._size - self.current_size) / 8
if 0 < size:
yield NullBytes(self, "padding", size)
# midgetspy_Sick-Beard/lib/hachoir_parser/image/wmf.py
"""
Hachoir parser of Microsoft Windows Metafile (WMF) file format.
Documentation:
- Microsoft Windows Metafile; also known as: WMF,
Enhanced Metafile, EMF, APM
http://wvware.sourceforge.net/caolan/ora-wmf.html
- libwmf source code:
- include/libwmf/defs.h: enums
- src/player/meta.h: arguments parsers
- libemf source code
Author: Victor Stinner
Creation date: 26 december 2006
"""
MAX_FILESIZE = 50 * 1024 * 1024
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, StaticFieldSet, Enum,
MissingField, ParserError,
UInt32, Int32, UInt16, Int16, UInt8, NullBytes, RawBytes, String)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.tools import createDict
from lib.hachoir_parser.image.common import RGBA
POLYFILL_MODE = {1: "Alternate", 2: "Winding"}
BRUSH_STYLE = {
0: u"Solid",
1: u"Null",
2: u"Hollow",
3: u"Pattern",
4: u"Indexed",
5: u"DIB pattern",
6: u"DIB pattern point",
7: u"Pattern 8x8",
8: u"DIB pattern 8x8",
}
HATCH_STYLE = {
0: u"Horizontal", # -----
1: u"Vertical", # |||||
2: u"FDIAGONAL", # \\\\\
3: u"BDIAGONAL", # /////
4: u"Cross", # +++++
5: u"Diagonal cross", # xxxxx
}
PEN_STYLE = {
0: u"Solid",
1: u"Dash", # -------
2: u"Dot", # .......
3: u"Dash dot", # _._._._
4: u"Dash dot dot", # _.._.._
5: u"Null",
6: u"Inside frame",
7: u"User style",
8: u"Alternate",
}
# Binary raster operations
ROP2_DESC = {
1: u"Black (0)",
2: u"Not merge pen (DPon)",
3: u"Mask not pen (DPna)",
4: u"Not copy pen (PN)",
5: u"Mask pen not (PDna)",
6: u"Not (Dn)",
7: u"Xor pen (DPx)",
8: u"Not mask pen (DPan)",
9: u"Mask pen (DPa)",
10: u"Not xor pen (DPxn)",
11: u"No operation (D)",
12: u"Merge not pen (DPno)",
13: u"Copy pen (P)",
14: u"Merge pen not (PDno)",
15: u"Merge pen (DPo)",
16: u"White (1)",
}
def parseXY(parser):
yield Int16(parser, "x")
yield Int16(parser, "y")
def parseCreateBrushIndirect(parser):
yield Enum(UInt16(parser, "brush_style"), BRUSH_STYLE)
yield RGBA(parser, "color")
yield Enum(UInt16(parser, "brush_hatch"), HATCH_STYLE)
def parsePenIndirect(parser):
yield Enum(UInt16(parser, "pen_style"), PEN_STYLE)
yield UInt16(parser, "pen_width")
yield UInt16(parser, "pen_height")
yield RGBA(parser, "color")
def parsePolyFillMode(parser):
yield Enum(UInt16(parser, "operation"), POLYFILL_MODE)
def parseROP2(parser):
yield Enum(UInt16(parser, "operation"), ROP2_DESC)
def parseObjectID(parser):
yield UInt16(parser, "object_id")
class Point(FieldSet):
static_size = 32
def createFields(self):
yield Int16(self, "x")
yield Int16(self, "y")
def createDescription(self):
return "Point (%s, %s)" % (self["x"].value, self["y"].value)
def parsePolygon(parser):
yield UInt16(parser, "count")
for index in xrange(parser["count"].value):
yield Point(parser, "point[]")
META = {
0x0000: ("EOF", u"End of file", None),
0x001E: ("SAVEDC", u"Save device context", None),
0x0035: ("REALIZEPALETTE", u"Realize palette", None),
0x0037: ("SETPALENTRIES", u"Set palette entries", None),
0x00f7: ("CREATEPALETTE", u"Create palette", None),
0x0102: ("SETBKMODE", u"Set background mode", None),
0x0103: ("SETMAPMODE", u"Set mapping mode", None),
0x0104: ("SETROP2", u"Set foreground mix mode", parseROP2),
0x0106: ("SETPOLYFILLMODE", u"Set polygon fill mode", parsePolyFillMode),
    0x0107: ("SETSTRETCHBLTMODE", u"Set bitmap stretching mode", None),
0x0108: ("SETTEXTCHAREXTRA", u"Set text character extra", None),
0x0127: ("RESTOREDC", u"Restore device context", None),
0x012A: ("INVERTREGION", u"Invert region", None),
0x012B: ("PAINTREGION", u"Paint region", None),
0x012C: ("SELECTCLIPREGION", u"Select clipping region", None),
0x012D: ("SELECTOBJECT", u"Select object", parseObjectID),
0x012E: ("SETTEXTALIGN", u"Set text alignment", None),
0x0142: ("CREATEDIBPATTERNBRUSH", u"Create DIB brush with specified pattern", None),
0x01f0: ("DELETEOBJECT", u"Delete object", parseObjectID),
0x0201: ("SETBKCOLOR", u"Set background color", None),
0x0209: ("SETTEXTCOLOR", u"Set text color", None),
0x020A: ("SETTEXTJUSTIFICATION", u"Set text justification", None),
0x020B: ("SETWINDOWORG", u"Set window origin", parseXY),
0x020C: ("SETWINDOWEXT", u"Set window extends", parseXY),
0x020D: ("SETVIEWPORTORG", u"Set view port origin", None),
0x020E: ("SETVIEWPORTEXT", u"Set view port extends", None),
0x020F: ("OFFSETWINDOWORG", u"Offset window origin", None),
0x0211: ("OFFSETVIEWPORTORG", u"Offset view port origin", None),
0x0213: ("LINETO", u"Draw a line to", None),
0x0214: ("MOVETO", u"Move to", None),
0x0220: ("OFFSETCLIPRGN", u"Offset clipping rectangle", None),
0x0228: ("FILLREGION", u"Fill region", None),
0x0231: ("SETMAPPERFLAGS", u"Set mapper flags", None),
0x0234: ("SELECTPALETTE", u"Select palette", None),
0x02FB: ("CREATEFONTINDIRECT", u"Create font indirect", None),
0x02FA: ("CREATEPENINDIRECT", u"Create pen indirect", parsePenIndirect),
0x02FC: ("CREATEBRUSHINDIRECT", u"Create brush indirect", parseCreateBrushIndirect),
0x0324: ("POLYGON", u"Draw a polygon", parsePolygon),
0x0325: ("POLYLINE", u"Draw a polyline", None),
0x0410: ("SCALEWINDOWEXT", u"Scale window extends", None),
0x0412: ("SCALEVIEWPORTEXT", u"Scale view port extends", None),
0x0415: ("EXCLUDECLIPRECT", u"Exclude clipping rectangle", None),
0x0416: ("INTERSECTCLIPRECT", u"Intersect clipping rectangle", None),
0x0418: ("ELLIPSE", u"Draw an ellipse", None),
0x0419: ("FLOODFILL", u"Flood fill", None),
0x041B: ("RECTANGLE", u"Draw a rectangle", None),
0x041F: ("SETPIXEL", u"Set pixel", None),
    0x0429: ("FRAMEREGION", u"Frame region", None),
0x0521: ("TEXTOUT", u"Draw text", None),
0x0538: ("POLYPOLYGON", u"Draw multiple polygons", None),
0x0548: ("EXTFLOODFILL", u"Extend flood fill", None),
0x061C: ("ROUNDRECT", u"Draw a rounded rectangle", None),
0x061D: ("PATBLT", u"Pattern blitting", None),
0x0626: ("ESCAPE", u"Escape", None),
0x06FF: ("CREATEREGION", u"Create region", None),
0x0817: ("ARC", u"Draw an arc", None),
0x081A: ("PIE", u"Draw a pie", None),
0x0830: ("CHORD", u"Draw a chord", None),
0x0940: ("DIBBITBLT", u"DIB bit blitting", None),
0x0a32: ("EXTTEXTOUT", u"Draw text (extra)", None),
0x0b41: ("DIBSTRETCHBLT", u"DIB stretch blitting", None),
0x0d33: ("SETDIBTODEV", u"Set DIB to device", None),
0x0f43: ("STRETCHDIB", u"Stretch DIB", None),
}
META_NAME = createDict(META, 0)
META_DESC = createDict(META, 1)
#----------------------------------------------------------------------------
# EMF constants
# EMF mapping modes
EMF_MAPPING_MODE = {
1: "TEXT",
2: "LOMETRIC",
3: "HIMETRIC",
4: "LOENGLISH",
5: "HIENGLISH",
6: "TWIPS",
7: "ISOTROPIC",
8: "ANISOTROPIC",
}
#----------------------------------------------------------------------------
# EMF parser
def parseEmfMappingMode(parser):
yield Enum(Int32(parser, "mapping_mode"), EMF_MAPPING_MODE)
def parseXY32(parser):
yield Int32(parser, "x")
yield Int32(parser, "y")
def parseObjectID32(parser):
yield textHandler(UInt32(parser, "object_id"), hexadecimal)
def parseBrushIndirect(parser):
yield UInt32(parser, "ihBrush")
yield UInt32(parser, "style")
yield RGBA(parser, "color")
yield Int32(parser, "hatch")
class Point16(FieldSet):
static_size = 32
def createFields(self):
yield Int16(self, "x")
yield Int16(self, "y")
def createDescription(self):
return "Point16: (%i,%i)" % (self["x"].value, self["y"].value)
def parsePoint16array(parser):
yield RECT32(parser, "bounds")
yield UInt32(parser, "count")
for index in xrange(parser["count"].value):
yield Point16(parser, "point[]")
def parseGDIComment(parser):
yield UInt32(parser, "data_size")
size = parser["data_size"].value
if size:
yield RawBytes(parser, "data", size)
def parseICMMode(parser):
yield UInt32(parser, "icm_mode")
def parseExtCreatePen(parser):
yield UInt32(parser, "ihPen")
yield UInt32(parser, "offBmi")
yield UInt32(parser, "cbBmi")
yield UInt32(parser, "offBits")
yield UInt32(parser, "cbBits")
yield UInt32(parser, "pen_style")
yield UInt32(parser, "width")
yield UInt32(parser, "brush_style")
yield RGBA(parser, "color")
yield UInt32(parser, "hatch")
yield UInt32(parser, "nb_style")
for index in xrange(parser["nb_style"].value):
yield UInt32(parser, "style")
EMF_META = {
1: ("HEADER", u"Header", None),
2: ("POLYBEZIER", u"Draw poly bezier", None),
3: ("POLYGON", u"Draw polygon", None),
4: ("POLYLINE", u"Draw polyline", None),
5: ("POLYBEZIERTO", u"Draw poly bezier to", None),
6: ("POLYLINETO", u"Draw poly line to", None),
7: ("POLYPOLYLINE", u"Draw poly polyline", None),
8: ("POLYPOLYGON", u"Draw poly polygon", None),
9: ("SETWINDOWEXTEX", u"Set window extend EX", parseXY32),
10: ("SETWINDOWORGEX", u"Set window origin EX", parseXY32),
11: ("SETVIEWPORTEXTEX", u"Set viewport extend EX", parseXY32),
12: ("SETVIEWPORTORGEX", u"Set viewport origin EX", parseXY32),
13: ("SETBRUSHORGEX", u"Set brush org EX", None),
14: ("EOF", u"End of file", None),
15: ("SETPIXELV", u"Set pixel V", None),
16: ("SETMAPPERFLAGS", u"Set mapper flags", None),
17: ("SETMAPMODE", u"Set mapping mode", parseEmfMappingMode),
18: ("SETBKMODE", u"Set background mode", None),
19: ("SETPOLYFILLMODE", u"Set polyfill mode", None),
20: ("SETROP2", u"Set ROP2", None),
21: ("SETSTRETCHBLTMODE", u"Set stretching blitting mode", None),
22: ("SETTEXTALIGN", u"Set text align", None),
23: ("SETCOLORADJUSTMENT", u"Set color adjustment", None),
24: ("SETTEXTCOLOR", u"Set text color", None),
25: ("SETBKCOLOR", u"Set background color", None),
26: ("OFFSETCLIPRGN", u"Offset clipping region", None),
27: ("MOVETOEX", u"Move to EX", parseXY32),
28: ("SETMETARGN", u"Set meta region", None),
29: ("EXCLUDECLIPRECT", u"Exclude clipping rectangle", None),
30: ("INTERSECTCLIPRECT", u"Intersect clipping rectangle", None),
31: ("SCALEVIEWPORTEXTEX", u"Scale viewport extend EX", None),
32: ("SCALEWINDOWEXTEX", u"Scale window extend EX", None),
33: ("SAVEDC", u"Save device context", None),
34: ("RESTOREDC", u"Restore device context", None),
35: ("SETWORLDTRANSFORM", u"Set world transform", None),
36: ("MODIFYWORLDTRANSFORM", u"Modify world transform", None),
37: ("SELECTOBJECT", u"Select object", parseObjectID32),
38: ("CREATEPEN", u"Create pen", None),
39: ("CREATEBRUSHINDIRECT", u"Create brush indirect", parseBrushIndirect),
40: ("DELETEOBJECT", u"Delete object", parseObjectID32),
41: ("ANGLEARC", u"Draw angle arc", None),
42: ("ELLIPSE", u"Draw ellipse", None),
43: ("RECTANGLE", u"Draw rectangle", None),
44: ("ROUNDRECT", u"Draw rounded rectangle", None),
45: ("ARC", u"Draw arc", None),
46: ("CHORD", u"Draw chord", None),
47: ("PIE", u"Draw pie", None),
48: ("SELECTPALETTE", u"Select palette", None),
49: ("CREATEPALETTE", u"Create palette", None),
50: ("SETPALETTEENTRIES", u"Set palette entries", None),
51: ("RESIZEPALETTE", u"Resize palette", None),
52: ("REALIZEPALETTE", u"Realize palette", None),
53: ("EXTFLOODFILL", u"EXT flood fill", None),
54: ("LINETO", u"Draw line to", parseXY32),
55: ("ARCTO", u"Draw arc to", None),
56: ("POLYDRAW", u"Draw poly draw", None),
57: ("SETARCDIRECTION", u"Set arc direction", None),
58: ("SETMITERLIMIT", u"Set miter limit", None),
59: ("BEGINPATH", u"Begin path", None),
60: ("ENDPATH", u"End path", None),
61: ("CLOSEFIGURE", u"Close figure", None),
62: ("FILLPATH", u"Fill path", None),
63: ("STROKEANDFILLPATH", u"Stroke and fill path", None),
64: ("STROKEPATH", u"Stroke path", None),
65: ("FLATTENPATH", u"Flatten path", None),
66: ("WIDENPATH", u"Widen path", None),
67: ("SELECTCLIPPATH", u"Select clipping path", None),
    68: ("ABORTPATH", u"Abort path", None),
70: ("GDICOMMENT", u"GDI comment", parseGDIComment),
71: ("FILLRGN", u"Fill region", None),
72: ("FRAMERGN", u"Frame region", None),
73: ("INVERTRGN", u"Invert region", None),
74: ("PAINTRGN", u"Paint region", None),
75: ("EXTSELECTCLIPRGN", u"EXT select clipping region", None),
76: ("BITBLT", u"Bit blitting", None),
77: ("STRETCHBLT", u"Stretch blitting", None),
78: ("MASKBLT", u"Mask blitting", None),
79: ("PLGBLT", u"PLG blitting", None),
80: ("SETDIBITSTODEVICE", u"Set DIB bits to device", None),
81: ("STRETCHDIBITS", u"Stretch DIB bits", None),
82: ("EXTCREATEFONTINDIRECTW", u"EXT create font indirect W", None),
83: ("EXTTEXTOUTA", u"EXT text out A", None),
84: ("EXTTEXTOUTW", u"EXT text out W", None),
85: ("POLYBEZIER16", u"Draw poly bezier (16-bit)", None),
86: ("POLYGON16", u"Draw polygon (16-bit)", parsePoint16array),
87: ("POLYLINE16", u"Draw polyline (16-bit)", parsePoint16array),
88: ("POLYBEZIERTO16", u"Draw poly bezier to (16-bit)", parsePoint16array),
89: ("POLYLINETO16", u"Draw polyline to (16-bit)", parsePoint16array),
90: ("POLYPOLYLINE16", u"Draw poly polyline (16-bit)", None),
91: ("POLYPOLYGON16", u"Draw poly polygon (16-bit)", parsePoint16array),
92: ("POLYDRAW16", u"Draw poly draw (16-bit)", None),
93: ("CREATEMONOBRUSH", u"Create monobrush", None),
94: ("CREATEDIBPATTERNBRUSHPT", u"Create DIB pattern brush PT", None),
95: ("EXTCREATEPEN", u"EXT create pen", parseExtCreatePen),
96: ("POLYTEXTOUTA", u"Poly text out A", None),
97: ("POLYTEXTOUTW", u"Poly text out W", None),
98: ("SETICMMODE", u"Set ICM mode", parseICMMode),
99: ("CREATECOLORSPACE", u"Create color space", None),
100: ("SETCOLORSPACE", u"Set color space", None),
101: ("DELETECOLORSPACE", u"Delete color space", None),
102: ("GLSRECORD", u"GLS record", None),
103: ("GLSBOUNDEDRECORD", u"GLS bound ED record", None),
104: ("PIXELFORMAT", u"Pixel format", None),
}
EMF_META_NAME = createDict(EMF_META, 0)
EMF_META_DESC = createDict(EMF_META, 1)
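# Worked example (illustration only): createDict() above maps record numbers
# to one column of EMF_META, so EMF_META_NAME[27] == "MOVETOEX" and
# EMF_META_DESC[27] == u"Move to EX".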
class Function(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
if self.root.isEMF():
self._size = self["size"].value * 8
else:
self._size = self["size"].value * 16
def createFields(self):
if self.root.isEMF():
yield Enum(UInt32(self, "function"), EMF_META_NAME)
yield UInt32(self, "size")
try:
parser = EMF_META[self["function"].value][2]
except KeyError:
parser = None
else:
yield UInt32(self, "size")
yield Enum(UInt16(self, "function"), META_NAME)
try:
parser = META[self["function"].value][2]
except KeyError:
parser = None
if parser:
for field in parser(self):
yield field
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
def isValid(self):
func = self["function"]
return func.value in func.getEnum()
    def createDescription(self):
        if self.root.isEMF():
            table = EMF_META_DESC
        else:
            table = META_DESC
        try:
            return table[self["function"].value]
        except KeyError:
            return "Function %s" % self["function"].display
class RECT16(StaticFieldSet):
format = (
(Int16, "left"),
(Int16, "top"),
(Int16, "right"),
(Int16, "bottom"),
)
def createDescription(self):
return "%s: %ux%u at (%u,%u)" % (
self.__class__.__name__,
self["right"].value-self["left"].value,
self["bottom"].value-self["top"].value,
self["left"].value,
self["top"].value)
class RECT32(RECT16):
format = (
(Int32, "left"),
(Int32, "top"),
(Int32, "right"),
(Int32, "bottom"),
)
class PlaceableHeader(FieldSet):
"""
Header of Placeable Metafile (file extension .APM),
created by Aldus Corporation
"""
MAGIC = "\xD7\xCD\xC6\x9A\0\0" # (magic, handle=0x0000)
def createFields(self):
yield textHandler(UInt32(self, "signature", "Placeable Metafiles signature (0x9AC6CDD7)"), hexadecimal)
yield UInt16(self, "handle")
yield RECT16(self, "rect")
yield UInt16(self, "inch")
yield NullBytes(self, "reserved", 4)
yield textHandler(UInt16(self, "checksum"), hexadecimal)
class EMF_Header(FieldSet):
MAGIC = "\x20\x45\x4D\x46\0\0" # (magic, min_ver=0x0000)
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
LONG = Int32
yield UInt32(self, "type", "Record type (always 1)")
yield UInt32(self, "size", "Size of the header in bytes")
yield RECT32(self, "Bounds", "Inclusive bounds")
yield RECT32(self, "Frame", "Inclusive picture frame")
yield textHandler(UInt32(self, "signature", "Signature ID (always 0x464D4520)"), hexadecimal)
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "maj_ver", "Major version")
yield UInt32(self, "file_size", "Size of the file in bytes")
yield UInt32(self, "NumOfRecords", "Number of records in the metafile")
yield UInt16(self, "NumOfHandles", "Number of handles in the handle table")
yield NullBytes(self, "reserved", 2)
yield UInt32(self, "desc_size", "Size of description in 16-bit words")
yield UInt32(self, "desc_ofst", "Offset of description string in metafile")
yield UInt32(self, "nb_colors", "Number of color palette entries")
yield LONG(self, "width_px", "Width of reference device in pixels")
yield LONG(self, "height_px", "Height of reference device in pixels")
yield LONG(self, "width_mm", "Width of reference device in millimeters")
yield LONG(self, "height_mm", "Height of reference device in millimeters")
# Read description (if any)
offset = self["desc_ofst"].value
current = (self.absolute_address + self.current_size) // 8
size = self["desc_size"].value * 2
if offset == current and size:
yield String(self, "description", size, charset="UTF-16-LE", strip="\0 ")
# Read padding (if any)
size = self["size"].value - self.current_size//8
if size:
yield RawBytes(self, "padding", size)
class WMF_File(Parser):
PARSER_TAGS = {
"id": "wmf",
"category": "image",
"file_ext": ("wmf", "apm", "emf"),
"mime": (
u"image/wmf", u"image/x-wmf", u"image/x-win-metafile",
u"application/x-msmetafile", u"application/wmf", u"application/x-wmf",
u"image/x-emf"),
"magic": (
(PlaceableHeader.MAGIC, 0),
(EMF_Header.MAGIC, 40*8),
# WMF: file_type=memory, header size=9, version=3.0
("\0\0\x09\0\0\3", 0),
# WMF: file_type=disk, header size=9, version=3.0
("\1\0\x09\0\0\3", 0),
),
"min_size": 40*8,
"description": u"Microsoft Windows Metafile (WMF)",
}
endian = LITTLE_ENDIAN
FILE_TYPE = {0: "memory", 1: "disk"}
def validate(self):
if self.isEMF():
# Check EMF header
emf = self["emf_header"]
if emf["signature"].value != 0x464D4520:
return "Invalid signature"
if emf["type"].value != 1:
return "Invalid record type"
if emf["reserved"].value != "\0\0":
return "Invalid reserved"
else:
            # Check APM header
if self.isAPM():
amf = self["amf_header"]
if amf["handle"].value != 0:
return "Invalid handle"
if amf["reserved"].value != "\0\0\0\0":
return "Invalid reserved"
# Check common header
if self["file_type"].value not in (0, 1):
return "Invalid file type"
if self["header_size"].value != 9:
return "Invalid header size"
if self["nb_params"].value != 0:
return "Invalid number of parameters"
# Check first functions
for index in xrange(5):
try:
func = self["func[%u]" % index]
except MissingField:
if self.done:
return True
return "Unable to get function #%u" % index
except ParserError:
return "Unable to create function #%u" % index
            # Check that the function is a known record type
if not func.isValid():
return "Function #%u is invalid" % index
return True
def createFields(self):
if self.isEMF():
yield EMF_Header(self, "emf_header")
else:
if self.isAPM():
yield PlaceableHeader(self, "amf_header")
yield Enum(UInt16(self, "file_type"), self.FILE_TYPE)
yield UInt16(self, "header_size", "Size of header in 16-bit words (always 9)")
yield UInt8(self, "win_ver_min", "Minor version of Microsoft Windows")
yield UInt8(self, "win_ver_maj", "Major version of Microsoft Windows")
yield UInt32(self, "file_size", "Total size of the metafile in 16-bit words")
yield UInt16(self, "nb_obj", "Number of objects in the file")
yield UInt32(self, "max_record_size", "The size of largest record in 16-bit words")
yield UInt16(self, "nb_params", "Not Used (always 0)")
        while not self.eof:
yield Function(self, "func[]")
def isEMF(self):
"""File is in EMF format?"""
if 1 <= self.current_length:
return self[0].name == "emf_header"
        if self.size < 46*8:
            return False
magic = EMF_Header.MAGIC
return self.stream.readBytes(40*8, len(magic)) == magic
def isAPM(self):
"""File is in Aldus Placeable Metafiles format?"""
if 1 <= self.current_length:
return self[0].name == "amf_header"
else:
magic = PlaceableHeader.MAGIC
return (self.stream.readBytes(0, len(magic)) == magic)
def createDescription(self):
if self.isEMF():
return u"Microsoft Enhanced Metafile (EMF) picture"
elif self.isAPM():
return u"Aldus Placeable Metafile (APM) picture"
else:
return u"Microsoft Windows Metafile (WMF) picture"
def createMimeType(self):
if self.isEMF():
return u"image/x-emf"
else:
return u"image/wmf"
def createContentSize(self):
if self.isEMF():
return None
start = self["func[0]"].absolute_address
end = self.stream.searchBytes("\3\0\0\0\0\0", start, MAX_FILESIZE * 8)
if end is not None:
return end + 6*8
return None
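# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes hachoir_core's FileInputStream helper, as used
# elsewhere in this package; the helper name below is hypothetical.
def _demoDescribeMetafile(filename):
    """Return the human-readable description of a WMF/EMF/APM file."""
    from lib.hachoir_core.stream import FileInputStream
    parser = WMF_File(FileInputStream(filename))
    # createDescription() dispatches on isEMF()/isAPM(), see above
    return parser.createDescription()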
| 23,796 | Python | .py | 555 | 35.92973 | 111 | 0.610998 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,509 | tiff.py | midgetspy_Sick-Beard/lib/hachoir_parser/image/tiff.py | """
TIFF image parser.
Authors: Victor Stinner and Sebastien Ponce
Creation date: 30 september 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, SeekableFieldSet, ParserError, RootSeekableFieldSet,
UInt16, UInt32, Bytes, String)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_parser.image.exif import BasicIFDEntry
from lib.hachoir_core.tools import createDict
MAX_COUNT = 250
class IFDEntry(BasicIFDEntry):
static_size = 12*8
TAG_INFO = {
254: ("new_subfile_type", "New subfile type"),
255: ("subfile_type", "Subfile type"),
256: ("img_width", "Image width in pixels"),
257: ("img_height", "Image height in pixels"),
258: ("bits_per_sample", "Bits per sample"),
259: ("compression", "Compression method"),
262: ("photo_interpret", "Photometric interpretation"),
263: ("thres", "Thresholding"),
264: ("cell_width", "Cellule width"),
265: ("cell_height", "Cellule height"),
266: ("fill_order", "Fill order"),
269: ("doc_name", "Document name"),
270: ("description", "Image description"),
271: ("make", "Make"),
272: ("model", "Model"),
273: ("strip_ofs", "Strip offsets"),
274: ("orientation", "Orientation"),
277: ("sample_pixel", "Samples per pixel"),
278: ("row_per_strip", "Rows per strip"),
279: ("strip_byte", "Strip byte counts"),
280: ("min_sample_value", "Min sample value"),
281: ("max_sample_value", "Max sample value"),
282: ("xres", "X resolution"),
283: ("yres", "Y resolution"),
284: ("planar_conf", "Planar configuration"),
285: ("page_name", "Page name"),
286: ("xpos", "X position"),
287: ("ypos", "Y position"),
288: ("free_ofs", "Free offsets"),
289: ("free_byte", "Free byte counts"),
290: ("gray_resp_unit", "Gray response unit"),
291: ("gray_resp_curve", "Gray response curve"),
292: ("group3_opt", "Group 3 options"),
293: ("group4_opt", "Group 4 options"),
296: ("res_unit", "Resolution unit"),
297: ("page_nb", "Page number"),
301: ("color_respt_curve", "Color response curves"),
305: ("software", "Software"),
306: ("date_time", "Date time"),
315: ("artist", "Artist"),
316: ("host_computer", "Host computer"),
317: ("predicator", "Predicator"),
318: ("white_pt", "White point"),
319: ("prim_chomat", "Primary chromaticities"),
320: ("color_map", "Color map"),
321: ("half_tone_hints", "Halftone Hints"),
322: ("tile_width", "TileWidth"),
323: ("tile_length", "TileLength"),
324: ("tile_offsets", "TileOffsets"),
325: ("tile_byte_counts", "TileByteCounts"),
332: ("ink_set", "InkSet"),
333: ("ink_names", "InkNames"),
334: ("number_of_inks", "NumberOfInks"),
336: ("dot_range", "DotRange"),
337: ("target_printer", "TargetPrinter"),
338: ("extra_samples", "ExtraSamples"),
339: ("sample_format", "SampleFormat"),
340: ("smin_sample_value", "SMinSampleValue"),
341: ("smax_sample_value", "SMaxSampleValue"),
342: ("transfer_range", "TransferRange"),
512: ("jpeg_proc", "JPEGProc"),
513: ("jpeg_interchange_format", "JPEGInterchangeFormat"),
514: ("jpeg_interchange_format_length", "JPEGInterchangeFormatLength"),
515: ("jpeg_restart_interval", "JPEGRestartInterval"),
517: ("jpeg_lossless_predictors", "JPEGLosslessPredictors"),
518: ("jpeg_point_transforms", "JPEGPointTransforms"),
519: ("jpeg_qtables", "JPEGQTables"),
520: ("jpeg_dctables", "JPEGDCTables"),
521: ("jpeg_actables", "JPEGACTables"),
529: ("ycbcr_coefficients", "YCbCrCoefficients"),
530: ("ycbcr_subsampling", "YCbCrSubSampling"),
531: ("ycbcr_positioning", "YCbCrPositioning"),
532: ("reference_blackwhite", "ReferenceBlackWhite"),
33432: ("copyright", "Copyright"),
0x8769: ("ifd_pointer", "Pointer to next IFD entry"),
}
TAG_NAME = createDict(TAG_INFO, 0)
def __init__(self, *args):
FieldSet.__init__(self, *args)
tag = self["tag"].value
if tag in self.TAG_INFO:
self._name, self._description = self.TAG_INFO[tag]
else:
self._parser = None
class IFD(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = 16 + self["count"].value * IFDEntry.static_size
self._has_offset = False
def createFields(self):
yield UInt16(self, "count")
if MAX_COUNT < self["count"].value:
raise ParserError("TIFF IFD: Invalid count (%s)"
% self["count"].value)
for index in xrange(self["count"].value):
yield IFDEntry(self, "entry[]")
class ImageFile(SeekableFieldSet):
def __init__(self, parent, name, description, ifd):
SeekableFieldSet.__init__(self, parent, name, description, None)
self._has_offset = False
self._ifd = ifd
def createFields(self):
datas = {}
for entry in self._ifd:
            if not isinstance(entry, IFDEntry):
continue
for c in entry:
if c.name != "offset":
continue
self.seekByte(c.value, False)
desc = "data of ifd entry " + entry.name,
entryType = BasicIFDEntry.ENTRY_FORMAT[entry["type"].value]
count = entry["count"].value
if entryType == String:
yield String(self, entry.name, count, desc, "\0", "ISO-8859-1")
else:
d = Data(self, entry.name, desc, entryType, count)
datas[d.name] = d
yield d
break
# image data
if "strip_ofs" in datas and "strip_byte" in datas:
            for i in xrange(datas["strip_byte"]._count):
                self.seekByte(datas["strip_ofs"]["value[%u]" % i].value, False)
                yield Bytes(self, "strip[]", datas["strip_byte"]["value[%u]" % i].value)
class Data(FieldSet):
def __init__(self, parent, name, desc, type, count):
size = type.static_size * count
FieldSet.__init__(self, parent, name, desc, size)
self._count = count
self._type = type
def createFields(self):
for i in xrange(self._count):
yield self._type(self, "value[]")
class TiffFile(RootSeekableFieldSet, Parser):
PARSER_TAGS = {
"id": "tiff",
"category": "image",
"file_ext": ("tif", "tiff"),
"mime": (u"image/tiff",),
"min_size": 8*8,
# TODO: Re-enable magic
"magic": (("II\x2A\0", 0), ("MM\0\x2A", 0)),
"description": "TIFF picture"
}
# Correct endian is set in constructor
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
if self.stream.readBytes(0, 2) == "MM":
self.endian = BIG_ENDIAN
Parser.__init__(self, stream, **args)
def validate(self):
endian = self.stream.readBytes(0, 2)
if endian not in ("MM", "II"):
return "Invalid endian (%r)" % endian
if self["version"].value != 42:
return "Unknown TIFF version"
return True
def createFields(self):
yield String(self, "endian", 2, 'Endian ("II" or "MM")', charset="ASCII")
yield UInt16(self, "version", "TIFF version number")
offset = UInt32(self, "img_dir_ofs[]", "Next image directory offset (in bytes from the beginning)")
yield offset
ifds = []
while True:
if offset.value == 0:
break
self.seekByte(offset.value, relative=False)
ifd = IFD(self, "ifd[]", "Image File Directory", None)
ifds.append(ifd)
yield ifd
offset = UInt32(self, "img_dir_ofs[]", "Next image directory offset (in bytes from the beginning)")
yield offset
for ifd in ifds:
image = ImageFile(self, "image[]", "Image File", ifd)
yield image
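# Hedged usage sketch (illustration only; not part of the original module).
# Walk the first IFD and print each entry's type and count. Assumes
# hachoir_core's FileInputStream; the helper name is hypothetical.
def _demoListTiffEntries(filename):
    from lib.hachoir_core.stream import FileInputStream
    tiff = TiffFile(FileInputStream(filename))
    for entry in tiff["ifd[0]"]:
        if isinstance(entry, IFDEntry):
            print "%s: type=%u count=%u" % (
                entry.name, entry["type"].value, entry["count"].value)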
| 8,407 | Python | .py | 192 | 34.5 | 111 | 0.56979 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,510 | bmp.py | midgetspy_Sick-Beard/lib/hachoir_parser/image/bmp.py | """
Microsoft Bitmap picture parser.
- file extension: ".bmp"
Author: Victor Stinner
Creation: 16 december 2005
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt32, Bits,
String, RawBytes, Enum,
PaddingBytes, NullBytes, createPaddingField)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_parser.image.common import RGB, PaletteRGBA
from lib.hachoir_core.tools import alignValue
class Pixel4bit(Bits):
static_size = 4
def __init__(self, parent, name):
Bits.__init__(self, parent, name, 4)
class ImageLine(FieldSet):
def __init__(self, parent, name, width, pixel_class):
FieldSet.__init__(self, parent, name)
self._pixel = pixel_class
self._width = width
self._size = alignValue(self._width * self._pixel.static_size, 32)
def createFields(self):
for x in xrange(self._width):
yield self._pixel(self, "pixel[]")
size = self.size - self.current_size
if size:
yield createPaddingField(self, size)
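# Row alignment example (illustration only): BMP rows are padded to 32 bits,
# so a 3-pixel wide, 24 bpp line occupies alignValue(3 * 24, 32) = 96 bits,
# i.e. 9 bytes of pixel data plus 3 bytes of padding.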
class ImagePixels(FieldSet):
def __init__(self, parent, name, width, height, pixel_class, size=None):
FieldSet.__init__(self, parent, name, size=size)
self._width = width
self._height = height
self._pixel = pixel_class
def createFields(self):
for y in xrange(self._height-1, -1, -1):
yield ImageLine(self, "line[%u]" % y, self._width, self._pixel)
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding", size)
class CIEXYZ(FieldSet):
def createFields(self):
yield UInt32(self, "x")
yield UInt32(self, "y")
yield UInt32(self, "z")
class BmpHeader(FieldSet):
color_space_name = {
1: "Business (Saturation)",
2: "Graphics (Relative)",
4: "Images (Perceptual)",
8: "Absolute colormetric (Absolute)",
}
def getFormatVersion(self):
if "gamma_blue" in self:
return 4
if "important_color" in self:
return 3
return 2
def createFields(self):
# Version 2 (12 bytes)
yield UInt32(self, "header_size", "Header size")
yield UInt32(self, "width", "Width (pixels)")
yield UInt32(self, "height", "Height (pixels)")
yield UInt16(self, "nb_plan", "Number of plan (=1)")
yield UInt16(self, "bpp", "Bits per pixel") # may be zero for PNG/JPEG picture
# Version 3 (40 bytes)
if self["header_size"].value < 40:
return
yield Enum(UInt32(self, "compression", "Compression method"), BmpFile.COMPRESSION_NAME)
yield UInt32(self, "image_size", "Image size (bytes)")
yield UInt32(self, "horizontal_dpi", "Horizontal DPI")
yield UInt32(self, "vertical_dpi", "Vertical DPI")
yield UInt32(self, "used_colors", "Number of color used")
yield UInt32(self, "important_color", "Number of import colors")
# Version 4 (108 bytes)
if self["header_size"].value < 108:
return
yield textHandler(UInt32(self, "red_mask"), hexadecimal)
yield textHandler(UInt32(self, "green_mask"), hexadecimal)
yield textHandler(UInt32(self, "blue_mask"), hexadecimal)
yield textHandler(UInt32(self, "alpha_mask"), hexadecimal)
yield Enum(UInt32(self, "color_space"), self.color_space_name)
yield CIEXYZ(self, "red_primary")
yield CIEXYZ(self, "green_primary")
yield CIEXYZ(self, "blue_primary")
yield UInt32(self, "gamma_red")
yield UInt32(self, "gamma_green")
yield UInt32(self, "gamma_blue")
def parseImageData(parent, name, size, header):
if ("compression" not in header) or (header["compression"].value in (0, 3)):
width = header["width"].value
height = header["height"].value
bpp = header["bpp"].value
if bpp == 32:
cls = UInt32
elif bpp == 24:
cls = RGB
elif bpp == 8:
cls = UInt8
elif bpp == 4:
cls = Pixel4bit
else:
cls = None
if cls:
return ImagePixels(parent, name, width, height, cls, size=size*8)
return RawBytes(parent, name, size)
class BmpFile(Parser):
PARSER_TAGS = {
"id": "bmp",
"category": "image",
"file_ext": ("bmp",),
"mime": (u"image/x-ms-bmp", u"image/x-bmp"),
"min_size": 30*8,
# "magic": (("BM", 0),),
"magic_regex": ((
# "BM", <filesize>, <reserved>, header_size=(12|40|108)
"BM.{4}.{8}[\x0C\x28\x6C]\0{3}",
0),),
"description": "Microsoft bitmap (BMP) picture"
}
endian = LITTLE_ENDIAN
COMPRESSION_NAME = {
0: u"Uncompressed",
1: u"RLE 8-bit",
2: u"RLE 4-bit",
3: u"Bitfields",
4: u"JPEG",
5: u"PNG",
}
def validate(self):
if self.stream.readBytes(0, 2) != 'BM':
return "Wrong file signature"
if self["header/header_size"].value not in (12, 40, 108):
return "Unknown header size (%s)" % self["header_size"].value
if self["header/nb_plan"].value != 1:
return "Invalid number of planes"
return True
def createFields(self):
yield String(self, "signature", 2, "Header (\"BM\")", charset="ASCII")
yield UInt32(self, "file_size", "File size (bytes)")
yield PaddingBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "data_start", "Data start position")
yield BmpHeader(self, "header")
# Compute number of color
header = self["header"]
bpp = header["bpp"].value
if 0 < bpp <= 8:
if "used_colors" in header and header["used_colors"].value:
nb_color = header["used_colors"].value
else:
nb_color = (1 << bpp)
else:
nb_color = 0
# Color palette (if any)
if nb_color:
yield PaletteRGBA(self, "palette", nb_color)
# Seek to data start
field = self.seekByte(self["data_start"].value)
if field:
yield field
# Image pixels
size = min(self["file_size"].value-self["data_start"].value, (self.size - self.current_size)//8)
yield parseImageData(self, "pixels", size, header)
def createDescription(self):
return u"Microsoft Bitmap version %s" % self["header"].getFormatVersion()
def createContentSize(self):
return self["file_size"].value * 8
| 6,706 | Python | .py | 170 | 31.170588 | 104 | 0.595915 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,511 | xcf.py | midgetspy_Sick-Beard/lib/hachoir_parser/image/xcf.py | """
Gimp image parser (XCF file, ".xcf" extension).
Information about the XCF file format can be found in the Gimp source
code, which can be read online through CVS:
http://cvs.gnome.org/viewcvs/gimp/app/xcf/
\--> files xcf-read.c and xcf-load.c
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet, ParserError,
UInt8, UInt32, Enum, Float32, String, PascalString32, RawBytes)
from lib.hachoir_parser.image.common import RGBA
from lib.hachoir_core.endian import NETWORK_ENDIAN
class XcfCompression(FieldSet):
static_size = 8
COMPRESSION_NAME = {
0: u"None",
1: u"RLE",
2: u"Zlib",
3: u"Fractal"
}
def createFields(self):
yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME)
class XcfResolution(StaticFieldSet):
format = (
(Float32, "xres", "X resolution in DPI"),
(Float32, "yres", "Y resolution in DPI")
)
class XcfTattoo(StaticFieldSet):
format = ((UInt32, "tattoo", "Tattoo"),)
class LayerOffsets(StaticFieldSet):
format = (
(UInt32, "ofst_x", "Offset X"),
(UInt32, "ofst_y", "Offset Y")
)
class LayerMode(FieldSet):
static_size = 32
MODE_NAME = {
0: u"Normal",
1: u"Dissolve",
2: u"Behind",
3: u"Multiply",
4: u"Screen",
5: u"Overlay",
6: u"Difference",
7: u"Addition",
8: u"Subtract",
9: u"Darken only",
10: u"Lighten only",
11: u"Hue",
12: u"Saturation",
13: u"Color",
14: u"Value",
15: u"Divide",
16: u"Dodge",
17: u"Burn",
18: u"Hard light",
19: u"Soft light",
20: u"Grain extract",
21: u"Grain merge",
22: u"Color erase"
}
def createFields(self):
yield Enum(UInt32(self, "mode", "Layer mode"), self.MODE_NAME)
class GimpBoolean(UInt32):
def __init__(self, parent, name):
UInt32.__init__(self, parent, name)
def createValue(self):
return 1 == UInt32.createValue(self)
class XcfUnit(StaticFieldSet):
format = ((UInt32, "unit", "Unit"),)
class XcfParasiteEntry(FieldSet):
def createFields(self):
yield PascalString32(self, "name", "Name", strip="\0", charset="UTF-8")
yield UInt32(self, "flags", "Flags")
yield PascalString32(self, "data", "Data", strip=" \0", charset="UTF-8")
class XcfLevel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "offset", "Offset")
offset = self["offset"].value
if offset == 0:
return
data_offsets = []
while (self.absolute_address + self.current_size)/8 < offset:
chunk = UInt32(self, "data_offset[]", "Data offset")
yield chunk
if chunk.value == 0:
break
data_offsets.append(chunk)
if (self.absolute_address + self.current_size)/8 != offset:
raise ParserError("Problem with level offset.")
previous = offset
for chunk in data_offsets:
data_offset = chunk.value
size = data_offset - previous
yield RawBytes(self, "data[]", size, "Data content of %s" % chunk.name)
previous = data_offset
class XcfHierarchy(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width")
yield UInt32(self, "height", "Height")
yield UInt32(self, "bpp", "Bits/pixel")
offsets = []
while True:
chunk = UInt32(self, "offset[]", "Level offset")
yield chunk
if chunk.value == 0:
break
offsets.append(chunk.value)
for offset in offsets:
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfLevel(self, "level[]", "Level")
        # yield XcfChannel(self, "channel[]", "Channel")
class XcfChannel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Channel width")
yield UInt32(self, "height", "Channel height")
yield PascalString32(self, "name", "Channel name", strip="\0", charset="UTF-8")
for field in readProperties(self):
yield field
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
def createDescription(self):
return 'Channel "%s"' % self["name"].value
class XcfLayer(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Layer width in pixels")
yield UInt32(self, "height", "Layer height in pixels")
yield Enum(UInt32(self, "type", "Layer type"), XcfFile.IMAGE_TYPE_NAME)
yield PascalString32(self, "name", "Layer name", strip="\0", charset="UTF-8")
for prop in readProperties(self):
yield prop
# --
# TODO: Hack for Gimp 1.2 files
# --
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield UInt32(self, "mask_ofs", "Layer mask offset")
padding = self.seekByte(self["hierarchy_ofs"].value, relative=False)
if padding is not None:
yield padding
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
# TODO: Read layer mask if needed: self["mask_ofs"].value != 0
def createDescription(self):
return 'Layer "%s"' % self["name"].value
class XcfParasites(FieldSet):
def createFields(self):
size = self["../size"].value * 8
while self.current_size < size:
yield XcfParasiteEntry(self, "parasite[]", "Parasite")
class XcfProperty(FieldSet):
PROP_COMPRESSION = 17
PROP_RESOLUTION = 19
PROP_PARASITES = 21
TYPE_NAME = {
0: u"End",
1: u"Colormap",
2: u"Active layer",
3: u"Active channel",
4: u"Selection",
5: u"Floating selection",
6: u"Opacity",
7: u"Mode",
8: u"Visible",
9: u"Linked",
10: u"Lock alpha",
11: u"Apply mask",
12: u"Edit mask",
13: u"Show mask",
14: u"Show masked",
15: u"Offsets",
16: u"Color",
17: u"Compression",
18: u"Guides",
19: u"Resolution",
20: u"Tattoo",
21: u"Parasites",
22: u"Unit",
23: u"Paths",
24: u"User unit",
25: u"Vectors",
26: u"Text layer flags",
}
handler = {
6: RGBA,
7: LayerMode,
8: GimpBoolean,
9: GimpBoolean,
10: GimpBoolean,
11: GimpBoolean,
12: GimpBoolean,
13: GimpBoolean,
15: LayerOffsets,
17: XcfCompression,
19: XcfResolution,
20: XcfTattoo,
21: XcfParasites,
22: XcfUnit
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + self["size"].value) * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Property type"), self.TYPE_NAME)
yield UInt32(self, "size", "Property size")
size = self["size"].value
if 0 < size:
cls = self.handler.get(self["type"].value, None)
if cls:
yield cls(self, "data", size=size*8)
else:
yield RawBytes(self, "data", size, "Data")
def createDescription(self):
return "Property: %s" % self["type"].display
def readProperties(parser):
while True:
prop = XcfProperty(parser, "property[]")
yield prop
if prop["type"].value == 0:
return
class XcfFile(Parser):
PARSER_TAGS = {
"id": "xcf",
"category": "image",
"file_ext": ("xcf",),
"mime": (u"image/x-xcf", u"application/x-gimp-image"),
"min_size": (26 + 8 + 4 + 4)*8, # header+empty property+layer offset+channel offset
"magic": (
('gimp xcf file\0', 0),
('gimp xcf v002\0', 0),
),
"description": "Gimp (XCF) picture"
}
endian = NETWORK_ENDIAN
IMAGE_TYPE_NAME = {
0: u"RGB",
1: u"Gray",
2: u"Indexed"
}
def validate(self):
if self.stream.readBytes(0, 14) not in ('gimp xcf file\0', 'gimp xcf v002\0'):
return "Wrong signature"
return True
def createFields(self):
# Read signature
yield String(self, "signature", 14, "Gimp picture signature (ends with nul byte)", charset="ASCII")
# Read image general informations (width, height, type)
yield UInt32(self, "width", "Image width")
yield UInt32(self, "height", "Image height")
yield Enum(UInt32(self, "type", "Image type"), self.IMAGE_TYPE_NAME)
for prop in readProperties(self):
yield prop
# Read layer offsets
layer_offsets = []
while True:
chunk = UInt32(self, "layer_offset[]", "Layer offset")
yield chunk
if chunk.value == 0:
break
layer_offsets.append(chunk.value)
# Read channel offsets
channel_offsets = []
while True:
chunk = UInt32(self, "channel_offset[]", "Channel offset")
yield chunk
if chunk.value == 0:
break
channel_offsets.append(chunk.value)
# Read layers
for index, offset in enumerate(layer_offsets):
if index+1 < len(layer_offsets):
size = (layer_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding:
yield padding
yield XcfLayer(self, "layer[]", size=size)
# Read channels
for index, offset in enumerate(channel_offsets):
if index+1 < len(channel_offsets):
size = (channel_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfChannel(self, "channel[]", "Channel", size=size)
| 10,369 | Python | .py | 291 | 26.780069 | 108 | 0.568938 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,512 | pcx.py | midgetspy_Sick-Beard/lib/hachoir_parser/image/pcx.py | """
PCX picture filter.
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (
UInt8, UInt16,
PaddingBytes, RawBytes,
Enum)
from lib.hachoir_parser.image.common import PaletteRGB
from lib.hachoir_core.endian import LITTLE_ENDIAN
class PcxFile(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "pcx",
"category": "image",
"file_ext": ("pcx",),
"mime": (u"image/x-pcx",),
"min_size": 128*8,
"description": "PC Paintbrush (PCX) picture"
}
compression_name = { 1: "Run-length encoding (RLE)" }
version_name = {
0: u"Version 2.5 of PC Paintbrush",
2: u"Version 2.8 with palette information",
3: u"Version 2.8 without palette information",
4: u"PC Paintbrush for Windows",
5: u"Version 3.0 (or greater) of PC Paintbrush"
}
def validate(self):
if self["id"].value != 10:
return "Wrong signature"
if self["version"].value not in self.version_name:
return "Unknown format version"
if self["bpp"].value not in (1, 2, 4, 8, 24, 32):
return "Unknown bits/pixel"
if self["reserved[0]"].value != "\0":
return "Invalid reserved value"
return True
def createFields(self):
yield UInt8(self, "id", "PCX identifier (10)")
yield Enum(UInt8(self, "version", "PCX version"), self.version_name)
yield Enum(UInt8(self, "compression", "Compression method"), self.compression_name)
yield UInt8(self, "bpp", "Bits / pixel")
yield UInt16(self, "xmin", "Minimum X")
yield UInt16(self, "ymin", "Minimum Y")
yield UInt16(self, "xmax", "Width minus one") # value + 1
yield UInt16(self, "ymax", "Height minus one") # value + 1
yield UInt16(self, "horiz_dpi", "Horizontal DPI")
yield UInt16(self, "vert_dpi", "Vertical DPI")
yield PaletteRGB(self, "palette_4bits", 16, "Palette (4 bits)")
yield PaddingBytes(self, "reserved[]", 1)
yield UInt8(self, "nb_color_plan", "Number of color plans")
yield UInt16(self, "bytes_per_line", "Bytes per line")
yield UInt16(self, "color_mode", "Color mode")
yield PaddingBytes(self, "reserved[]", 58)
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
nb_colors = 256
size = (self._size - self.current_size)/8
has_palette = self["bpp"].value == 8
if has_palette:
size -= nb_colors*3
yield RawBytes(self, "image_data", size, "Image data")
if has_palette:
yield PaletteRGB(self, "palette_8bits", nb_colors, "Palette (8 bit)")
| 2,745 | Python | .py | 65 | 34.338462 | 91 | 0.608533 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,513 | common.py | midgetspy_Sick-Beard/lib/hachoir_parser/image/common.py | from lib.hachoir_core.field import FieldSet, UserVector, UInt8
class RGB(FieldSet):
color_name = {
( 0, 0, 0): "Black",
(255, 0, 0): "Red",
( 0, 255, 0): "Green",
( 0, 0, 255): "Blue",
(255, 255, 255): "White",
}
static_size = 24
def createFields(self):
yield UInt8(self, "red", "Red")
yield UInt8(self, "green", "Green")
yield UInt8(self, "blue", "Blue")
def createDescription(self):
rgb = self["red"].value, self["green"].value, self["blue"].value
name = self.color_name.get(rgb)
if not name:
name = "#%02X%02X%02X" % rgb
return "RGB color: " + name
class RGBA(RGB):
static_size = 32
def createFields(self):
yield UInt8(self, "red", "Red")
yield UInt8(self, "green", "Green")
yield UInt8(self, "blue", "Blue")
yield UInt8(self, "alpha", "Alpha")
def createDescription(self):
description = RGB.createDescription(self)
opacity = self["alpha"].value*100/255
return "%s (opacity: %s%%)" % (description, opacity)
class PaletteRGB(UserVector):
item_class = RGB
item_name = "color"
def createDescription(self):
return "Palette of %u RGB colors" % len(self)
class PaletteRGBA(PaletteRGB):
item_class = RGBA
def createDescription(self):
return "Palette of %u RGBA colors" % len(self)
| 1,433 | Python | .py | 40 | 28.8 | 72 | 0.584538 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,514 | gif.py | midgetspy_Sick-Beard/lib/hachoir_parser/image/gif.py | """
GIF picture parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Enum, UInt8, UInt16,
Bit, Bits, NullBytes,
String, PascalString8, Character,
NullBits, RawBytes)
from lib.hachoir_parser.image.common import PaletteRGB
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import humanDuration
from lib.hachoir_core.text_handler import textHandler, displayHandler, hexadecimal
# Maximum image dimension (in pixel)
MAX_WIDTH = 6000
MAX_HEIGHT = MAX_WIDTH
MAX_FILE_SIZE = 100 * 1024 * 1024
class Image(FieldSet):
def createFields(self):
yield UInt16(self, "left", "Left")
yield UInt16(self, "top", "Top")
yield UInt16(self, "width", "Width")
yield UInt16(self, "height", "Height")
yield Bits(self, "bpp", 3, "Bits / pixel minus one")
yield NullBits(self, "nul", 2)
yield Bit(self, "sorted", "Sorted??")
yield Bit(self, "interlaced", "Interlaced?")
yield Bit(self, "has_local_map", "Use local color map?")
if self["has_local_map"].value:
nb_color = 1 << (1 + self["bpp"].value)
yield PaletteRGB(self, "local_map", nb_color, "Local color map")
yield UInt8(self, "code_size", "LZW Minimum Code Size")
while True:
blen = UInt8(self, "block_len[]", "Block Length")
yield blen
if blen.value != 0:
yield RawBytes(self, "data[]", blen.value, "Image Data")
else:
break
def createDescription(self):
return "Image: %ux%u pixels at (%u,%u)" % (
self["width"].value, self["height"].value,
self["left"].value, self["top"].value)
DISPOSAL_METHOD = {
0: "No disposal specified",
1: "Do not dispose",
2: "Restore to background color",
3: "Restore to previous",
}
NETSCAPE_CODE = {
1: "Loop count",
}
def parseApplicationExtension(parent):
yield PascalString8(parent, "app_name", "Application name")
yield UInt8(parent, "size")
size = parent["size"].value
if parent["app_name"].value == "NETSCAPE2.0" and size == 3:
yield Enum(UInt8(parent, "netscape_code"), NETSCAPE_CODE)
if parent["netscape_code"].value == 1:
yield UInt16(parent, "loop_count")
else:
yield RawBytes(parent, "raw", 2)
else:
yield RawBytes(parent, "raw", size)
yield NullBytes(parent, "terminator", 1, "Terminator (0)")
def parseGraphicControl(parent):
yield UInt8(parent, "size", "Block size (4)")
yield Bit(parent, "has_transp", "Has transparency")
yield Bit(parent, "user_input", "User input")
yield Enum(Bits(parent, "disposal_method", 3), DISPOSAL_METHOD)
yield NullBits(parent, "reserved[]", 3)
if parent["size"].value != 4:
raise ParserError("Invalid graphic control size")
yield displayHandler(UInt16(parent, "delay", "Delay time in millisecond"), humanDuration)
yield UInt8(parent, "transp", "Transparent color index")
yield NullBytes(parent, "terminator", 1, "Terminator (0)")
def parseComments(parent):
while True:
field = PascalString8(parent, "comment[]", strip=" \0\r\n\t")
yield field
if field.length == 0:
break
def parseTextExtension(parent):
yield UInt8(parent, "block_size", "Block Size")
yield UInt16(parent, "left", "Text Grid Left")
yield UInt16(parent, "top", "Text Grid Top")
yield UInt16(parent, "width", "Text Grid Width")
yield UInt16(parent, "height", "Text Grid Height")
yield UInt8(parent, "cell_width", "Character Cell Width")
yield UInt8(parent, "cell_height", "Character Cell Height")
yield UInt8(parent, "fg_color", "Foreground Color Index")
yield UInt8(parent, "bg_color", "Background Color Index")
while True:
field = PascalString8(parent, "comment[]", strip=" \0\r\n\t")
yield field
if field.length == 0:
break
def defaultExtensionParser(parent):
while True:
size = UInt8(parent, "size[]", "Size (in bytes)")
yield size
if 0 < size.value:
yield RawBytes(parent, "content[]", size.value)
else:
break
class Extension(FieldSet):
ext_code = {
0xf9: ("graphic_ctl[]", parseGraphicControl, "Graphic control"),
0xfe: ("comments[]", parseComments, "Comments"),
0xff: ("app_ext[]", parseApplicationExtension, "Application extension"),
0x01: ("text_ext[]", parseTextExtension, "Plain text extension")
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
code = self["code"].value
if code in self.ext_code:
self._name, self.parser, self._description = self.ext_code[code]
else:
self.parser = defaultExtensionParser
def createFields(self):
yield textHandler(UInt8(self, "code", "Extension code"), hexadecimal)
for field in self.parser(self):
yield field
def createDescription(self):
return "Extension: function %s" % self["func"].display
class ScreenDescriptor(FieldSet):
def createFields(self):
yield UInt16(self, "width", "Width")
yield UInt16(self, "height", "Height")
yield Bits(self, "bpp", 3, "Bits per pixel minus one")
yield Bit(self, "reserved", "(reserved)")
yield Bits(self, "color_res", 3, "Color resolution minus one")
yield Bit(self, "global_map", "Has global map?")
yield UInt8(self, "background", "Background color")
yield UInt8(self, "pixel_aspect_ratio", "Pixel Aspect Ratio")
def createDescription(self):
colors = 1 << (self["bpp"].value+1)
return "Screen descriptor: %ux%u pixels %u colors" \
% (self["width"].value, self["height"].value, colors)
class GifFile(Parser):
endian = LITTLE_ENDIAN
separator_name = {
"!": "Extension",
",": "Image",
";": "Terminator"
}
PARSER_TAGS = {
"id": "gif",
"category": "image",
"file_ext": ("gif",),
"mime": (u"image/gif",),
"min_size": (6 + 7 + 1 + 9)*8, # signature + screen + separator + image
"magic": (("GIF87a", 0), ("GIF89a", 0)),
"description": "GIF picture"
}
def validate(self):
if self.stream.readBytes(0, 6) not in ("GIF87a", "GIF89a"):
return "Wrong header"
if self["screen/width"].value == 0 or self["screen/height"].value == 0:
return "Invalid image size"
if MAX_WIDTH < self["screen/width"].value:
return "Image width too big (%u)" % self["screen/width"].value
if MAX_HEIGHT < self["screen/height"].value:
return "Image height too big (%u)" % self["screen/height"].value
return True
def createFields(self):
# Header
yield String(self, "magic", 3, "File magic code", charset="ASCII")
yield String(self, "version", 3, "GIF version", charset="ASCII")
yield ScreenDescriptor(self, "screen")
if self["screen/global_map"].value:
bpp = (self["screen/bpp"].value+1)
yield PaletteRGB(self, "color_map", 1 << bpp, "Color map")
self.color_map = self["color_map"]
else:
self.color_map = None
self.images = []
while True:
code = Enum(Character(self, "separator[]", "Separator code"), self.separator_name)
yield code
code = code.value
if code == "!":
yield Extension(self, "extensions[]")
elif code == ",":
yield Image(self, "image[]")
elif code == ";":
# GIF Terminator
break
else:
raise ParserError("Wrong GIF image separator: 0x%02X" % ord(code))
def createContentSize(self):
field = self["image[0]"]
start = field.absolute_address + field.size
end = start + MAX_FILE_SIZE*8
pos = self.stream.searchBytes("\0;", start, end)
if pos:
return pos + 16
return None
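# Hedged usage sketch (illustration only; not part of the original module).
# Count the image blocks (frames) of a GIF. Assumes hachoir_core's
# FileInputStream; the helper name is hypothetical.
def _demoCountGifFrames(filename):
    from lib.hachoir_core.stream import FileInputStream
    gif = GifFile(FileInputStream(filename))
    return sum(1 for field in gif if field.name.startswith("image["))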
| 8,192 | Python | .py | 199 | 33.211055 | 94 | 0.605273 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,515 | photoshop_metadata.py | midgetspy_Sick-Beard/lib/hachoir_parser/image/photoshop_metadata.py | from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32,
String, CString, PascalString8,
NullBytes, RawBytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.tools import alignValue, createDict
from lib.hachoir_parser.image.iptc import IPTC
from lib.hachoir_parser.common.win32 import PascalStringWin32
class Version(FieldSet):
def createFields(self):
yield UInt32(self, "version")
yield UInt8(self, "has_realm")
yield PascalStringWin32(self, "writer_name", charset="UTF-16-BE")
yield PascalStringWin32(self, "reader_name", charset="UTF-16-BE")
yield UInt32(self, "file_version")
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding", size)
class Photoshop8BIM(FieldSet):
TAG_INFO = {
0x03ed: ("res_info", None, "Resolution information"),
0x03f3: ("print_flag", None, "Print flags: labels, crop marks, colour bars, etc."),
0x03f5: ("col_half_info", None, "Colour half-toning information"),
0x03f8: ("color_trans_func", None, "Colour transfer function"),
0x0404: ("iptc", IPTC, "IPTC/NAA"),
0x0406: ("jpeg_qual", None, "JPEG quality"),
0x0408: ("grid_guide", None, "Grid guides informations"),
0x040a: ("copyright_flag", None, "Copyright flag"),
0x040c: ("thumb_res2", None, "Thumbnail resource (2)"),
0x040d: ("glob_angle", None, "Global lighting angle for effects"),
0x0411: ("icc_tagged", None, "ICC untagged (1 means intentionally untagged)"),
0x0414: ("base_layer_id", None, "Base value for new layers ID's"),
0x0419: ("glob_altitude", None, "Global altitude"),
0x041a: ("slices", None, "Slices"),
0x041e: ("url_list", None, "Unicode URL's"),
0x0421: ("version", Version, "Version information"),
0x2710: ("print_flag2", None, "Print flags (2)"),
}
TAG_NAME = createDict(TAG_INFO, 0)
CONTENT_HANDLER = createDict(TAG_INFO, 1)
TAG_DESC = createDict(TAG_INFO, 2)
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
try:
self._name, self.handler, self._description = self.TAG_INFO[self["tag"].value]
except KeyError:
self.handler = None
size = self["size"]
self._size = size.address + size.size + alignValue(size.value, 2) * 8
def createFields(self):
yield String(self, "signature", 4, "8BIM signature", charset="ASCII")
if self["signature"].value != "8BIM":
raise ParserError("Stream doesn't look like 8BIM item (wrong signature)!")
yield textHandler(UInt16(self, "tag"), hexadecimal)
if self.stream.readBytes(self.absolute_address + self.current_size, 4) != "\0\0\0\0":
yield PascalString8(self, "name")
size = 2 + (self["name"].size // 8) % 2
yield NullBytes(self, "name_padding", size)
else:
yield String(self, "name", 4, strip="\0")
yield UInt16(self, "size")
size = alignValue(self["size"].value, 2)
if not size:
return
if self.handler:
yield self.handler(self, "content", size=size*8)
else:
yield RawBytes(self, "content", size)
class PhotoshopMetadata(FieldSet):
def createFields(self):
yield CString(self, "signature", "Photoshop version")
if self["signature"].value == "Photoshop 3.0":
while not self.eof:
yield Photoshop8BIM(self, "item[]")
else:
size = (self._size - self.current_size) / 8
yield RawBytes(self, "rawdata", size)
| 3,729 | Python | .py | 77 | 39.974026 | 93 | 0.620954 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,516 | hlp.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/hlp.py | """
Microsoft Windows Help (HLP) parser for Hachoir project.
Documents:
- Windows Help File Format / Annotation File Format / SHG and MRB File Format
written by M. Winterhoff ([email protected])
found on http://www.wotsit.org/
Author: Victor Stinner
Creation date: 2007-09-03
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
Bits, Int32, UInt16, UInt32,
NullBytes, RawBytes, PaddingBytes, String)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import (textHandler, hexadecimal,
displayHandler, humanFilesize)
class FileEntry(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["res_space"].value * 8
def createFields(self):
yield displayHandler(UInt32(self, "res_space", "Reserved space"), humanFilesize)
yield displayHandler(UInt32(self, "used_space", "Used space"), humanFilesize)
yield Bits(self, "file_flags", 8, "(=4)")
yield textHandler(UInt16(self, "magic"), hexadecimal)
yield Bits(self, "flags", 16)
yield displayHandler(UInt16(self, "page_size", "Page size in bytes"), humanFilesize)
yield String(self, "structure", 16, strip="\0", charset="ASCII")
yield NullBytes(self, "zero", 2)
yield UInt16(self, "nb_page_splits", "Number of page splits B+ tree has suffered")
yield UInt16(self, "root_page", "Page number of B+ tree root page")
yield PaddingBytes(self, "one", 2, pattern="\xFF")
yield UInt16(self, "nb_page", "Number of B+ tree pages")
yield UInt16(self, "nb_level", "Number of levels of B+ tree")
yield UInt16(self, "nb_entry", "Number of entries in B+ tree")
size = (self.size - self.current_size)//8
if size:
yield PaddingBytes(self, "reserved_space", size)
class HlpFile(Parser):
PARSER_TAGS = {
"id": "hlp",
"category": "misc",
"file_ext": ("hlp",),
"min_size": 32,
"description": "Microsoft Windows Help (HLP)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["magic"].value != 0x00035F3F:
return "Invalid magic"
if self["filesize"].value != self.stream.size//8:
return "Invalid magic"
return True
def createFields(self):
yield textHandler(UInt32(self, "magic"), hexadecimal)
yield UInt32(self, "dir_start", "Directory start")
yield Int32(self, "first_free_block", "First free block")
yield UInt32(self, "filesize", "File size in bytes")
yield self.seekByte(self["dir_start"].value)
yield FileEntry(self, "file[]")
size = (self.size - self.current_size)//8
if size:
yield RawBytes(self, "end", size)
| 2,836 | Python | .py | 63 | 38.031746 | 92 | 0.648188 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,517 | lnk.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/lnk.py | """
Windows Shortcut (.lnk) parser.
Documents:
- The Windows Shortcut File Format (document version 1.0)
Reverse-engineered by Jesse Hager
http://www.i2s-lab.com/Papers/The_Windows_Shortcut_File_Format.pdf
- Wine source code:
http://source.winehq.org/source/include/shlobj.h (SHELL_LINK_DATA_FLAGS enum)
http://source.winehq.org/source/dlls/shell32/pidl.h
- Microsoft:
http://msdn2.microsoft.com/en-us/library/ms538128.aspx
Author: Robert Xiao, Victor Stinner
Changes:
2007-06-27 - Robert Xiao
* Fixes to FileLocationInfo to correctly handle Unicode paths
2007-06-13 - Robert Xiao
* ItemID, FileLocationInfo and ExtraInfo structs, correct Unicode string handling
2007-03-15 - Victor Stinner
* Creation of the parser
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
CString, String,
UInt32, UInt16, UInt8,
Bit, Bits, PaddingBits,
TimestampWin64, DateTimeMSDOS32,
NullBytes, PaddingBytes, RawBytes, Enum)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_parser.common.win32 import GUID
from lib.hachoir_parser.common.msdos import MSDOSFileAttr16, MSDOSFileAttr32
from lib.hachoir_core.text_handler import filesizeHandler
from lib.hachoir_core.tools import paddingSize
class ItemIdList(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (self["size"].value+2) * 8
def createFields(self):
yield UInt16(self, "size", "Size of item ID list")
while True:
item = ItemId(self, "itemid[]")
yield item
if not item["length"].value:
break
class ItemId(FieldSet):
ITEM_TYPE = {
0x1F: "GUID",
0x23: "Drive",
0x25: "Drive",
0x29: "Drive",
0x2E: "GUID",
0x2F: "Drive",
0x30: "Dir/File",
0x31: "Directory",
0x32: "File",
0x34: "File [Unicode Name]",
0x41: "Workgroup",
0x42: "Computer",
0x46: "Net Provider",
0x47: "Whole Network",
0x61: "MSITStore",
0x70: "Printer/RAS Connection",
0xB1: "History/Favorite",
0xC3: "Network Share",
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if self["length"].value:
self._size = self["length"].value * 8
else:
self._size = 16
def createFields(self):
yield UInt16(self, "length", "Length of Item ID Entry")
if not self["length"].value:
return
yield Enum(UInt8(self, "type"),self.ITEM_TYPE)
entrytype=self["type"].value
if entrytype in (0x1F, 0x2E, 0x70):
# GUID
yield RawBytes(self, "dummy", 1, "should be 0x50")
yield GUID(self, "guid")
elif entrytype in (0x23, 0x25, 0x29, 0x2F):
# Drive
yield String(self, "drive", self["length"].value-3, strip="\0")
elif entrytype in (0x30, 0x31, 0x32):
yield RawBytes(self, "dummy", 1, "should be 0x00")
yield UInt32(self, "size", "size of file; 0 for folders")
yield DateTimeMSDOS32(self, "date_time", "File/folder date and time")
yield MSDOSFileAttr16(self, "attribs", "File/folder attributes")
yield CString(self, "name", "File/folder name")
if self.root.hasUnicodeNames():
# Align to 2-bytes
n = paddingSize(self.current_size//8, 2)
if n:
yield PaddingBytes(self, "pad", n)
yield UInt16(self, "length_w", "Length of wide struct member")
yield RawBytes(self, "unknown[]", 6)
yield DateTimeMSDOS32(self, "creation_date_time", "File/folder creation date and time")
yield DateTimeMSDOS32(self, "access_date_time", "File/folder last access date and time")
yield RawBytes(self, "unknown[]", 4)
yield CString(self, "unicode_name", "File/folder name", charset="UTF-16-LE")
yield RawBytes(self, "unknown[]", 2)
else:
yield CString(self, "name_short", "File/folder short name")
elif entrytype in (0x41, 0x42, 0x46):
yield RawBytes(self, "unknown[]", 2)
yield CString(self, "name")
yield CString(self, "protocol")
yield RawBytes(self, "unknown[]", 2)
elif entrytype == 0x47:
# Whole Network
yield RawBytes(self, "unknown[]", 2)
yield CString(self, "name")
elif entrytype == 0xC3:
# Network Share
yield RawBytes(self, "unknown[]", 2)
yield CString(self, "name")
yield CString(self, "protocol")
yield CString(self, "description")
yield RawBytes(self, "unknown[]", 2)
else:
yield RawBytes(self, "raw", self["length"].value-3)
def createDescription(self):
if self["length"].value:
return "Item ID Entry: "+self.ITEM_TYPE.get(self["type"].value,"Unknown")
else:
return "End of Item ID List"
def formatVolumeSerial(field):
val = field.value
    return '%04X-%04X' % (val >> 16, val & 0xFFFF)
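# Worked example (illustration only): for a field value of 0x1234ABCD,
# formatVolumeSerial() returns "1234-ABCD".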
class LocalVolumeTable(FieldSet):
    VOLUME_TYPE = {
1: "No root directory",
2: "Removable (Floppy, Zip, etc.)",
3: "Fixed (Hard disk)",
4: "Remote (Network drive)",
5: "CD-ROM",
6: "Ram drive",
}
def createFields(self):
yield UInt32(self, "length", "Length of this structure")
yield Enum(UInt32(self, "volume_type", "Volume Type"),self.VOLUME_TYPE)
yield textHandler(UInt32(self, "volume_serial", "Volume Serial Number"), formatVolumeSerial)
yield UInt32(self, "label_offset", "Offset to volume label")
padding = self.seekByte(self["label_offset"].value)
if padding:
yield padding
yield CString(self, "drive")
def hasValue(self):
return bool(self["drive"].value)
def createValue(self):
return self["drive"].value
class NetworkVolumeTable(FieldSet):
def createFields(self):
yield UInt32(self, "length", "Length of this structure")
yield UInt32(self, "unknown[]")
yield UInt32(self, "share_name_offset", "Offset to share name")
yield UInt32(self, "unknown[]")
yield UInt32(self, "unknown[]")
padding = self.seekByte(self["share_name_offset"].value)
if padding:
yield padding
yield CString(self, "share_name")
def createValue(self):
return self["share_name"].value
class FileLocationInfo(FieldSet):
def createFields(self):
yield UInt32(self, "length", "Length of this structure")
if not self["length"].value:
return
yield UInt32(self, "first_offset_pos", "Position of first offset")
has_unicode_paths = (self["first_offset_pos"].value == 0x24)
yield Bit(self, "on_local_volume")
yield Bit(self, "on_network_volume")
yield PaddingBits(self, "reserved[]", 30)
yield UInt32(self, "local_info_offset", "Offset to local volume table; only meaningful if on_local_volume = 1")
yield UInt32(self, "local_pathname_offset", "Offset to local base pathname; only meaningful if on_local_volume = 1")
yield UInt32(self, "remote_info_offset", "Offset to network volume table; only meaningful if on_network_volume = 1")
yield UInt32(self, "pathname_offset", "Offset of remaining pathname")
if has_unicode_paths:
yield UInt32(self, "local_pathname_unicode_offset", "Offset to Unicode version of local base pathname; only meaningful if on_local_volume = 1")
yield UInt32(self, "pathname_unicode_offset", "Offset to Unicode version of remaining pathname")
if self["on_local_volume"].value:
padding = self.seekByte(self["local_info_offset"].value)
if padding:
yield padding
yield LocalVolumeTable(self, "local_volume_table", "Local Volume Table")
padding = self.seekByte(self["local_pathname_offset"].value)
if padding:
yield padding
yield CString(self, "local_base_pathname", "Local Base Pathname")
if has_unicode_paths:
padding = self.seekByte(self["local_pathname_unicode_offset"].value)
if padding:
yield padding
yield CString(self, "local_base_pathname_unicode", "Local Base Pathname in Unicode", charset="UTF-16-LE")
if self["on_network_volume"].value:
padding = self.seekByte(self["remote_info_offset"].value)
if padding:
yield padding
yield NetworkVolumeTable(self, "network_volume_table")
padding = self.seekByte(self["pathname_offset"].value)
if padding:
yield padding
yield CString(self, "final_pathname", "Final component of the pathname")
if has_unicode_paths:
padding = self.seekByte(self["pathname_unicode_offset"].value)
if padding:
yield padding
yield CString(self, "final_pathname_unicode", "Final component of the pathname in Unicode", charset="UTF-16-LE")
        padding = self.seekByte(self["length"].value)
        if padding:
            yield padding
class LnkString(FieldSet):
def createFields(self):
yield UInt16(self, "length", "Length of this string")
if self.root.hasUnicodeNames():
yield String(self, "data", self["length"].value*2, charset="UTF-16-LE")
else:
yield String(self, "data", self["length"].value, charset="ASCII")
def createValue(self):
return self["data"].value
class ColorRef(FieldSet):
''' COLORREF struct, 0x00bbggrr '''
static_size=32
def createFields(self):
yield UInt8(self, "red", "Red")
yield UInt8(self, "green", "Green")
yield UInt8(self, "blue", "Blue")
yield PaddingBytes(self, "pad", 1, "Padding (must be 0)")
def createDescription(self):
rgb = self["red"].value, self["green"].value, self["blue"].value
return "RGB Color: #%02X%02X%02X" % rgb
class ColorTableIndex(Bits):
def __init__(self, parent, name, size, description=None):
Bits.__init__(self, parent, name, size, None)
        self.desc = description
def createDescription(self):
assert hasattr(self, 'parent') and hasattr(self, 'value')
return "%s: %s"%(self.desc,
self.parent["color[%i]"%self.value].description)
class ExtraInfo(FieldSet):
INFO_TYPE={
0xA0000001: "Link Target Information", # EXP_SZ_LINK_SIG
0xA0000002: "Console Window Properties", # NT_CONSOLE_PROPS_SIG
0xA0000003: "Hostname and Other Stuff",
0xA0000004: "Console Codepage Information", # NT_FE_CONSOLE_PROPS_SIG
0xA0000005: "Special Folder Info", # EXP_SPECIAL_FOLDER_SIG
0xA0000006: "DarwinID (Windows Installer ID) Information", # EXP_DARWIN_ID_SIG
0xA0000007: "Custom Icon Details", # EXP_LOGO3_ID_SIG or EXP_SZ_ICON_SIG
}
SPECIAL_FOLDER = {
0: "DESKTOP",
1: "INTERNET",
2: "PROGRAMS",
3: "CONTROLS",
4: "PRINTERS",
5: "PERSONAL",
6: "FAVORITES",
7: "STARTUP",
8: "RECENT",
9: "SENDTO",
10: "BITBUCKET",
11: "STARTMENU",
16: "DESKTOPDIRECTORY",
17: "DRIVES",
18: "NETWORK",
19: "NETHOOD",
20: "FONTS",
21: "TEMPLATES",
22: "COMMON_STARTMENU",
23: "COMMON_PROGRAMS",
24: "COMMON_STARTUP",
25: "COMMON_DESKTOPDIRECTORY",
26: "APPDATA",
27: "PRINTHOOD",
28: "LOCAL_APPDATA",
29: "ALTSTARTUP",
30: "COMMON_ALTSTARTUP",
31: "COMMON_FAVORITES",
32: "INTERNET_CACHE",
33: "COOKIES",
34: "HISTORY",
35: "COMMON_APPDATA",
36: "WINDOWS",
37: "SYSTEM",
38: "PROGRAM_FILES",
39: "MYPICTURES",
40: "PROFILE",
41: "SYSTEMX86",
42: "PROGRAM_FILESX86",
43: "PROGRAM_FILES_COMMON",
44: "PROGRAM_FILES_COMMONX86",
45: "COMMON_TEMPLATES",
46: "COMMON_DOCUMENTS",
47: "COMMON_ADMINTOOLS",
48: "ADMINTOOLS",
49: "CONNECTIONS",
53: "COMMON_MUSIC",
54: "COMMON_PICTURES",
55: "COMMON_VIDEO",
56: "RESOURCES",
57: "RESOURCES_LOCALIZED",
58: "COMMON_OEM_LINKS",
59: "CDBURN_AREA",
61: "COMPUTERSNEARME",
}
BOOL_ENUM = {
0: "False",
1: "True",
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if self["length"].value:
self._size = self["length"].value * 8
else:
self._size = 32
def createFields(self):
yield UInt32(self, "length", "Length of this structure")
if not self["length"].value:
return
yield Enum(textHandler(UInt32(self, "signature", "Signature determining the function of this structure"),hexadecimal),self.INFO_TYPE)
if self["signature"].value == 0xA0000003:
# Hostname and Other Stuff
yield UInt32(self, "remaining_length")
yield UInt32(self, "unknown[]")
yield String(self, "hostname", 16, "Computer hostname on which shortcut was last modified", strip="\0")
yield RawBytes(self, "unknown[]", 32)
yield RawBytes(self, "unknown[]", 32)
elif self["signature"].value == 0xA0000005:
# Special Folder Info
yield Enum(UInt32(self, "special_folder_id", "ID of the special folder"),self.SPECIAL_FOLDER)
yield UInt32(self, "offset", "Offset to Item ID entry")
elif self["signature"].value in (0xA0000001, 0xA0000006, 0xA0000007):
if self["signature"].value == 0xA0000001: # Link Target Information
object_name="target"
elif self["signature"].value == 0xA0000006: # DarwinID (Windows Installer ID) Information
object_name="darwinID"
else: # Custom Icon Details
object_name="icon_path"
yield CString(self, object_name, "Data (ASCII format)", charset="ASCII")
remaining = self["length"].value - self.current_size/8 - 260*2 # 260*2 = size of next part
if remaining:
yield RawBytes(self, "slack_space[]", remaining, "Data beyond end of string")
yield CString(self, object_name+'_unicode', "Data (Unicode format)", charset="UTF-16-LE", truncate="\0")
remaining = self["length"].value - self.current_size/8
if remaining:
yield RawBytes(self, "slack_space[]", remaining, "Data beyond end of string")
elif self["signature"].value == 0xA0000002:
# Console Window Properties
yield ColorTableIndex(self, "color_text", 4, "Screen text color index")
yield ColorTableIndex(self, "color_bg", 4, "Screen background color index")
yield NullBytes(self, "reserved[]", 1)
yield ColorTableIndex(self, "color_popup_text", 4, "Pop-up text color index")
yield ColorTableIndex(self, "color_popup_bg", 4, "Pop-up background color index")
yield NullBytes(self, "reserved[]", 1)
yield UInt16(self, "buffer_width", "Screen buffer width (character cells)")
yield UInt16(self, "buffer_height", "Screen buffer height (character cells)")
yield UInt16(self, "window_width", "Window width (character cells)")
yield UInt16(self, "window_height", "Window height (character cells)")
yield UInt16(self, "position_left", "Window distance from left edge (screen coords)")
yield UInt16(self, "position_top", "Window distance from top edge (screen coords)")
yield UInt32(self, "font_number")
yield UInt32(self, "input_buffer_size")
yield UInt16(self, "font_width", "Font width in pixels; 0 for a non-raster font")
yield UInt16(self, "font_height", "Font height in pixels; equal to the font size for non-raster fonts")
yield UInt32(self, "font_family")
yield UInt32(self, "font_weight")
yield String(self, "font_name_unicode", 64, "Font Name (Unicode format)", charset="UTF-16-LE", truncate="\0")
yield UInt32(self, "cursor_size", "Relative size of cursor (% of character size)")
yield Enum(UInt32(self, "full_screen", "Run console in full screen?"), self.BOOL_ENUM)
yield Enum(UInt32(self, "quick_edit", "Console uses quick-edit feature (using mouse to cut & paste)?"), self.BOOL_ENUM)
yield Enum(UInt32(self, "insert_mode", "Console uses insertion mode?"), self.BOOL_ENUM)
yield Enum(UInt32(self, "auto_position", "System automatically positions window?"), self.BOOL_ENUM)
yield UInt32(self, "history_size", "Size of the history buffer (in lines)")
yield UInt32(self, "history_count", "Number of history buffers (each process gets one up to this limit)")
yield Enum(UInt32(self, "history_no_dup", "Automatically eliminate duplicate lines in the history buffer?"), self.BOOL_ENUM)
for index in xrange(16):
yield ColorRef(self, "color[]")
elif self["signature"].value == 0xA0000004:
# Console Codepage Information
yield UInt32(self, "codepage", "Console's code page")
else:
yield RawBytes(self, "raw", self["length"].value-self.current_size/8)
def createDescription(self):
if self["length"].value:
return "Extra Info Entry: "+self["signature"].display
else:
return "End of Extra Info"
HOT_KEYS = {
0x00: u'None',
0x13: u'Pause',
0x14: u'Caps Lock',
0x21: u'Page Up',
0x22: u'Page Down',
0x23: u'End',
0x24: u'Home',
0x25: u'Left',
0x26: u'Up',
0x27: u'Right',
0x28: u'Down',
0x2d: u'Insert',
0x2e: u'Delete',
0x6a: u'Num *',
0x6b: u'Num +',
0x6d: u'Num -',
0x6e: u'Num .',
0x6f: u'Num /',
0x90: u'Num Lock',
0x91: u'Scroll Lock',
0xba: u';',
0xbb: u'=',
0xbc: u',',
0xbd: u'-',
0xbe: u'.',
0xbf: u'/',
0xc0: u'`',
0xdb: u'[',
0xdc: u'\\',
0xdd: u']',
0xde: u"'",
}
def text_hot_key(field):
assert hasattr(field, "value")
    val = field.value
if 0x30 <= val <= 0x39:
return unichr(val)
elif 0x41 <= val <= 0x5A:
return unichr(val)
elif 0x60 <= val <= 0x69:
return u'Numpad %c' % unichr(val-0x30)
    elif 0x70 <= val <= 0x87:
        return u'F%i' % (val-0x6F)
    elif val in HOT_KEYS:
        return HOT_KEYS[val]
    return unicode(val)
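# Worked examples for text_hot_key (illustration only; any object exposing a
# .value attribute works, such as the UInt8 "hot_key" field yielded below):
#   value 0x42 -> u'B' (digits and letters map to themselves)
#   value 0x71 -> u'F2' (0x70..0x87 cover the function keys F1..F24)
#   value 0x90 -> u'Num Lock' (other codes are looked up in HOT_KEYS)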
class LnkFile(Parser):
MAGIC = "\x4C\0\0\0\x01\x14\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x46"
PARSER_TAGS = {
"id": "lnk",
"category": "misc",
"file_ext": ("lnk",),
"mime": (u"application/x-ms-shortcut",),
"magic": ((MAGIC, 0),),
"min_size": len(MAGIC)*8, # signature + guid = 20 bytes
"description": "Windows Shortcut (.lnk)",
}
endian = LITTLE_ENDIAN
SHOW_WINDOW_STATE = {
0: u"Hide",
1: u"Show Normal",
2: u"Show Minimized",
3: u"Show Maximized",
4: u"Show Normal, not activated",
5: u"Show",
6: u"Minimize",
7: u"Show Minimized, not activated",
8: u"Show, not activated",
9: u"Restore",
10: u"Show Default",
}
def validate(self):
if self["signature"].value != 0x0000004C:
return "Invalid signature"
if self["guid"].value != "00021401-0000-0000-C000-000000000046":
return "Invalid GUID"
return True
def hasUnicodeNames(self):
return self["has_unicode_names"].value
def createFields(self):
yield UInt32(self, "signature", "Shortcut signature (0x0000004C)")
yield GUID(self, "guid", "Shortcut GUID (00021401-0000-0000-C000-000000000046)")
yield Bit(self, "has_shell_id", "Is the Item ID List present?")
yield Bit(self, "target_is_file", "Is a file or a directory?")
yield Bit(self, "has_description", "Is the Description field present?")
yield Bit(self, "has_rel_path", "Is the relative path to the target available?")
yield Bit(self, "has_working_dir", "Is there a working directory?")
yield Bit(self, "has_cmd_line_args", "Are there any command line arguments?")
yield Bit(self, "has_custom_icon", "Is there a custom icon?")
yield Bit(self, "has_unicode_names", "Are Unicode names used?")
yield Bit(self, "force_no_linkinfo")
yield Bit(self, "has_exp_sz")
yield Bit(self, "run_in_separate")
yield Bit(self, "has_logo3id", "Is LOGO3 ID info present?")
yield Bit(self, "has_darwinid", "Is the DarwinID info present?")
yield Bit(self, "runas_user", "Is the target run as another user?")
yield Bit(self, "has_exp_icon_sz", "Is custom icon information available?")
yield Bit(self, "no_pidl_alias")
yield Bit(self, "force_unc_name")
yield Bit(self, "run_with_shim_layer")
yield PaddingBits(self, "reserved[]", 14, "Flag bits reserved for future use")
yield MSDOSFileAttr32(self, "target_attr")
yield TimestampWin64(self, "creation_time")
yield TimestampWin64(self, "modification_time")
yield TimestampWin64(self, "last_access_time")
yield filesizeHandler(UInt32(self, "target_filesize"))
yield UInt32(self, "icon_number")
yield Enum(UInt32(self, "show_window"), self.SHOW_WINDOW_STATE)
yield textHandler(UInt8(self, "hot_key", "Hot key used for quick access"),text_hot_key)
yield Bit(self, "hot_key_shift", "Hot key: is Shift used?")
yield Bit(self, "hot_key_ctrl", "Hot key: is Ctrl used?")
yield Bit(self, "hot_key_alt", "Hot key: is Alt used?")
yield PaddingBits(self, "hot_key_reserved", 21, "Hot key: (reserved)")
yield NullBytes(self, "reserved[]", 8)
if self["has_shell_id"].value:
yield ItemIdList(self, "item_idlist", "Item ID List")
if self["target_is_file"].value:
yield FileLocationInfo(self, "file_location_info", "File Location Info")
if self["has_description"].value:
yield LnkString(self, "description")
if self["has_rel_path"].value:
yield LnkString(self, "relative_path", "Relative path to target")
if self["has_working_dir"].value:
yield LnkString(self, "working_dir", "Working directory (dir to start target in)")
if self["has_cmd_line_args"].value:
yield LnkString(self, "cmd_line_args", "Command Line Arguments")
if self["has_custom_icon"].value:
yield LnkString(self, "custom_icon", "Custom Icon Path")
while not self.eof:
yield ExtraInfo(self, "extra_info[]")
| 23,407 | Python | .py | 520 | 35.723077 | 155 | 0.605257 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,518 | pdf.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/pdf.py | """
Adobe Portable Document Format (PDF) parser.
Author: Christophe Gisquet <[email protected]>
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (
Field, FieldSet,
ParserError,
GenericVector,
UInt8, UInt16, UInt32,
String,
RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
MAGIC = "%PDF-"
ENDMAGIC = "%%EOF"
def getLineEnd(s, pos=None):
if pos == None:
pos = (s.absolute_address+s.current_size)//8
end = s.stream.searchBytesLength("\x0D", False, 8*pos)
other_end = s.stream.searchBytesLength("\x0A", False, 8*pos)
if end == None or (other_end != None and other_end < end):
return other_end
return end
# TODO: rewrite to account for all possible terminations: ' ', '/', '\0XD'
# But this probably requires changing *ALL* of the places they are used,
# as ' ' is swallowed but not the others
def getElementEnd(s, limit=' ', offset=0):
addr = s.absolute_address+s.current_size
addr += 8*offset
pos = s.stream.searchBytesLength(limit, True, addr)
if pos == None:
#s.info("Can't find '%s' starting at %u" % (limit, addr))
return None
return pos
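# Usage note (illustration only): getElementEnd() returns the byte count from
# the current parse position up to and including the limit string, or None
# when the limit is not found; callers below subtract one when they want to
# stop just before a one-character limit.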
class PDFNumber(Field):
    """
    sprintf("%i") or sprintf("%.?f")
    """
    LIMITS = ['[', '/', '\x0D', ']']
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
# Get size
size = getElementEnd(parent)
for limit in self.LIMITS:
other_size = getElementEnd(parent, limit)
if other_size != None:
other_size -= 1
if size == None or other_size < size:
size = other_size
self._size = 8*size
# Get value
val = parent.stream.readBytes(self.absolute_address, size)
self.info("Number: size=%u value='%s'" % (size, val))
if val.find('.') != -1:
self.createValue = lambda: float(val)
else:
self.createValue = lambda: int(val)
class PDFString(Field):
"""
A string of the shape:
( This string \
uses 3 lines \
with the CR(LF) inhibited )
"""
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
val = ""
count = 1
off = 1
while not parent.eof:
char = parent.stream.readBytes(self.absolute_address+8*off, 1)
            # Skip escapes and non-alphabetic characters
            if not char.isalpha() or char == '\\':
                off += 1
                continue
if char == '(':
count += 1
if char == ')':
count -= 1
# Parenthesis block = 0 => end of string
if count == 0:
off += 1
break
            # Add it to the string and advance to the next character
            val += char
            off += 1
self._size = 8*off
self.createValue = lambda: val
class PDFName(Field):
    """
    String starting with '/', where characters may be written using their
    ASCII code (example: '#20' would be ' ').
    ' ', ']' and '\0' are supposed not to be part of the name.
    """
    LIMITS = ['[', '/', '<', ']']
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
if parent.stream.readBytes(self.absolute_address, 1) != '/':
raise ParserError("Unknown PDFName '%s'" %
parent.stream.readBytes(self.absolute_address, 10))
size = getElementEnd(parent, offset=1)
#other_size = getElementEnd(parent, '[')-1
#if size == None or (other_size != None and other_size < size):
# size = other_size
for limit in self.LIMITS:
other_size = getElementEnd(parent, limit, 1)
if other_size != None:
other_size -= 1
if size == None or other_size < size:
#self.info("New size: %u" % other_size)
size = other_size
self._size = 8*(size+1)
# Value should be without the initial '/' and final ' '
self.createValue = lambda: parent.stream.readBytes(self.absolute_address+8, size).strip(' ')
class PDFID(Field):
"""
Not described as an object, but let's do as it was.
This ID has the shape <hexadecimal ASCII string>
"""
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
self._size = 8*getElementEnd(parent, '>')
self.createValue = lambda: parent.stream.readBytes(self.absolute_address+8, (self._size//8)-1)
class NotABool(Exception): pass
class PDFBool(Field):
"""
"true" or "false" string standing for the boolean value
"""
    def __init__(self, parent, name, desc=None):
        Field.__init__(self, parent, name, description=desc)
        # Note: field sizes are expressed in bits
        if parent.stream.readBytes(self.absolute_address, 4) == "true":
            self._size = 4*8
            self.createValue = lambda: True
        elif parent.stream.readBytes(self.absolute_address, 5) == "false":
            self._size = 5*8
            self.createValue = lambda: False
        else:
            raise NotABool
class LineEnd(FieldSet):
"""
Made of 0x0A, 0x0D (we may include several line ends)
"""
def createFields(self):
while not self.eof:
addr = self.absolute_address+self.current_size
char = self.stream.readBytes(addr, 1)
if char == '\x0A':
yield UInt8(self, "lf", "Line feed")
elif char == '\x0D':
yield UInt8(self, "cr", "Line feed")
else:
self.info("Line ends at %u/%u, len %u" %
(addr, self.stream._size, self.current_size))
break
class PDFDictionaryPair(FieldSet):
def createFields(self):
yield PDFName(self, "name", getElementEnd(self))
for field in parsePDFType(self):
yield field
class PDFDictionary(FieldSet):
def createFields(self):
yield String(self, "dict_start", 2)
while not self.eof:
addr = self.absolute_address+self.current_size
if self.stream.readBytes(addr, 2) != '>>':
for field in parsePDFType(self):
yield field
else:
break
yield String(self, "dict_end", 2)
class PDFArray(FieldSet):
"""
Array of possibly non-homogeneous elements, starting with '[' and ending
with ']'
"""
def createFields(self):
yield String(self, "array_start", 1)
while self.stream.readBytes(self.absolute_address+self.current_size, 1) != ']':
for field in parsePDFType(self):
yield field
yield String(self, "array_end", 1)
def parsePDFType(s):
addr = s.absolute_address+s.current_size
char = s.stream.readBytes(addr, 1)
if char == '/':
yield PDFName(s, "type[]", getElementEnd(s))
elif char == '<':
if s.stream.readBytes(addr+8, 1) == '<':
yield PDFDictionary(s, "dict[]")
else:
yield PDFID(s, "id[]")
elif char == '(':
yield PDFString(s, "string[]")
elif char == '[':
yield PDFArray(s, "array[]")
else:
# First parse size
size = getElementEnd(s)
for limit in ['/', '>', '<']:
other_size = getElementEnd(s, limit)
if other_size != None:
other_size -= 1
                if size == None or (other_size > 0 and other_size < size):
size = other_size
# Get element
name = s.stream.readBytes(addr, size)
char = s.stream.readBytes(addr+8*size+8, 1)
if name.count(' ') > 1 and char == '<':
# Probably a catalog
yield Catalog(s, "catalog[]")
elif name[0] in ('.','-','+', '0', '1', '2', '3', \
'4', '5', '6', '7', '8', '9'):
s.info("Not a catalog: %u spaces and end='%s'" % (name.count(' '), char))
yield PDFNumber(s, "integer[]")
else:
s.info("Trying to parse '%s': %u bytes" % \
(s.stream.readBytes(s.absolute_address+s.current_size, 4), size))
yield String(s, "unknown[]", size)
class Header(FieldSet):
def createFields(self):
yield String(self, "marker", 5, MAGIC)
length = getLineEnd(self, 4)
if length != None:
#self.info("Found at position %08X" % len)
yield String(self, "version", length-1)
yield LineEnd(self, "line_end")
else:
self.warning("Can't determine version!")
def createDescription(self):
return "PDF version %s" % self["version"].display
class Body(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
pos = self.stream.searchBytesLength(CrossReferenceTable.MAGIC, False)
if pos == None:
raise ParserError("Can't find xref starting at %u" %
(self.absolute_address//8))
self._size = 8*pos-self.absolute_address
def createFields(self):
while self.stream.readBytes(self.absolute_address+self.current_size, 1) == '%':
size = getLineEnd(self, 4)
if size == 2:
yield textHandler(UInt16(self, "crc32"), hexadecimal)
elif size == 4:
yield textHandler(UInt32(self, "crc32"), hexadecimal)
elif self.stream.readBytes(self.absolute_address+self.current_size, size).isalpha():
yield String(self, "comment[]", size)
else:
RawBytes(self, "unknown_data[]", size)
yield LineEnd(self, "line_end[]")
#abs_offset = self.current_size//8
# TODO: yield objects that read offsets and deduce size from
# "/cross_ref_table/sub_section[]/entries/item[]"
offsets = []
for subsection in self.array("/cross_ref_table/sub_section"):
for obj in subsection.array("entries/item"):
if "byte_offset" in obj:
# Could be inserted already sorted
offsets.append(obj["byte_offset"].value)
offsets.append(self["/cross_ref_table"].absolute_address//8)
offsets.sort()
for index in xrange(len(offsets)-1):
yield Catalog(self, "object[]", size=offsets[index+1]-offsets[index])
class Entry(FieldSet):
static_size = 20*8
def createFields(self):
typ = self.stream.readBytes(self.absolute_address+17*8, 1)
if typ == 'n':
yield PDFNumber(self, "byte_offset")
elif typ == 'f':
yield PDFNumber(self, "next_free_object_number")
else:
yield PDFNumber(self, "unknown_string")
yield PDFNumber(self, "generation_number")
yield UInt8(self, "type")
yield LineEnd(self, "line_end")
def createDescription(self):
if self["type"].value == 'n':
return "In-use entry at offset %u" % int(self["byte_offset"].value)
elif self["type"].value == 'f':
return "Free entry before in-use object %u" % \
int(self["next_free_object_number"].value)
else:
return "unknown %s" % self["unknown_string"].value
class SubSection(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
self.info("Got entry count: '%s'" % self["entry_count"].value)
self._size = self.current_size + 8*20*int(self["entry_count"].value) \
+ self["line_end"].size
def createFields(self):
yield PDFNumber(self, "start_number",
"Object number of first entry in subsection")
self.info("start_number = %i" % self["start_number"].value)
yield PDFNumber(self, "entry_count", "Number of entries in subsection")
self.info("entry_count = %i" % self["entry_count"].value)
yield LineEnd(self, "line_end")
yield GenericVector(self, "entries", int(self["entry_count"].value),
Entry)
#yield LineEnd(self, "line_end[]")
def createDescription(self):
return "Subsection with %s elements, starting at %s" % \
(self["entry_count"].value, self["start_number"])
class CrossReferenceTable(FieldSet):
MAGIC = "xref"
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, description=desc)
pos = self.stream.searchBytesLength(Trailer.MAGIC, False)
if pos == None:
raise ParserError("Can't find '%s' starting at %u" \
(Trailer.MAGIC, self.absolute_address//8))
self._size = 8*pos-self.absolute_address
def createFields(self):
yield RawBytes(self, "marker", len(self.MAGIC))
yield LineEnd(self, "line_end[]")
while not self.eof:
yield SubSection(self, "sub_section[]")
class Catalog(FieldSet):
END_NAME = ['<', '/', '[']
def __init__(self, parent, name, size=None, desc=None):
FieldSet.__init__(self, parent, name, description=desc)
if size != None:
self._size = 8*size
# object catalogs are ended with "obj"
elif self["object"].value == "obj":
size = self.stream.searchBytesLength("endobj", False)
if size != None:
self._size = 8*(size+2)
def createFields(self):
yield PDFNumber(self, "index")
yield PDFNumber(self, "unknown[]")
length = getElementEnd(self)
        for limit in self.END_NAME:
            new_length = getElementEnd(self, limit)
            if new_length != None:
                new_length -= len(limit)
            if length == None or (new_length != None and new_length < length):
                length = new_length
yield String(self, "object", length, strip=' ')
if self.stream.readBytes(self.absolute_address+self.current_size, 2) == '<<':
yield PDFDictionary(self, "key_list")
# End of catalog: this one has "endobj"
if self["object"].value == "obj":
yield LineEnd(self, "line_end[]")
yield String(self, "end_object", len("endobj"))
yield LineEnd(self, "line_end[]")
class Trailer(FieldSet):
MAGIC = "trailer"
def createFields(self):
yield RawBytes(self, "marker", len(self.MAGIC))
yield LineEnd(self, "line_end[]")
yield String(self, "start_attribute_marker", 2)
addr = self.absolute_address + self.current_size
while self.stream.readBytes(addr, 2) != '>>':
t = PDFName(self, "type[]")
yield t
name = t.value
self.info("Parsing PDFName '%s'" % name)
if name == "Size":
yield PDFNumber(self, "size", "Entries in the file cross-reference section")
elif name == "Prev":
yield PDFNumber(self, "offset")
elif name == "Root":
yield Catalog(self, "object_catalog")
elif name == "Info":
yield Catalog(self, "info")
elif name == "ID":
yield PDFArray(self, "id")
elif name == "Encrypt":
yield PDFDictionary(self, "decrypt")
else:
raise ParserError("Don't know trailer type '%s'" % name)
addr = self.absolute_address + self.current_size
yield String(self, "end_attribute_marker", 2)
yield LineEnd(self, "line_end[]")
yield String(self, "start_xref", 9)
yield LineEnd(self, "line_end[]")
yield PDFNumber(self, "cross_ref_table_start_address")
yield LineEnd(self, "line_end[]")
yield String(self, "end_marker", len(ENDMAGIC))
yield LineEnd(self, "line_end[]")
class PDFDocument(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "pdf",
"category": "misc",
"file_ext": ("pdf",),
"mime": (u"application/pdf",),
"min_size": (5+4)*8,
"magic": ((MAGIC, 5),),
"description": "Portable Document Format (PDF) document"
}
def validate(self):
if self.stream.readBytes(0, len(MAGIC)) != MAGIC:
return "Invalid magic string"
return True
# Size is not always determined by position of "%%EOF":
# - updated documents have several of those
# - PDF files should be parsed from *end*
# => TODO: find when a document has been updated
def createFields(self):
yield Header(self, "header")
yield Body(self, "body")
yield CrossReferenceTable(self, "cross_ref_table")
yield Trailer(self, "trailer")
| 16,806 | Python | .py | 402 | 32.121891 | 102 | 0.568932 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,519 | ttf.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/ttf.py | """
TrueType Font parser.
Documents:
- "An Introduction to TrueType Fonts: A look inside the TTF format"
written by "NRSI: Computers & Writing Systems"
http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&item_id=IWS-Chapter08
Author: Victor Stinner
Creation date: 2007-02-08
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt16, UInt32, Bit, Bits,
PaddingBits, NullBytes,
String, RawBytes, Bytes, Enum,
TimestampMac32)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
MAX_NAME_COUNT = 300
MIN_NB_TABLE = 3
MAX_NB_TABLE = 30
DIRECTION_NAME = {
0: u"Mixed directional",
1: u"Left to right",
2: u"Left to right + neutrals",
-1: u"Right to left",
-2: u"Right to left + neutrals",
}
NAMEID_NAME = {
0: u"Copyright notice",
1: u"Font family name",
2: u"Font subfamily name",
3: u"Unique font identifier",
4: u"Full font name",
5: u"Version string",
6: u"Postscript name",
7: u"Trademark",
8: u"Manufacturer name",
9: u"Designer",
10: u"Description",
11: u"URL Vendor",
12: u"URL Designer",
13: u"License Description",
14: u"License info URL",
16: u"Preferred Family",
17: u"Preferred Subfamily",
18: u"Compatible Full",
19: u"Sample text",
20: u"PostScript CID findfont name",
}
PLATFORM_NAME = {
0: "Unicode",
1: "Macintosh",
2: "ISO",
3: "Microsoft",
4: "Custom",
}
CHARSET_MAP = {
# (platform, encoding) => charset
0: {3: "UTF-16-BE"},
1: {0: "MacRoman"},
3: {1: "UTF-16-BE"},
}
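# Example (illustration only): a name record with platformID=3 (Microsoft) and
# encodingID=1 uses CHARSET_MAP[3][1] == "UTF-16-BE"; (platform, encoding)
# pairs missing from this map fall back to ISO-8859-1 in getCharset() below.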
class TableHeader(FieldSet):
def createFields(self):
yield String(self, "tag", 4)
yield textHandler(UInt32(self, "checksum"), hexadecimal)
yield UInt32(self, "offset")
yield filesizeHandler(UInt32(self, "size"))
def createDescription(self):
return "Table entry: %s (%s)" % (self["tag"].display, self["size"].display)
class NameHeader(FieldSet):
def createFields(self):
yield Enum(UInt16(self, "platformID"), PLATFORM_NAME)
yield UInt16(self, "encodingID")
yield UInt16(self, "languageID")
yield Enum(UInt16(self, "nameID"), NAMEID_NAME)
yield UInt16(self, "length")
yield UInt16(self, "offset")
def getCharset(self):
platform = self["platformID"].value
encoding = self["encodingID"].value
try:
return CHARSET_MAP[platform][encoding]
except KeyError:
self.warning("TTF: Unknown charset (%s,%s)" % (platform, encoding))
return "ISO-8859-1"
def createDescription(self):
platform = self["platformID"].display
name = self["nameID"].display
return "Name record: %s (%s)" % (name, platform)
def parseFontHeader(self):
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "font_maj_ver", "Font major version")
yield UInt16(self, "font_min_ver", "Font minor version")
yield textHandler(UInt32(self, "checksum"), hexadecimal)
yield Bytes(self, "magic", 4, r"Magic string (\x5F\x0F\x3C\xF5)")
if self["magic"].value != "\x5F\x0F\x3C\xF5":
raise ParserError("TTF: invalid magic of font header")
# Flags
yield Bit(self, "y0", "Baseline at y=0")
yield Bit(self, "x0", "Left sidebearing point at x=0")
yield Bit(self, "instr_point", "Instructions may depend on point size")
yield Bit(self, "ppem", "Force PPEM to integer values for all")
yield Bit(self, "instr_width", "Instructions may alter advance width")
yield Bit(self, "vertical", "e laid out vertically?")
yield PaddingBits(self, "reserved[]", 1)
yield Bit(self, "linguistic", "Requires layout for correct linguistic rendering?")
yield Bit(self, "gx", "Metamorphosis effects?")
yield Bit(self, "strong", "Contains strong right-to-left glyphs?")
yield Bit(self, "indic", "contains Indic-style rearrangement effects?")
yield Bit(self, "lossless", "Data is lossless (Agfa MicroType compression)")
yield Bit(self, "converted", "Font converted (produce compatible metrics)")
yield Bit(self, "cleartype", "Optimised for ClearType")
yield Bits(self, "adobe", 2, "(used by Adobe)")
yield UInt16(self, "unit_per_em", "Units per em")
if not(16 <= self["unit_per_em"].value <= 16384):
raise ParserError("TTF: Invalid unit/em value")
yield UInt32(self, "created_high")
yield TimestampMac32(self, "created")
yield UInt32(self, "modified_high")
yield TimestampMac32(self, "modified")
yield UInt16(self, "xmin")
yield UInt16(self, "ymin")
yield UInt16(self, "xmax")
yield UInt16(self, "ymax")
# Mac style
yield Bit(self, "bold")
yield Bit(self, "italic")
yield Bit(self, "underline")
yield Bit(self, "outline")
yield Bit(self, "shadow")
yield Bit(self, "condensed", "(narrow)")
yield Bit(self, "expanded")
yield PaddingBits(self, "reserved[]", 9)
yield UInt16(self, "lowest", "Smallest readable size in pixels")
yield Enum(UInt16(self, "font_dir", "Font direction hint"), DIRECTION_NAME)
yield Enum(UInt16(self, "ofst_format"), {0: "short offsets", 1: "long"})
yield UInt16(self, "glyph_format", "(=0)")
def parseNames(self):
# Read header
yield UInt16(self, "format")
if self["format"].value != 0:
raise ParserError("TTF (names): Invalid format (%u)" % self["format"].value)
yield UInt16(self, "count")
yield UInt16(self, "offset")
if MAX_NAME_COUNT < self["count"].value:
raise ParserError("Invalid number of names (%s)"
% self["count"].value)
# Read name index
entries = []
for index in xrange(self["count"].value):
entry = NameHeader(self, "header[]")
yield entry
entries.append(entry)
# Sort names by their offset
entries.sort(key=lambda field: field["offset"].value)
# Read name value
last = None
for entry in entries:
# Skip duplicates values
new = (entry["offset"].value, entry["length"].value)
if last and last == new:
self.warning("Skip duplicate %s %s" % (entry.name, new))
continue
last = (entry["offset"].value, entry["length"].value)
        # Skip entries whose value would start before the current position
offset = entry["offset"].value + self["offset"].value
if offset < self.current_size//8:
self.warning("Skip value %s (negative offset)" % entry.name)
continue
# Add padding if any
padding = self.seekByte(offset, relative=True, null=True)
if padding:
yield padding
# Read value
size = entry["length"].value
if size:
yield String(self, "value[]", size, entry.description, charset=entry.getCharset())
padding = (self.size - self.current_size) // 8
if padding:
yield NullBytes(self, "padding_end", padding)
class Table(FieldSet):
TAG_INFO = {
"head": ("header", "Font header", parseFontHeader),
"name": ("names", "Names", parseNames),
}
def __init__(self, parent, name, table, **kw):
FieldSet.__init__(self, parent, name, **kw)
self.table = table
tag = table["tag"].value
if tag in self.TAG_INFO:
self._name, self._description, self.parser = self.TAG_INFO[tag]
else:
self.parser = None
def createFields(self):
if self.parser:
for field in self.parser(self):
yield field
else:
yield RawBytes(self, "content", self.size//8)
def createDescription(self):
return "Table %s (%s)" % (self.table["tag"].value, self.table.path)
class TrueTypeFontFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "ttf",
"category": "misc",
"file_ext": ("ttf",),
"min_size": 10*8, # FIXME
"description": "TrueType font",
}
def validate(self):
if self["maj_ver"].value != 1:
return "Invalid major version (%u)" % self["maj_ver"].value
if self["min_ver"].value != 0:
return "Invalid minor version (%u)" % self["min_ver"].value
if not (MIN_NB_TABLE <= self["nb_table"].value <= MAX_NB_TABLE):
return "Invalid number of table (%u)" % self["nb_table"].value
return True
def createFields(self):
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "nb_table")
yield UInt16(self, "search_range")
yield UInt16(self, "entry_selector")
yield UInt16(self, "range_shift")
tables = []
for index in xrange(self["nb_table"].value):
table = TableHeader(self, "table_hdr[]")
yield table
tables.append(table)
tables.sort(key=lambda field: field["offset"].value)
for table in tables:
padding = self.seekByte(table["offset"].value, null=True)
if padding:
yield padding
size = table["size"].value
if size:
yield Table(self, "table[]", table, size=size*8)
padding = self.seekBit(self.size, null=True)
if padding:
yield padding
| 9,433 | Python | .py | 243 | 31.983539 | 94 | 0.62429 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,520 | chm.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/chm.py | """
InfoTech Storage Format (ITSF) parser, used by Microsoft's HTML Help (.chm)
Document:
- Microsoft's HTML Help (.chm) format
http://www.wotsit.org (search "chm")
- chmlib library
http://www.jedrea.com/chmlib/
Author: Victor Stinner
Creation date: 2007-03-04
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (Field, FieldSet, ParserError,
Int32, UInt32, UInt64,
RawBytes, PaddingBytes,
Enum, String)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.win32 import GUID
from lib.hachoir_parser.common.win32_lang_id import LANGUAGE_ID
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
class CWord(Field):
"""
Compressed double-word
"""
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, 8, description)
endian = self._parent.endian
stream = self._parent.stream
addr = self.absolute_address
value = 0
byte = stream.readBits(addr, 8, endian)
while byte & 0x80:
value <<= 7
value += (byte & 0x7f)
self._size += 8
if 64 < self._size:
raise ParserError("CHM: CWord is limited to 64 bits")
addr += 8
byte = stream.readBits(addr, 8, endian)
        value = (value << 7) + byte   # the final byte also carries 7 data bits
self.createValue = lambda: value
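# Standalone sketch of the same decoding, for reference only (assumption: it
# mirrors the CWord field above and is not used anywhere by this parser).
# Each byte carries 7 data bits, most-significant group first; a set high bit
# means "one more byte follows".
def decodeCWord(data):
    """Decode a compressed double-word from a byte string.

    Returns (value, nbytes), e.g. decodeCWord("\\x82\\x44") == (0x0144, 2).
    """
    value = 0
    for index, char in enumerate(data):
        byte = ord(char)
        if byte & 0x80:
            value = (value << 7) + (byte & 0x7f)
        else:
            return (value << 7) + byte, index + 1
    raise ValueError("Truncated CWord")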
class Filesize_Header(FieldSet):
def createFields(self):
yield textHandler(UInt32(self, "unknown[]", "0x01FE"), hexadecimal)
yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal)
yield filesizeHandler(UInt64(self, "file_size"))
yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal)
yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal)
class ITSP(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
yield String(self, "magic", 4, "ITSP", charset="ASCII")
yield UInt32(self, "version", "Version (=1)")
yield filesizeHandler(UInt32(self, "size", "Length (in bytes) of the directory header (84)"))
yield UInt32(self, "unknown[]", "(=10)")
yield filesizeHandler(UInt32(self, "block_size", "Directory block size"))
yield UInt32(self, "density", "Density of quickref section, usually 2")
yield UInt32(self, "index_depth", "Depth of the index tree")
yield Int32(self, "nb_dir", "Chunk number of root index chunk")
yield UInt32(self, "first_pmgl", "Chunk number of first PMGL (listing) chunk")
yield UInt32(self, "last_pmgl", "Chunk number of last PMGL (listing) chunk")
yield Int32(self, "unknown[]", "-1")
yield UInt32(self, "nb_dir_chunk", "Number of directory chunks (total)")
yield Enum(UInt32(self, "lang_id", "Windows language ID"), LANGUAGE_ID)
yield GUID(self, "system_uuid", "{5D02926A-212E-11D0-9DF9-00A0C922E6EC}")
yield filesizeHandler(UInt32(self, "size2", "Same value than size"))
yield Int32(self, "unknown[]", "-1")
yield Int32(self, "unknown[]", "-1")
yield Int32(self, "unknown[]", "-1")
class ITSF(FieldSet):
def createFields(self):
yield String(self, "magic", 4, "ITSF", charset="ASCII")
yield UInt32(self, "version")
yield UInt32(self, "header_size", "Total header length (in bytes)")
yield UInt32(self, "one")
yield UInt32(self, "last_modified")
yield Enum(UInt32(self, "lang_id", "Windows Language ID"), LANGUAGE_ID)
yield GUID(self, "dir_uuid", "{7C01FD10-7BAA-11D0-9E0C-00A0-C922-E6EC}")
yield GUID(self, "stream_uuid", "{7C01FD11-7BAA-11D0-9E0C-00A0-C922-E6EC}")
yield UInt64(self, "filesize_offset")
yield filesizeHandler(UInt64(self, "filesize_len"))
yield UInt64(self, "dir_offset")
yield filesizeHandler(UInt64(self, "dir_len"))
if 3 <= self["version"].value:
yield UInt64(self, "data_offset")
class PMGL_Entry(FieldSet):
def createFields(self):
yield CWord(self, "name_len")
yield String(self, "name", self["name_len"].value, charset="UTF-8")
yield CWord(self, "space")
yield CWord(self, "start")
yield filesizeHandler(CWord(self, "length"))
def createDescription(self):
return "%s (%s)" % (self["name"].value, self["length"].display)
class PMGL(FieldSet):
def createFields(self):
# Header
yield String(self, "magic", 4, "PMGL", charset="ASCII")
yield filesizeHandler(Int32(self, "free_space",
"Length of free space and/or quickref area at end of directory chunk"))
yield Int32(self, "unknown")
yield Int32(self, "previous", "Chunk number of previous listing chunk")
yield Int32(self, "next", "Chunk number of previous listing chunk")
# Entries
stop = self.size - self["free_space"].value * 8
while self.current_size < stop:
yield PMGL_Entry(self, "entry[]")
# Padding
padding = (self.size - self.current_size) // 8
if padding:
yield PaddingBytes(self, "padding", padding)
class PMGI_Entry(FieldSet):
def createFields(self):
yield CWord(self, "name_len")
yield String(self, "name", self["name_len"].value, charset="UTF-8")
yield CWord(self, "page")
def createDescription(self):
return "%s (page #%u)" % (self["name"].value, self["page"].value)
class PMGI(FieldSet):
def createFields(self):
yield String(self, "magic", 4, "PMGI", charset="ASCII")
yield filesizeHandler(UInt32(self, "free_space",
"Length of free space and/or quickref area at end of directory chunk"))
stop = self.size - self["free_space"].value * 8
while self.current_size < stop:
yield PMGI_Entry(self, "entry[]")
padding = (self.size - self.current_size) // 8
if padding:
yield PaddingBytes(self, "padding", padding)
class Directory(FieldSet):
def createFields(self):
yield ITSP(self, "itsp")
block_size = self["itsp/block_size"].value * 8
nb_dir = self["itsp/nb_dir"].value
if nb_dir < 0:
nb_dir = 1
for index in xrange(nb_dir):
yield PMGL(self, "pmgl[]", size=block_size)
if self.current_size < self.size:
yield PMGI(self, "pmgi", size=block_size)
class ChmFile(Parser):
PARSER_TAGS = {
"id": "chm",
"category": "misc",
"file_ext": ("chm",),
"min_size": 4*8,
"magic": (("ITSF\3\0\0\0", 0),),
"description": "Microsoft's HTML Help (.chm)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "ITSF":
return "Invalid magic"
if self["itsf/version"].value != 3:
return "Invalid version"
return True
def createFields(self):
yield ITSF(self, "itsf")
yield Filesize_Header(self, "file_size", size=self["itsf/filesize_len"].value*8)
padding = self.seekByte(self["itsf/dir_offset"].value)
if padding:
yield padding
yield Directory(self, "dir", size=self["itsf/dir_len"].value*8)
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw_end", size)
def createContentSize(self):
return self["file_size/file_size"].value * 8
| 7,546 | Python | .py | 169 | 36.721893 | 101 | 0.623332 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,521 | word_doc.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/word_doc.py | """
Documents:
* libwx source code: see fib.c source code
* "Microsoft Word 97 Binary File Format"
http://bio.gsi.de/DOCS/AIX/wword8.html
Microsoft Word 97 (aka Version 8) for Windows and Macintosh. From the Office
book, found in the Microsoft Office Development section in the MSDN Online
Library. HTMLified June 1998. Revised Aug 1 1998, added missing Definitions
section. Revised Dec 21 1998, added missing Document Properties (section).
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
Bit, Bits,
UInt8, Int16, UInt16, UInt32, Int32,
NullBytes, RawBytes, PascalString16,
DateTimeMSDOS32)
from lib.hachoir_core.endian import LITTLE_ENDIAN
TIMESTAMP = DateTimeMSDOS32
class BaseWordDocument:
def createFields(self):
yield UInt16(self, "wIdent", 2)
yield UInt16(self, "nFib")
yield UInt16(self, "nProduct")
yield UInt16(self, "lid")
yield Int16(self, "pnNext")
yield Bit(self, "fDot")
yield Bit(self, "fGlsy")
yield Bit(self, "fComplex")
yield Bit(self, "fHasPic")
yield Bits(self, "cQuickSaves", 4)
yield Bit(self, "fEncrypted")
yield Bit(self, "fWhichTblStm")
yield Bit(self, "fReadOnlyRecommanded")
yield Bit(self, "fWriteReservation")
yield Bit(self, "fExtChar")
yield Bit(self, "fLoadOverride")
yield Bit(self, "fFarEeast")
yield Bit(self, "fCrypto")
yield UInt16(self, "nFibBack")
yield UInt32(self, "lKey")
yield UInt8(self, "envr")
yield Bit(self, "fMac")
yield Bit(self, "fEmptySpecial")
yield Bit(self, "fLoadOverridePage")
yield Bit(self, "fFutureSavedUndo")
yield Bit(self, "fWord97Save")
yield Bits(self, "fSpare0", 3)
yield UInt16(self, "chse")
yield UInt16(self, "chsTables")
yield UInt32(self, "fcMin")
yield UInt32(self, "fcMac")
yield PascalString16(self, "file_creator", strip="\0")
yield NullBytes(self, "reserved[]", 12)
yield Int16(self, "lidFE")
yield UInt16(self, "clw")
yield Int32(self, "cbMac")
yield UInt32(self, "lProductCreated")
yield TIMESTAMP(self, "lProductRevised")
yield UInt32(self, "ccpText")
yield Int32(self, "ccpFtn")
yield Int32(self, "ccpHdr")
yield Int32(self, "ccpMcr")
yield Int32(self, "ccpAtn")
yield Int32(self, "ccpEdn")
yield Int32(self, "ccpTxbx")
yield Int32(self, "ccpHdrTxbx")
yield Int32(self, "pnFbpChpFirst")
yield Int32(self, "pnChpFirst")
yield Int32(self, "cpnBteChp")
yield Int32(self, "pnFbpPapFirst")
yield Int32(self, "pnPapFirst")
yield Int32(self, "cpnBtePap")
yield Int32(self, "pnFbpLvcFirst")
yield Int32(self, "pnLvcFirst")
yield Int32(self, "cpnBteLvc")
yield Int32(self, "fcIslandFirst")
yield Int32(self, "fcIslandLim")
yield UInt16(self, "cfclcb")
yield Int32(self, "fcStshfOrig")
yield UInt32(self, "lcbStshfOrig")
yield Int32(self, "fcStshf")
yield UInt32(self, "lcbStshf")
yield Int32(self, "fcPlcffndRef")
yield UInt32(self, "lcbPlcffndRef")
yield Int32(self, "fcPlcffndTxt")
yield UInt32(self, "lcbPlcffndTxt")
yield Int32(self, "fcPlcfandRef")
yield UInt32(self, "lcbPlcfandRef")
yield Int32(self, "fcPlcfandTxt")
yield UInt32(self, "lcbPlcfandTxt")
yield Int32(self, "fcPlcfsed")
yield UInt32(self, "lcbPlcfsed")
yield Int32(self, "fcPlcpad")
yield UInt32(self, "lcbPlcpad")
yield Int32(self, "fcPlcfphe")
yield UInt32(self, "lcbPlcfphe")
yield Int32(self, "fcSttbfglsy")
yield UInt32(self, "lcbSttbfglsy")
yield Int32(self, "fcPlcfglsy")
yield UInt32(self, "lcbPlcfglsy")
yield Int32(self, "fcPlcfhdd")
yield UInt32(self, "lcbPlcfhdd")
yield Int32(self, "fcPlcfbteChpx")
yield UInt32(self, "lcbPlcfbteChpx")
yield Int32(self, "fcPlcfbtePapx")
yield UInt32(self, "lcbPlcfbtePapx")
yield Int32(self, "fcPlcfsea")
yield UInt32(self, "lcbPlcfsea")
yield Int32(self, "fcSttbfffn")
yield UInt32(self, "lcbSttbfffn")
yield Int32(self, "fcPlcffldMom")
yield UInt32(self, "lcbPlcffldMom")
yield Int32(self, "fcPlcffldHdr")
yield UInt32(self, "lcbPlcffldHdr")
yield Int32(self, "fcPlcffldFtn")
yield UInt32(self, "lcbPlcffldFtn")
yield Int32(self, "fcPlcffldAtn")
yield UInt32(self, "lcbPlcffldAtn")
yield Int32(self, "fcPlcffldMcr")
yield UInt32(self, "lcbPlcffldMcr")
yield Int32(self, "fcSttbfbkmk")
yield UInt32(self, "lcbSttbfbkmk")
yield Int32(self, "fcPlcfbkf")
yield UInt32(self, "lcbPlcfbkf")
yield Int32(self, "fcPlcfbkl")
yield UInt32(self, "lcbPlcfbkl")
yield Int32(self, "fcCmds")
yield UInt32(self, "lcbCmds")
yield Int32(self, "fcPlcmcr")
yield UInt32(self, "lcbPlcmcr")
yield Int32(self, "fcSttbfmcr")
yield UInt32(self, "lcbSttbfmcr")
yield Int32(self, "fcPrDrvr")
yield UInt32(self, "lcbPrDrvr")
yield Int32(self, "fcPrEnvPort")
yield UInt32(self, "lcbPrEnvPort")
yield Int32(self, "fcPrEnvLand")
yield UInt32(self, "lcbPrEnvLand")
yield Int32(self, "fcWss")
yield UInt32(self, "lcbWss")
yield Int32(self, "fcDop")
yield UInt32(self, "lcbDop")
yield Int32(self, "fcSttbfAssoc")
yield UInt32(self, "lcbSttbfAssoc")
yield Int32(self, "fcClx")
yield UInt32(self, "lcbClx")
yield Int32(self, "fcPlcfpgdFtn")
yield UInt32(self, "lcbPlcfpgdFtn")
yield Int32(self, "fcAutosaveSource")
yield UInt32(self, "lcbAutosaveSource")
yield Int32(self, "fcGrpXstAtnOwners")
yield UInt32(self, "lcbGrpXstAtnOwners")
yield Int32(self, "fcSttbfAtnbkmk")
yield UInt32(self, "lcbSttbfAtnbkmk")
yield Int32(self, "fcPlcdoaMom")
yield UInt32(self, "lcbPlcdoaMom")
yield Int32(self, "fcPlcdoaHdr")
yield UInt32(self, "lcbPlcdoaHdr")
yield Int32(self, "fcPlcspaMom")
yield UInt32(self, "lcbPlcspaMom")
yield Int32(self, "fcPlcspaHdr")
yield UInt32(self, "lcbPlcspaHdr")
yield Int32(self, "fcPlcfAtnbkf")
yield UInt32(self, "lcbPlcfAtnbkf")
yield Int32(self, "fcPlcfAtnbkl")
yield UInt32(self, "lcbPlcfAtnbkl")
yield Int32(self, "fcPms")
yield UInt32(self, "lcbPms")
yield Int32(self, "fcFormFldSttbs")
yield UInt32(self, "lcbFormFldSttbs")
yield Int32(self, "fcPlcfendRef")
yield UInt32(self, "lcbPlcfendRef")
yield Int32(self, "fcPlcfendTxt")
yield UInt32(self, "lcbPlcfendTxt")
yield Int32(self, "fcPlcffldEdn")
yield UInt32(self, "lcbPlcffldEdn")
yield Int32(self, "fcPlcfpgdEdn")
yield UInt32(self, "lcbPlcfpgdEdn")
yield Int32(self, "fcDggInfo")
yield UInt32(self, "lcbDggInfo")
yield Int32(self, "fcSttbfRMark")
yield UInt32(self, "lcbSttbfRMark")
yield Int32(self, "fcSttbCaption")
yield UInt32(self, "lcbSttbCaption")
yield Int32(self, "fcSttbAutoCaption")
yield UInt32(self, "lcbSttbAutoCaption")
yield Int32(self, "fcPlcfwkb")
yield UInt32(self, "lcbPlcfwkb")
yield Int32(self, "fcPlcfspl")
yield UInt32(self, "lcbPlcfspl")
yield Int32(self, "fcPlcftxbxTxt")
yield UInt32(self, "lcbPlcftxbxTxt")
yield Int32(self, "fcPlcffldTxbx")
yield UInt32(self, "lcbPlcffldTxbx")
yield Int32(self, "fcPlcfhdrtxbxTxt")
yield UInt32(self, "lcbPlcfhdrtxbxTxt")
yield Int32(self, "fcPlcffldHdrTxbx")
yield UInt32(self, "lcbPlcffldHdrTxbx")
yield Int32(self, "fcStwUser")
yield UInt32(self, "lcbStwUser")
yield Int32(self, "fcSttbttmbd")
yield UInt32(self, "cbSttbttmbd")
yield Int32(self, "fcUnused")
yield UInt32(self, "lcbUnused")
yield Int32(self, "fcPgdMother")
yield UInt32(self, "lcbPgdMother")
yield Int32(self, "fcBkdMother")
yield UInt32(self, "lcbBkdMother")
yield Int32(self, "fcPgdFtn")
yield UInt32(self, "lcbPgdFtn")
yield Int32(self, "fcBkdFtn")
yield UInt32(self, "lcbBkdFtn")
yield Int32(self, "fcPgdEdn")
yield UInt32(self, "lcbPgdEdn")
yield Int32(self, "fcBkdEdn")
yield UInt32(self, "lcbBkdEdn")
yield Int32(self, "fcSttbfIntlFld")
yield UInt32(self, "lcbSttbfIntlFld")
yield Int32(self, "fcRouteSlip")
yield UInt32(self, "lcbRouteSlip")
yield Int32(self, "fcSttbSavedBy")
yield UInt32(self, "lcbSttbSavedBy")
yield Int32(self, "fcSttbFnm")
yield UInt32(self, "lcbSttbFnm")
yield Int32(self, "fcPlcfLst")
yield UInt32(self, "lcbPlcfLst")
yield Int32(self, "fcPlfLfo")
yield UInt32(self, "lcbPlfLfo")
yield Int32(self, "fcPlcftxbxBkd")
yield UInt32(self, "lcbPlcftxbxBkd")
yield Int32(self, "fcPlcftxbxHdrBkd")
yield UInt32(self, "lcbPlcftxbxHdrBkd")
yield Int32(self, "fcDocUndo")
yield UInt32(self, "lcbDocUndo")
yield Int32(self, "fcRgbuse")
yield UInt32(self, "lcbRgbuse")
yield Int32(self, "fcUsp")
yield UInt32(self, "lcbUsp")
yield Int32(self, "fcUskf")
yield UInt32(self, "lcbUskf")
yield Int32(self, "fcPlcupcRgbuse")
yield UInt32(self, "lcbPlcupcRgbuse")
yield Int32(self, "fcPlcupcUsp")
yield UInt32(self, "lcbPlcupcUsp")
yield Int32(self, "fcSttbGlsyStyle")
yield UInt32(self, "lcbSttbGlsyStyle")
yield Int32(self, "fcPlgosl")
yield UInt32(self, "lcbPlgosl")
yield Int32(self, "fcPlcocx")
yield UInt32(self, "lcbPlcocx")
yield Int32(self, "fcPlcfbteLvc")
yield UInt32(self, "lcbPlcfbteLvc")
yield TIMESTAMP(self, "ftModified")
yield Int32(self, "fcPlcflvc")
yield UInt32(self, "lcbPlcflvc")
yield Int32(self, "fcPlcasumy")
yield UInt32(self, "lcbPlcasumy")
yield Int32(self, "fcPlcfgram")
yield UInt32(self, "lcbPlcfgram")
yield Int32(self, "fcSttbListNames")
yield UInt32(self, "lcbSttbListNames")
yield Int32(self, "fcSttbfUssr")
yield UInt32(self, "lcbSttbfUssr")
tail = (self.size - self.current_size) // 8
if tail:
yield RawBytes(self, "tail", tail)
class WordDocumentFieldSet(BaseWordDocument, FieldSet):
pass
class WordDocumentParser(BaseWordDocument, Parser):
PARSER_TAGS = {
"id": "word_document",
"min_size": 8,
"description": "Microsoft Office Word document",
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **kw):
Parser.__init__(self, stream, **kw)
def validate(self):
return True
| 11,320 | Python | .py | 279 | 32.078853 | 79 | 0.634425 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,522 | file_3do.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/file_3do.py | # -*- coding: utf-8 -*-
"""
3do model parser.
Author: Cyril Zorin
Creation date: 28 September 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt32, Int32, String, Float32,
RawBytes, PaddingBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_parser.misc.common import Vertex, MapUV
class Vector(FieldSet):
def __init__(self, parent, name,
count, type, ename, edesc=None, description=None):
FieldSet.__init__(self, parent, name, description)
self.count = count
self.type = type
self.ename = ename+"[]"
self.edesc = edesc
try:
item_size = self.type.static_size(self.ename, self.edesc)
except TypeError:
item_size = self.type.static_size
if item_size:
self._size = item_size * self.count
def createFields(self):
for index in xrange(self.count):
yield self.type(self, self.ename, self.edesc)
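# Usage note (illustration only): the call
#   Vector(self, "vertices", nb_vert, Vertex, "vertex")
# seen below yields nb_vert Vertex children named vertex[0]..vertex[n-1], and
# fixes the total field size up front when the element type has a static size.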
class Face(FieldSet):
def createFields(self):
yield UInt32(self, "id")
yield UInt32(self, "type")
yield UInt32(self, "geometry_mode")
yield UInt32(self, "lighting_mode")
yield UInt32(self, "texture_mode")
yield UInt32(self, "nvertices")
yield Float32(self, "unknown[]", "unknown")
yield UInt32(self, "has_texture", "Has texture?")
yield UInt32(self, "has_material", "Has material?")
yield Vertex(self, "unknown[]")
yield Float32(self, "extra_light")
yield Vertex(self, "unknown[]")
yield Vertex(self, "normal")
if self["nvertices"].value:
yield Vector(self, "vertex_indices",
self["nvertices"].value, UInt32, "vertex")
if self["has_texture"].value:
yield Vector(self, "texture_vertex_indices",
self["nvertices"].value, UInt32, "texture_vertex")
if self["has_material"].value:
yield UInt32(self, "material_index", "material index")
def createDescription(self):
return "Face: id=%s" % self["id"].value
class Mesh(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
def createFields(self):
yield String(self, "name", 32, strip="\0")
yield UInt32(self, "id")
yield UInt32(self, "geometry_mode")
yield UInt32(self, "lighting_mode")
yield UInt32(self, "texture_mode")
yield UInt32(self, "nmesh_vertices")
yield UInt32(self, "ntexture_vertices")
yield UInt32(self, "nfaces")
nb_vert = self["nmesh_vertices"].value
if nb_vert:
yield Vector(self, "vertices",
nb_vert, Vertex, "vertex")
if self["ntexture_vertices"].value:
yield Vector(self, "texture vertices",
self["ntexture_vertices"].value, MapUV, "texture_vertex")
if nb_vert:
yield Vector(self, "light vertices",
nb_vert, Float32, "extra_light")
yield Vector(self, "unknown[]",
nb_vert, Float32, "unknown")
if self["nfaces"].value:
yield Vector(self, "faces", self["nfaces"].value, Face, "face")
if nb_vert:
yield Vector(self, "vertex normals",
nb_vert, Vertex, "normal")
yield UInt32(self, "has_shadow")
yield Float32(self, "unknown[]")
yield Float32(self, "radius")
yield Vertex(self, "unknown[]")
yield Vertex(self, "unknown[]")
def createDescription(self):
return 'Mesh "%s" (id %s)' % (self["name"].value, self["id"].value)
class Geoset(FieldSet):
def createFields(self):
yield UInt32(self, "count")
for index in xrange(self["count"].value):
yield Mesh(self, "mesh[]")
def createDescription(self):
return "Set of %s meshes" % self["count"].value
class Node(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
size = (188-4)*8
if self["parent_offset"].value != 0:
size += 32
if self["first_child_offset"].value != 0:
size += 32
if self["next_sibling_offset"].value != 0:
size += 32
self._size = size
def createFields(self):
yield String(self, "name", 32, strip="\0")
yield PaddingBytes(self, "unknown[]", 32, pattern="\xCC")
yield UInt32(self, "flags")
yield UInt32(self, "id")
yield UInt32(self, "type")
yield Int32(self, "mesh_id")
yield UInt32(self, "depth")
yield Int32(self, "parent_offset")
yield UInt32(self, "nchildren")
yield UInt32(self, "first_child_offset")
yield UInt32(self, "next_sibling_offset")
yield Vertex(self, "pivot")
yield Vertex(self, "position")
yield Float32(self, "pitch")
yield Float32(self, "yaw")
yield Float32(self, "roll")
for index in xrange(4):
yield Vertex(self, "unknown_vertex[]")
if self["parent_offset"].value != 0:
yield UInt32(self, "parent_id")
if self["first_child_offset"].value != 0:
yield UInt32(self, "first_child_id")
if self["next_sibling_offset"].value != 0:
yield UInt32(self, "next_sibling_id")
def createDescription(self):
return 'Node "%s"' % self["name"].value
class Nodes(FieldSet):
def createFields(self):
yield UInt32(self, "count")
for index in xrange(self["count"].value):
yield Node(self, "node[]")
def createDescription(self):
return 'Nodes (%s)' % self["count"].value
class Materials(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
count = self["count"]
self._size = count.size + count.value * (32*8)
def createFields(self):
yield UInt32(self, "count")
for index in xrange(self["count"].value):
yield String(self, "filename[]", 32, "Material file name", strip="\0")
def createDescription(self):
return 'Material file names (%s)' % self["count"].value
class File3do(Parser):
PARSER_TAGS = {
"id": "3do",
"category": "misc",
"file_ext": ("3do",),
"mime": (u"image/x-3do",),
"min_size": 8*4,
"description": "renderdroid 3d model."
}
endian = LITTLE_ENDIAN
def validate(self):
signature = self.stream.readBytes(0, 4)
return signature in ('LDOM', 'MODL') # lazy endian-safe hack =D
def createFields(self):
# Read file signature, and fix endian if needed
yield String(self, "file_sig", 4, "File signature", charset="ASCII")
if self["file_sig"].value == "MODL":
self.endian = BIG_ENDIAN
# Read file content
yield Materials(self, "materials")
yield String(self, "model_name", 32, "model file name", strip="\0")
yield RawBytes(self, "unknown[]", 4)
yield UInt32(self, "ngeosets")
for index in xrange(self["ngeosets"].value):
yield Geoset(self, "geoset[]")
yield RawBytes(self, "unknown[]", 4)
yield Nodes(self, "nodes")
yield Float32(self, "model_radius")
yield Vertex(self, "insertion_offset")
# Read the end of the file
if self.current_size < self._size:
yield self.seekBit(self._size, "end")
| 7,447 | Python | .py | 185 | 31.551351 | 82 | 0.591456 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,523 | ole2.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/ole2.py | """
Microsoft Office documents parser.
Informations:
* wordole.c of AntiWord program (v0.35)
Copyright (C) 1998-2003 A.J. van Os
Released under GNU GPL
http://www.winfield.demon.nl/
* File gsf-infile-msole.c of libgsf library (v1.14.0)
Copyright (C) 2002-2004 Jody Goldberg ([email protected])
Released under GNU LGPL 2.1
http://freshmeat.net/projects/libgsf/
* PDF from AAF Association
Copyright (C) 2004 AAF Association
Copyright (C) 1991-2003 Microsoft Corporation
http://www.aafassociation.org/html/specs/aafcontainerspec-v1.0.1.pdf
Author: Victor Stinner
Creation: 2006-04-23
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import (
FieldSet, ParserError, SeekableFieldSet, RootSeekableFieldSet,
UInt8, UInt16, UInt32, UInt64, TimestampWin64, Enum,
Bytes, RawBytes, NullBytes, String)
from lib.hachoir_core.text_handler import filesizeHandler
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.win32 import GUID
from lib.hachoir_parser.misc.msoffice import CustomFragment, OfficeRootEntry, PROPERTY_NAME
from lib.hachoir_parser.misc.word_doc import WordDocumentParser
from lib.hachoir_parser.misc.msoffice_summary import SummaryParser
MIN_BIG_BLOCK_LOG2 = 6 # 512 bytes
MAX_BIG_BLOCK_LOG2 = 14 # 64 kB
# Number of items in DIFAT
NB_DIFAT = 109
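# For orientation (not used by the code): the header itself stores NB_DIFAT
# FAT-sector pointers, so with 512-byte sectors (128 SECTs per FAT sector)
# extra DIFAT sectors only appear once a file needs more than
# 109 * 128 * 512 bytes (about 6.8 MB) of FAT-addressed data.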
class SECT(UInt32):
UNUSED = 0xFFFFFFFF # -1
END_OF_CHAIN = 0xFFFFFFFE # -2
BFAT_SECTOR = 0xFFFFFFFD # -3
DIFAT_SECTOR = 0xFFFFFFFC # -4
SPECIALS = set((END_OF_CHAIN, UNUSED, BFAT_SECTOR, DIFAT_SECTOR))
special_value_name = {
UNUSED: "unused",
END_OF_CHAIN: "end of a chain",
BFAT_SECTOR: "BFAT sector (in a FAT)",
DIFAT_SECTOR: "DIFAT sector (in a FAT)",
}
def __init__(self, parent, name, description=None):
UInt32.__init__(self, parent, name, description)
def createDisplay(self):
val = self.value
return SECT.special_value_name.get(val, str(val))
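# Illustrative sketch (not part of this module, which resolves chains with its
# own getChain() method): following a sector chain through a FAT given as a
# mapping or sequence of "sector index -> next sector", until END_OF_CHAIN.
def followChain(fat, start):
    chain = []
    sect = start
    while sect != SECT.END_OF_CHAIN:
        if sect in SECT.SPECIALS:
            raise ValueError("Unexpected special SECT value: %s" % sect)
        chain.append(sect)
        sect = fat[sect]
    return chain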
class Property(FieldSet):
TYPE_ROOT = 5
TYPE_NAME = {
1: "storage",
2: "stream",
3: "ILockBytes",
4: "IPropertyStorage",
5: "root"
}
DECORATOR_NAME = {
0: "red",
1: "black",
}
static_size = 128 * 8
def createFields(self):
bytes = self.stream.readBytes(self.absolute_address, 4)
if bytes == "\0R\0\0":
charset = "UTF-16-BE"
else:
charset = "UTF-16-LE"
yield String(self, "name", 64, charset=charset, truncate="\0")
yield UInt16(self, "namelen", "Length of the name")
yield Enum(UInt8(self, "type", "Property type"), self.TYPE_NAME)
yield Enum(UInt8(self, "decorator", "Decorator"), self.DECORATOR_NAME)
yield SECT(self, "left")
yield SECT(self, "right")
yield SECT(self, "child", "Child node (valid for storage and root types)")
yield GUID(self, "clsid", "CLSID of this storage (valid for storage and root types)")
yield NullBytes(self, "flags", 4, "User flags")
yield TimestampWin64(self, "creation", "Creation timestamp(valid for storage and root types)")
yield TimestampWin64(self, "lastmod", "Modify timestamp (valid for storage and root types)")
yield SECT(self, "start", "Starting SECT of the stream (valid for stream and root types)")
if self["/header/bb_shift"].value == 9:
yield filesizeHandler(UInt32(self, "size", "Size in bytes (valid for stream and root types)"))
yield NullBytes(self, "padding", 4)
else:
yield filesizeHandler(UInt64(self, "size", "Size in bytes (valid for stream and root types)"))
def createDescription(self):
name = self["name"].display
size = self["size"].display
return "Property: %s (%s)" % (name, size)
class DIFat(SeekableFieldSet):
def __init__(self, parent, name, db_start, db_count, description=None):
SeekableFieldSet.__init__(self, parent, name, description)
        self.start = db_start
        self.count = db_count
def createFields(self):
for index in xrange(NB_DIFAT):
yield SECT(self, "index[%u]" % index)
for index in xrange(self.count):
# this is relative to real DIFAT start
self.seekBit(NB_DIFAT * SECT.static_size+self.parent.sector_size*(self.start+index))
for sect_index in xrange(NB_DIFAT*(index+1),NB_DIFAT*(index+2)):
yield SECT(self, "index[%u]" % sect_index)
class Header(FieldSet):
static_size = 68 * 8
def createFields(self):
yield GUID(self, "clsid", "16 bytes GUID used by some apps")
yield UInt16(self, "ver_min", "Minor version")
yield UInt16(self, "ver_maj", "Minor version")
yield Bytes(self, "endian", 2, "Endian (0xFFFE for Intel)")
yield UInt16(self, "bb_shift", "Log, base 2, of the big block size")
yield UInt16(self, "sb_shift", "Log, base 2, of the small block size")
yield NullBytes(self, "reserved[]", 6, "(reserved)")
yield UInt32(self, "csectdir", "Number of SECTs in directory chain for 4 KB sectors (version 4)")
yield UInt32(self, "bb_count", "Number of Big Block Depot blocks")
yield SECT(self, "bb_start", "Root start block")
yield NullBytes(self, "transaction", 4, "Signature used for transactions (must be zero)")
yield UInt32(self, "threshold", "Maximum size for a mini stream (typically 4096 bytes)")
yield SECT(self, "sb_start", "Small Block Depot start block")
yield UInt32(self, "sb_count")
yield SECT(self, "db_start", "First block of DIFAT")
yield UInt32(self, "db_count", "Number of SECTs in DIFAT")
# Header (ole_id, header, difat) size in bytes
HEADER_SIZE = 64 + Header.static_size + NB_DIFAT * SECT.static_size
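# Illustrative check: 64 bits of magic + 68*8 bits of header + 109*32 bits
# of DIFAT = 4096 bits, i.e. exactly one 512-byte sector.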
class SectFat(FieldSet):
def __init__(self, parent, name, start, count, description=None):
FieldSet.__init__(self, parent, name, description, size=count*32)
self.count = count
self.start = start
def createFields(self):
for i in xrange(self.start, self.start + self.count):
yield SECT(self, "index[%u]" % i)
class OLE2_File(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "ole2",
"category": "misc",
"file_ext": (
"doc", "dot", # Microsoft Word
"ppt", "ppz", "pps", "pot", # Microsoft Powerpoint
"xls", "xla", # Microsoft Excel
"msi", # Windows installer
),
"mime": (
u"application/msword",
u"application/msexcel",
u"application/mspowerpoint",
),
"min_size": 512*8,
"description": "Microsoft Office document",
"magic": (("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", 0),),
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self["ole_id"].value != "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1":
return "Invalid magic"
if self["header/ver_maj"].value not in (3, 4):
return "Unknown major version (%s)" % self["header/ver_maj"].value
if self["header/endian"].value not in ("\xFF\xFE", "\xFE\xFF"):
return "Unknown endian (%s)" % self["header/endian"].raw_display
if not(MIN_BIG_BLOCK_LOG2 <= self["header/bb_shift"].value <= MAX_BIG_BLOCK_LOG2):
return "Invalid (log 2 of) big block size (%s)" % self["header/bb_shift"].value
if self["header/bb_shift"].value < self["header/sb_shift"].value:
return "Small block size (log2=%s) is bigger than big block size (log2=%s)!" \
% (self["header/sb_shift"].value, self["header/bb_shift"].value)
return True
def createFields(self):
# Signature
yield Bytes(self, "ole_id", 8, "OLE object signature")
header = Header(self, "header")
yield header
# Configure values
self.sector_size = (8 << header["bb_shift"].value)
self.fat_count = header["bb_count"].value
self.items_per_bbfat = self.sector_size / SECT.static_size
self.ss_size = (8 << header["sb_shift"].value)
self.items_per_ssfat = self.items_per_bbfat
# Read DIFAT (one level of indirection)
yield DIFat(self, "difat", header["db_start"].value, header["db_count"].value, "Double Indirection FAT")
# Read FAT (one level of indirection)
for field in self.readBFAT():
yield field
# Read SFAT
for field in self.readSFAT():
yield field
# Read properties
chain = self.getChain(self["header/bb_start"].value)
prop_per_sector = self.sector_size // Property.static_size
self.properties = []
for block in chain:
self.seekBlock(block)
for index in xrange(prop_per_sector):
property = Property(self, "property[]")
yield property
self.properties.append(property)
# Parse first property
for index, property in enumerate(self.properties):
if index == 0:
name = "root"
else:
try:
name = PROPERTY_NAME[property["name"].value]
except LookupError:
name = property.name+"content"
for field in self.parseProperty(property, name):
yield field
def parseProperty(self, property, name_prefix):
if not property["size"].value:
return
if property.name != "property[0]" \
and (property["size"].value < self["header/threshold"].value):
# Field is stored in the ministream, skip it
return
name = "%s[]" % name_prefix
first = None
previous = None
size = 0
fragment_group = None
chain = self.getChain(property["start"].value)
while True:
try:
block = chain.next()
contiguous = False
if not first:
first = block
contiguous = True
if previous and block == (previous+1):
contiguous = True
if contiguous:
previous = block
size += self.sector_size
continue
except StopIteration:
block = None
if first is None:
break
self.seekBlock(first)
desc = "Big blocks %s..%s (%s)" % (first, previous, previous-first+1)
desc += " of %s bytes" % (self.sector_size // 8)
if name_prefix in set(("root", "summary", "doc_summary", "word_doc")):
if name_prefix == "root":
parser = OfficeRootEntry
elif name_prefix == "word_doc":
parser = WordDocumentParser
else:
parser = SummaryParser
field = CustomFragment(self, name, size, parser, desc, fragment_group)
yield field
if not fragment_group:
fragment_group = field.group
else:
yield RawBytes(self, name, size//8, desc)
if block is None:
break
first = block
previous = block
size = self.sector_size
def getChain(self, start, use_sfat=False):
if use_sfat:
fat = self.ss_fat
items_per_fat = self.items_per_ssfat
err_prefix = "SFAT chain"
else:
fat = self.bb_fat
items_per_fat = self.items_per_bbfat
err_prefix = "BFAT chain"
block = start
block_set = set()
previous = block
while block != SECT.END_OF_CHAIN:
if block in SECT.SPECIALS:
raise ParserError("%s: Invalid block index (0x%08x), previous=%s" % (err_prefix, block, previous))
if block in block_set:
raise ParserError("%s: Found a loop (%s=>%s)" % (err_prefix, previous, block))
block_set.add(block)
yield block
previous = block
index = block // items_per_fat
try:
block = fat[index]["index[%u]" % block].value
except LookupError:
break
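    # Illustrative walk: for a chain 5 -> 6 -> 7 -> END_OF_CHAIN, each step
    # reads the next block number from the FAT itself, e.g.
    # fat[5 // items_per_fat]["index[5]"].value == 6, and so on until the
    # END_OF_CHAIN marker (0xFFFFFFFE) is reached.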
def readBFAT(self):
self.bb_fat = []
start = 0
count = self.items_per_bbfat
for index, block in enumerate(self.array("difat/index")):
block = block.value
if block == SECT.UNUSED:
break
desc = "FAT %u/%u at block %u" % \
(1+index, self["header/bb_count"].value, block)
self.seekBlock(block)
field = SectFat(self, "bbfat[]", start, count, desc)
yield field
self.bb_fat.append(field)
start += count
def readSFAT(self):
chain = self.getChain(self["header/sb_start"].value)
start = 0
self.ss_fat = []
count = self.items_per_ssfat
for index, block in enumerate(chain):
self.seekBlock(block)
field = SectFat(self, "sfat[]", \
start, count, \
"SFAT %u/%u at block %u" % \
(1+index, self["header/sb_count"].value, block))
yield field
self.ss_fat.append(field)
start += count
def createContentSize(self):
max_block = 0
for fat in self.array("bbfat"):
for entry in fat:
block = entry.value
if block not in SECT.SPECIALS:
max_block = max(block, max_block)
if max_block in SECT.SPECIALS:
return None
else:
return HEADER_SIZE + (max_block+1) * self.sector_size
def seekBlock(self, block):
self.seekBit(HEADER_SIZE + block * self.sector_size)
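    # Hedged usage sketch (createParser and the file name are illustrative,
    # not part of this module):
    #   from lib.hachoir_parser import createParser
    #   parser = createParser(u"sample.doc")
    #   if isinstance(parser, OLE2_File):
    #       print parser["header/bb_shift"].value  # log2 of big block size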
| 14,227 | Python | .py | 327 | 33.486239 | 114 | 0.583766 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,524 | gnome_keyring.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/gnome_keyring.py | """
Gnome keyring parser.
Sources:
- Gnome Keyring source code,
  function generate_file() in keyrings/gkr-keyring.c
Author: Victor Stinner
Creation date: 2008-04-09
"""
from lib.hachoir_core.tools import paddingSize
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
Bit, NullBits, NullBytes,
UInt8, UInt32, String, RawBytes, Enum,
TimestampUnix64, CompressedField,
SubFile)
from lib.hachoir_core.endian import BIG_ENDIAN
try:
import hashlib
def sha256(data):
hash = hashlib.new('sha256')
hash.update(data)
return hash.digest()
except ImportError:
def sha256(data):
raise ImportError("hashlib module is missing")
try:
from Crypto.Cipher import AES
class DeflateStream:
def __init__(self, stream):
hash_iterations = 1234
password = "x" * 8
salt = "\0" * 8
key, iv = generate_key(password, salt, hash_iterations)
self.cipher = AES.new(key, AES.MODE_CBC, iv)
def __call__(self, size, data=None):
if data is None:
return ''
return self.cipher.decrypt(data)
def Deflate(field):
CompressedField(field, DeflateStream)
return field
except ImportError:
def Deflate(field):
return field
class KeyringString(FieldSet):
def createFields(self):
yield UInt32(self, "length")
length = self["length"].value
if length == 0xffffffff:
return
yield String(self, "text", length, charset="UTF-8")
def createValue(self):
if "text" in self:
return self["text"].value
else:
return u''
def createDescription(self):
if "text" in self:
return self["text"].value
else:
return u"(empty string)"
class Attribute(FieldSet):
def createFields(self):
yield KeyringString(self, "name")
yield UInt32(self, "type")
type = self["type"].value
if type == 0:
yield KeyringString(self, "value")
elif type == 1:
yield UInt32(self, "value")
else:
raise TypeError("Unknown attribute type (%s)" % type)
def createDescription(self):
return 'Attribute "%s"' % self["name"].value
class ACL(FieldSet):
def createFields(self):
yield UInt32(self, "types_allowed")
yield KeyringString(self, "display_name")
yield KeyringString(self, "pathname")
yield KeyringString(self, "reserved[]")
yield UInt32(self, "reserved[]")
class Item(FieldSet):
def createFields(self):
yield UInt32(self, "id")
yield UInt32(self, "type")
yield UInt32(self, "attr_count")
for index in xrange(self["attr_count"].value):
yield Attribute(self, "attr[]")
def createDescription(self):
return "Item #%s: %s attributes" % (self["id"].value, self["attr_count"].value)
class Items(FieldSet):
def createFields(self):
yield UInt32(self, "count")
for index in xrange(self["count"].value):
yield Item(self, "item[]")
class EncryptedItem(FieldSet):
def createFields(self):
yield KeyringString(self, "display_name")
yield KeyringString(self, "secret")
yield TimestampUnix64(self, "mtime")
yield TimestampUnix64(self, "ctime")
yield KeyringString(self, "reserved[]")
for index in xrange(4):
yield UInt32(self, "reserved[]")
yield UInt32(self, "attr_count")
for index in xrange(self["attr_count"].value):
yield Attribute(self, "attr[]")
yield UInt32(self, "acl_count")
for index in xrange(self["acl_count"].value):
yield ACL(self, "acl[]")
# size = 8 # paddingSize((self.stream.size - self.current_size) // 8, 16)
# if size:
# yield NullBytes(self, "hash_padding", size, "16 bytes alignment")
class EncryptedData(Parser):
PARSER_TAGS = {
"id": "gnomeencryptedkeyring",
"min_size": 16*8,
"description": u"Gnome encrypted keyring",
}
endian = BIG_ENDIAN
def validate(self):
return True
def createFields(self):
yield RawBytes(self, "md5", 16)
while True:
size = (self.size - self.current_size) // 8
if size < 77:
break
yield EncryptedItem(self, "item[]")
size = paddingSize(self.current_size // 8, 16)
if size:
yield NullBytes(self, "padding_align", size)
class GnomeKeyring(Parser):
MAGIC = "GnomeKeyring\n\r\0\n"
PARSER_TAGS = {
"id": "gnomekeyring",
"category": "misc",
"magic": ((MAGIC, 0),),
"min_size": 47*8,
"description": u"Gnome keyring",
}
CRYPTO_NAMES = {
0: u"AEL",
}
HASH_NAMES = {
0: u"MD5",
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return u"Invalid magic string"
return True
def createFields(self):
yield String(self, "magic", len(self.MAGIC), 'Magic string (%r)' % self.MAGIC, charset="ASCII")
yield UInt8(self, "major_version")
yield UInt8(self, "minor_version")
yield Enum(UInt8(self, "crypto"), self.CRYPTO_NAMES)
yield Enum(UInt8(self, "hash"), self.HASH_NAMES)
yield KeyringString(self, "keyring_name")
yield TimestampUnix64(self, "mtime")
yield TimestampUnix64(self, "ctime")
yield Bit(self, "lock_on_idle")
yield NullBits(self, "reserved[]", 31, "Reserved for future flags")
yield UInt32(self, "lock_timeout")
yield UInt32(self, "hash_iterations")
yield RawBytes(self, "salt", 8)
yield NullBytes(self, "reserved[]", 16)
yield Items(self, "items")
yield UInt32(self, "encrypted_size")
yield Deflate(SubFile(self, "encrypted", self["encrypted_size"].value, "AES128 CBC", parser_class=EncryptedData))
def generate_key(password, salt, hash_iterations):
sha = sha256(password+salt)
for index in xrange(hash_iterations-1):
sha = sha256(sha)
return sha[:16], sha[16:]
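# Illustrative run (hypothetical values): generate_key("x" * 8, "\0" * 8, 1234)
# hashes password+salt once, re-hashes the 32-byte digest 1233 more times,
# then splits the result into a 16-byte AES-128 key and a 16-byte CBC IV,
# matching the hard-coded parameters used by DeflateStream above.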
| 6,271 | Python | .py | 175 | 28.177143 | 121 | 0.611926 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,525 | pcf.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/pcf.py | """
X11 Portable Compiled Font (pcf) parser.
Documents:
- Format for X11 pcf bitmap font files
http://fontforge.sourceforge.net/pcf-format.html
(file is based on the X11 sources)
Author: Victor Stinner
Creation date: 2007-03-20
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, Enum,
UInt8, UInt32, Bytes, RawBytes, NullBytes,
Bit, Bits, PaddingBits, CString)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.tools import paddingSize
class TOC(FieldSet):
TYPE_NAME = {
0x00000001: "Properties",
0x00000002: "Accelerators",
0x00000004: "Metrics",
0x00000008: "Bitmaps",
0x00000010: "Ink metrics",
0x00000020: "BDF encodings",
0x00000040: "SWidths",
0x00000080: "Glyph names",
0x00000100: "BDF accelerators",
}
FORMAT_NAME = {
0x00000000: "Default",
0x00000200: "Ink bounds",
0x00000100: "Accelerator W ink bounds",
# 0x00000200: "Compressed metrics",
}
def createFields(self):
yield Enum(UInt32(self, "type"), self.TYPE_NAME)
yield UInt32(self, "format")
yield filesizeHandler(UInt32(self, "size"))
yield UInt32(self, "offset")
def createDescription(self):
return "%s at %s (%s)" % (
self["type"].display, self["offset"].value, self["size"].display)
class PropertiesFormat(FieldSet):
static_size = 32
endian = LITTLE_ENDIAN
def createFields(self):
yield Bits(self, "reserved[]", 2)
yield Bit(self, "byte_big_endian")
yield Bit(self, "bit_big_endian")
yield Bits(self, "scan_unit", 2)
yield textHandler(PaddingBits(self, "reserved[]", 26), hexadecimal)
class Property(FieldSet):
def createFields(self):
yield UInt32(self, "name_offset")
yield UInt8(self, "is_string")
yield UInt32(self, "value_offset")
def createDescription(self):
# FIXME: Use link or any better way to read name value
name = self["../name[%s]" % (self.index-2)].value
return "Property %s" % name
class GlyphNames(FieldSet):
def __init__(self, parent, name, toc, description, size=None):
FieldSet.__init__(self, parent, name, description, size=size)
self.toc = toc
if self["format/byte_big_endian"].value:
self.endian = BIG_ENDIAN
else:
self.endian = LITTLE_ENDIAN
def createFields(self):
yield PropertiesFormat(self, "format")
yield UInt32(self, "count")
offsets = []
for index in xrange(self["count"].value):
offset = UInt32(self, "offset[]")
yield offset
offsets.append(offset.value)
yield UInt32(self, "total_str_length")
offsets.sort()
offset0 = self.current_size // 8
for offset in offsets:
padding = self.seekByte(offset0+offset)
if padding:
yield padding
yield CString(self, "name[]")
padding = (self.size - self.current_size) // 8
if padding:
yield NullBytes(self, "end_padding", padding)
class Properties(GlyphNames):
def createFields(self):
yield PropertiesFormat(self, "format")
yield UInt32(self, "nb_prop")
properties = []
for index in xrange(self["nb_prop"].value):
property = Property(self, "property[]")
yield property
properties.append(property)
padding = paddingSize(self.current_size//8, 4)
if padding:
yield NullBytes(self, "padding", padding)
yield UInt32(self, "total_str_length")
properties.sort(key=lambda entry: entry["name_offset"].value)
offset0 = self.current_size // 8
for property in properties:
padding = self.seekByte(offset0+property["name_offset"].value)
if padding:
yield padding
yield CString(self, "name[]", "Name of %s" % property.name)
if property["is_string"].value:
yield CString(self, "value[]", "Value of %s" % property.name)
padding = (self.size - self.current_size) // 8
if padding:
yield NullBytes(self, "end_padding", padding)
class PcfFile(Parser):
MAGIC = "\1fcp"
PARSER_TAGS = {
"id": "pcf",
"category": "misc",
"file_ext": ("pcf",),
"magic": ((MAGIC, 0),),
"min_size": 32, # FIXME
"description": "X11 Portable Compiled Font (pcf)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["signature"].value != self.MAGIC:
return "Invalid signature"
return True
def createFields(self):
yield Bytes(self, "signature", 4, r'File signature ("\1pcf")')
yield UInt32(self, "nb_toc")
entries = []
for index in xrange(self["nb_toc"].value):
entry = TOC(self, "toc[]")
yield entry
entries.append(entry)
entries.sort(key=lambda entry: entry["offset"].value)
for entry in entries:
size = entry["size"].value
padding = self.seekByte(entry["offset"].value)
if padding:
yield padding
maxsize = (self.size-self.current_size)//8
if maxsize < size:
self.warning("Truncate content of %s to %s bytes (was %s)" % (entry.path, maxsize, size))
size = maxsize
if not size:
continue
if entry["type"].value == 1:
yield Properties(self, "properties", entry, "Properties", size=size*8)
elif entry["type"].value == 128:
yield GlyphNames(self, "glyph_names", entry, "Glyph names", size=size*8)
else:
yield RawBytes(self, "data[]", size, "Content of %s" % entry.path)
| 6,014 | Python | .py | 153 | 30.48366 | 105 | 0.600958 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,526 | __init__.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/__init__.py | from lib.hachoir_parser.misc.file_3do import File3do
from lib.hachoir_parser.misc.file_3ds import File3ds
from lib.hachoir_parser.misc.torrent import TorrentFile
from lib.hachoir_parser.misc.ttf import TrueTypeFontFile
from lib.hachoir_parser.misc.chm import ChmFile
from lib.hachoir_parser.misc.lnk import LnkFile
from lib.hachoir_parser.misc.pcf import PcfFile
from lib.hachoir_parser.misc.ole2 import OLE2_File
from lib.hachoir_parser.misc.pdf import PDFDocument
from lib.hachoir_parser.misc.pifv import PIFVFile
from lib.hachoir_parser.misc.hlp import HlpFile
from lib.hachoir_parser.misc.gnome_keyring import GnomeKeyring
from lib.hachoir_parser.misc.bplist import BPList
| 678 | Python | .py | 13 | 51.076923 | 62 | 0.856928 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,527 | msoffice_summary.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/msoffice_summary.py | """
Microsoft Document summaries structures.
Documents
---------
- Apache POI (HPSF Internals):
http://poi.apache.org/hpsf/internals.html
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import (FieldSet, ParserError,
RootSeekableFieldSet, SeekableFieldSet,
Bit, Bits, NullBits,
UInt8, UInt16, UInt32, TimestampWin64, TimedeltaWin64, Enum,
Bytes, RawBytes, NullBytes, String,
Int8, Int32, Float32, Float64, PascalString32)
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.tools import createDict
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_parser.common.win32 import GUID, PascalStringWin32, CODEPAGE_CHARSET
from lib.hachoir_parser.image.bmp import BmpHeader, parseImageData
MAX_SECTION_COUNT = 100
OS_MAC = 1
OS_NAME = {
0: "Windows 16-bit",
1: "Macintosh",
2: "Windows 32-bit",
}
class OSConfig:
def __init__(self, big_endian):
if big_endian:
self.charset = "MacRoman"
self.utf16 = "UTF-16-BE"
else:
# FIXME: Don't guess the charset, use ISO-8859-1 or UTF-8
#self.charset = "ISO-8859-1"
self.charset = None
self.utf16 = "UTF-16-LE"
class PropertyIndex(FieldSet):
TAG_CODEPAGE = 1
COMMON_PROPERTY = {
0: "Dictionary",
1: "CodePage",
0x80000000: "LOCALE_SYSTEM_DEFAULT",
0x80000003: "CASE_SENSITIVE",
}
DOCUMENT_PROPERTY = {
2: "Category",
3: "PresentationFormat",
4: "NumBytes",
5: "NumLines",
6: "NumParagraphs",
7: "NumSlides",
8: "NumNotes",
9: "NumHiddenSlides",
10: "NumMMClips",
11: "Scale",
12: "HeadingPairs",
13: "DocumentParts",
14: "Manager",
15: "Company",
16: "LinksDirty",
17: "DocSumInfo_17",
18: "DocSumInfo_18",
19: "DocSumInfo_19",
20: "DocSumInfo_20",
21: "DocSumInfo_21",
22: "DocSumInfo_22",
23: "DocSumInfo_23",
}
DOCUMENT_PROPERTY.update(COMMON_PROPERTY)
COMPONENT_PROPERTY = {
2: "Title",
3: "Subject",
4: "Author",
5: "Keywords",
6: "Comments",
7: "Template",
8: "LastSavedBy",
9: "RevisionNumber",
10: "TotalEditingTime",
11: "LastPrinted",
12: "CreateTime",
13: "LastSavedTime",
14: "NumPages",
15: "NumWords",
16: "NumCharacters",
17: "Thumbnail",
18: "AppName",
19: "Security",
}
COMPONENT_PROPERTY.update(COMMON_PROPERTY)
def createFields(self):
if self["../.."].name.startswith("doc_summary"):
enum = self.DOCUMENT_PROPERTY
else:
enum = self.COMPONENT_PROPERTY
yield Enum(UInt32(self, "id"), enum)
yield UInt32(self, "offset")
def createDescription(self):
return "Property: %s" % self["id"].display
class Bool(Int8):
def createValue(self):
value = Int8.createValue(self)
return (value == -1)
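# Note: OLE property sets encode VARIANT_TRUE as -1 (all bits set), which is
# why Bool compares against -1 instead of 1.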
class Thumbnail(FieldSet):
"""
Thumbnail.
Documents:
- See Jakarta POI
http://jakarta.apache.org/poi/hpsf/thumbnails.html
http://www.penguin-soft.com/penguin/developer/poi/
org/apache/poi/hpsf/Thumbnail.html#CF_BITMAP
- How To Extract Thumbnail Images
http://sparks.discreet.com/knowledgebase/public/
solutions/ExtractThumbnailImg.htm
"""
FORMAT_CLIPBOARD = -1
FORMAT_NAME = {
-1: "Windows clipboard",
-2: "Macintosh clipboard",
-3: "GUID that contains format identifier",
0: "No data",
2: "Bitmap",
3: "Windows metafile format",
8: "Device Independent Bitmap (DIB)",
14: "Enhanced Windows metafile",
}
DIB_BMP = 8
DIB_FORMAT = {
2: "Bitmap Obsolete (old BMP)",
3: "Windows metafile format (WMF)",
8: "Device Independent Bitmap (BMP)",
14: "Enhanced Windows metafile (EMF)",
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
yield filesizeHandler(UInt32(self, "size"))
yield Enum(Int32(self, "format"), self.FORMAT_NAME)
if self["format"].value == self.FORMAT_CLIPBOARD:
yield Enum(UInt32(self, "dib_format"), self.DIB_FORMAT)
if self["dib_format"].value == self.DIB_BMP:
yield BmpHeader(self, "bmp_header")
size = (self.size - self.current_size) // 8
yield parseImageData(self, "pixels", size, self["bmp_header"])
return
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
class PropertyContent(FieldSet):
TYPE_LPSTR = 30
TYPE_INFO = {
0: ("EMPTY", None),
1: ("NULL", None),
2: ("UInt16", UInt16),
3: ("UInt32", UInt32),
4: ("Float32", Float32),
5: ("Float64", Float64),
6: ("CY", None),
7: ("DATE", None),
8: ("BSTR", None),
9: ("DISPATCH", None),
10: ("ERROR", None),
11: ("BOOL", Bool),
12: ("VARIANT", None),
13: ("UNKNOWN", None),
14: ("DECIMAL", None),
16: ("I1", None),
17: ("UI1", None),
18: ("UI2", None),
19: ("UI4", None),
20: ("I8", None),
21: ("UI8", None),
22: ("INT", None),
23: ("UINT", None),
24: ("VOID", None),
25: ("HRESULT", None),
26: ("PTR", None),
27: ("SAFEARRAY", None),
28: ("CARRAY", None),
29: ("USERDEFINED", None),
30: ("LPSTR", PascalString32),
31: ("LPWSTR", PascalString32),
64: ("FILETIME", TimestampWin64),
65: ("BLOB", None),
66: ("STREAM", None),
67: ("STORAGE", None),
68: ("STREAMED_OBJECT", None),
69: ("STORED_OBJECT", None),
70: ("BLOB_OBJECT", None),
71: ("THUMBNAIL", Thumbnail),
72: ("CLSID", None),
0x1000: ("Vector", None),
}
TYPE_NAME = createDict(TYPE_INFO, 0)
def createFields(self):
self.osconfig = self.parent.osconfig
        # An alternative encoding reads the type as a plain 32-bit enum;
        # this parser uses the split layout below (12-bit type, vector flag,
        # 19 bits of padding).
        yield Enum(Bits(self, "type", 12), self.TYPE_NAME)
        yield Bit(self, "is_vector")
        yield NullBits(self, "padding", 32-12-1)
tag = self["type"].value
kw = {}
try:
handler = self.TYPE_INFO[tag][1]
if handler == PascalString32:
osconfig = self.osconfig
if tag == self.TYPE_LPSTR:
kw["charset"] = osconfig.charset
else:
kw["charset"] = osconfig.utf16
elif handler == TimestampWin64:
if self.description == "TotalEditingTime":
handler = TimedeltaWin64
except LookupError:
handler = None
if not handler:
raise ParserError("OLE2: Unable to parse property of type %s" \
% self["type"].display)
if self["is_vector"].value:
yield UInt32(self, "count")
for index in xrange(self["count"].value):
yield handler(self, "item[]", **kw)
else:
yield handler(self, "value", **kw)
self.createValue = lambda: self["value"].value
PropertyContent.TYPE_INFO[12] = ("VARIANT", PropertyContent)
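# VARIANT (type 12) is registered after the class definition because its
# handler is PropertyContent itself, which does not yet exist while TYPE_INFO
# is being built.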
class SummarySection(SeekableFieldSet):
def __init__(self, *args):
SeekableFieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
self.osconfig = self.parent.osconfig
yield UInt32(self, "size")
yield UInt32(self, "property_count")
for index in xrange(self["property_count"].value):
yield PropertyIndex(self, "property_index[]")
for index in xrange(self["property_count"].value):
findex = self["property_index[%u]" % index]
self.seekByte(findex["offset"].value)
field = PropertyContent(self, "property[]", findex["id"].display)
yield field
if not self.osconfig.charset \
and findex['id'].value == PropertyIndex.TAG_CODEPAGE:
codepage = field['value'].value
if codepage in CODEPAGE_CHARSET:
self.osconfig.charset = CODEPAGE_CHARSET[codepage]
else:
self.warning("Unknown codepage: %r" % codepage)
class SummaryIndex(FieldSet):
static_size = 20*8
def createFields(self):
yield String(self, "name", 16)
yield UInt32(self, "offset")
class BaseSummary:
endian = LITTLE_ENDIAN
def __init__(self):
if self["endian"].value == "\xFF\xFE":
self.endian = BIG_ENDIAN
elif self["endian"].value == "\xFE\xFF":
self.endian = LITTLE_ENDIAN
else:
raise ParserError("OLE2: Invalid endian value")
self.osconfig = OSConfig(self["os_type"].value == OS_MAC)
def createFields(self):
yield Bytes(self, "endian", 2, "Endian (0xFF 0xFE for Intel)")
yield UInt16(self, "format", "Format (0)")
yield UInt8(self, "os_version")
yield UInt8(self, "os_revision")
yield Enum(UInt16(self, "os_type"), OS_NAME)
yield GUID(self, "format_id")
yield UInt32(self, "section_count")
if MAX_SECTION_COUNT < self["section_count"].value:
raise ParserError("OLE2: Too much sections (%s)" % self["section_count"].value)
section_indexes = []
for index in xrange(self["section_count"].value):
section_index = SummaryIndex(self, "section_index[]")
yield section_index
section_indexes.append(section_index)
for section_index in section_indexes:
self.seekByte(section_index["offset"].value)
yield SummarySection(self, "section[]")
size = (self.size - self.current_size) // 8
if 0 < size:
yield NullBytes(self, "end_padding", size)
class SummaryParser(BaseSummary, HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"description": "Microsoft Office summary",
}
def __init__(self, stream, **kw):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **kw)
BaseSummary.__init__(self)
def validate(self):
return True
class SummaryFieldSet(BaseSummary, FieldSet):
def __init__(self, parent, name, description=None, size=None):
FieldSet.__init__(self, parent, name, description=description, size=size)
BaseSummary.__init__(self)
class CompObj(FieldSet):
OS_VERSION = {
0x0a03: "Windows 3.1",
}
def createFields(self):
# Header
yield UInt16(self, "version", "Version (=1)")
yield textHandler(UInt16(self, "endian", "Endian (0xFF 0xFE for Intel)"), hexadecimal)
yield UInt8(self, "os_version")
yield UInt8(self, "os_revision")
yield Enum(UInt16(self, "os_type"), OS_NAME)
yield Int32(self, "unused", "(=-1)")
yield GUID(self, "clsid")
# User type
yield PascalString32(self, "user_type", strip="\0")
# Clipboard format
if self["os_type"].value == OS_MAC:
yield Int32(self, "unused[]", "(=-2)")
yield String(self, "clipboard_format", 4)
else:
yield PascalString32(self, "clipboard_format", strip="\0")
if self.current_size == self.size:
return
#-- OLE 2.01 ---
# Program ID
yield PascalString32(self, "prog_id", strip="\0")
if self["os_type"].value != OS_MAC:
# Magic number
yield textHandler(UInt32(self, "magic", "Magic number (0x71B239F4)"), hexadecimal)
# Unicode version
yield PascalStringWin32(self, "user_type_unicode", strip="\0")
yield PascalStringWin32(self, "clipboard_format_unicode", strip="\0")
yield PascalStringWin32(self, "prog_id_unicode", strip="\0")
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "end_padding", size)
| 12,537 | Python | .py | 337 | 28.281899 | 94 | 0.574178 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,528 | torrent.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/torrent.py | """
.torrent metainfo file parser
http://wiki.theory.org/BitTorrentSpecification#Metainfo_File_Structure
Status: To statufy
Author: Christophe Gisquet <[email protected]>
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
String, RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import makePrintable, timestampUNIX, humanFilesize
# Maximum number of bytes for string length
MAX_STRING_LENGTH = 6 # length in 0..999999
# Maximum number of bytes for integer value
MAX_INTEGER_SIZE = 21 # 21 decimal digits (or "-" sign and 20 digits)
class Integer(FieldSet):
# i<integer encoded in base ten ASCII>e
def createFields(self):
yield String(self, "start", 1, "Integer start delimiter (i)", charset="ASCII")
# Find integer end
addr = self.absolute_address+self.current_size
len = self.stream.searchBytesLength('e', False, addr, addr+(MAX_INTEGER_SIZE+1)*8)
if len is None:
raise ParserError("Torrent: Unable to find integer end delimiter (e)!")
if not len:
raise ParserError("Torrent: error, empty integer!")
yield String(self, "value", len, "Integer value", charset="ASCII")
yield String(self, "end", 1, "Integer end delimiter")
def createValue(self):
"""Read integer value (may raise ValueError)"""
return int(self["value"].value)
class TorrentString(FieldSet):
# <string length encoded in base ten ASCII>:<string data>
def createFields(self):
addr = self.absolute_address
len = self.stream.searchBytesLength(':', False, addr, addr+(MAX_STRING_LENGTH+1)*8)
if len is None:
raise ParserError("Torrent: unable to find string separator (':')")
if not len:
raise ParserError("Torrent: error: no string length!")
val = String(self, "length", len, "String length")
yield val
try:
len = int(val.value)
except ValueError:
len = -1
if len < 0:
raise ParserError("Invalid string length (%s)" % makePrintable(val.value, "ASCII", to_unicode=True))
yield String(self, "separator", 1, "String length/value separator")
if not len:
self.info("Empty string: len=%i" % len)
return
if len<512:
yield String(self, "value", len, "String value", charset="ISO-8859-1")
else:
# Probably raw data
yield RawBytes(self, "value", len, "Raw data")
def createValue(self):
if "value" in self:
field = self["value"]
if field.__class__ != RawBytes:
return field.value
else:
return None
else:
return None
class Dictionary(FieldSet):
# d<bencoded string><bencoded element>e
def createFields(self):
yield String(self, "start", 1, "Dictionary start delimiter (d)", charset="ASCII")
while self.stream.readBytes(self.absolute_address+self.current_size, 1) != "e":
yield DictionaryItem(self, "item[]")
yield String(self, "end", 1, "Dictionary end delimiter")
class List(FieldSet):
# l<bencoded values>e
def createFields(self):
yield String(self, "start", 1, "List start delimiter")
while self.stream.readBytes(self.absolute_address+self.current_size, 1) != "e":
yield Entry(self, "item[]")
yield String(self, "end", 1, "List end delimiter")
class DictionaryItem(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
# TODO: Remove this because it's not lazy?
key = self["key"]
if not key.hasValue():
return
key = key.value
self._name = str(key).replace(" ", "_")
def createDisplay(self):
if not self["value"].hasValue():
return None
if self._name in ("length", "piece_length"):
return humanFilesize(self.value)
return FieldSet.createDisplay(self)
def createValue(self):
if not self["value"].hasValue():
return None
if self._name == "creation_date":
return self.createTimestampValue()
else:
return self["value"].value
def createFields(self):
yield Entry(self, "key")
yield Entry(self, "value")
def createTimestampValue(self):
return timestampUNIX(self["value"].value)
# Map first chunk byte => type
TAGS = {'d': Dictionary, 'i': Integer, 'l': List}
# digits 0-9 may all start a string length ("0:" encodes the empty string)
for index in xrange(0, 9+1):
    TAGS[str(index)] = TorrentString
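# Bencode examples (illustrative): the integer 42 is encoded as "i42e", the
# string "spam" as "4:spam", the list ["a", "b"] as "l1:a1:be" and the
# dictionary {"k": 1} as "d1:ki1ee" -- hence the first-byte dispatch above.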
# Create an entry
def Entry(parent, name):
addr = parent.absolute_address + parent.current_size
tag = parent.stream.readBytes(addr, 1)
if tag not in TAGS:
raise ParserError("Torrent: Entry of type %r not handled" % type)
cls = TAGS[tag]
return cls(parent, name)
class TorrentFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = "d8:announce"
PARSER_TAGS = {
"id": "torrent",
"category": "misc",
"file_ext": ("torrent",),
"min_size": 50*8,
"mime": (u"application/x-bittorrent",),
"magic": ((MAGIC, 0),),
"description": "Torrent metainfo file"
}
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Dictionary(self, "root", size=self.size)
| 5,530 | Python | .py | 137 | 32.605839 | 112 | 0.627166 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,529 | pifv.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/pifv.py | """
EFI Platform Initialization Firmware Volume parser.
Author: Alexandre Boeglin
Creation date: 08 jul 2007
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt24, UInt32, UInt64, Enum,
CString, String, PaddingBytes, RawBytes, NullBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import paddingSize, humanFilesize
from lib.hachoir_parser.common.win32 import GUID
EFI_SECTION_COMPRESSION = 0x1
EFI_SECTION_GUID_DEFINED = 0x2
EFI_SECTION_PE32 = 0x10
EFI_SECTION_PIC = 0x11
EFI_SECTION_TE = 0x12
EFI_SECTION_DXE_DEPEX = 0x13
EFI_SECTION_VERSION = 0x14
EFI_SECTION_USER_INTERFACE = 0x15
EFI_SECTION_COMPATIBILITY16 = 0x16
EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17
EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18
EFI_SECTION_RAW = 0x19
EFI_SECTION_PEI_DEPEX = 0x1b
EFI_SECTION_TYPE = {
EFI_SECTION_COMPRESSION: "Encapsulation section where other sections" \
+ " are compressed",
EFI_SECTION_GUID_DEFINED: "Encapsulation section where other sections" \
+ " have format defined by a GUID",
EFI_SECTION_PE32: "PE32+ Executable image",
EFI_SECTION_PIC: "Position-Independent Code",
EFI_SECTION_TE: "Terse Executable image",
EFI_SECTION_DXE_DEPEX: "DXE Dependency Expression",
EFI_SECTION_VERSION: "Version, Text and Numeric",
EFI_SECTION_USER_INTERFACE: "User-Friendly name of the driver",
EFI_SECTION_COMPATIBILITY16: "DOS-style 16-bit EXE",
EFI_SECTION_FIRMWARE_VOLUME_IMAGE: "PI Firmware Volume image",
EFI_SECTION_FREEFORM_SUBTYPE_GUID: "Raw data with GUID in header to" \
+ " define format",
EFI_SECTION_RAW: "Raw data",
EFI_SECTION_PEI_DEPEX: "PEI Dependency Expression",
}
EFI_FV_FILETYPE_RAW = 0x1
EFI_FV_FILETYPE_FREEFORM = 0x2
EFI_FV_FILETYPE_SECURITY_CORE = 0x3
EFI_FV_FILETYPE_PEI_CORE = 0x4
EFI_FV_FILETYPE_DXE_CORE = 0x5
EFI_FV_FILETYPE_PEIM = 0x6
EFI_FV_FILETYPE_DRIVER = 0x7
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x8
EFI_FV_FILETYPE_APPLICATION = 0x9
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0xb
EFI_FV_FILETYPE_FFS_PAD = 0xf0
EFI_FV_FILETYPE = {
EFI_FV_FILETYPE_RAW: "Binary data",
EFI_FV_FILETYPE_FREEFORM: "Sectioned data",
EFI_FV_FILETYPE_SECURITY_CORE: "Platform core code used during the SEC" \
+ " phase",
EFI_FV_FILETYPE_PEI_CORE: "PEI Foundation",
EFI_FV_FILETYPE_DXE_CORE: "DXE Foundation",
EFI_FV_FILETYPE_PEIM: "PEI module (PEIM)",
EFI_FV_FILETYPE_DRIVER: "DXE driver",
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER: "Combined PEIM/DXE driver",
EFI_FV_FILETYPE_APPLICATION: "Application",
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE: "Firmware volume image",
EFI_FV_FILETYPE_FFS_PAD: "Pad File For FFS",
}
for x in xrange(0xc0, 0xe0):
EFI_FV_FILETYPE[x] = "OEM File"
for x in xrange(0xe0, 0xf0):
EFI_FV_FILETYPE[x] = "Debug/Test File"
for x in xrange(0xf1, 0x100):
EFI_FV_FILETYPE[x] = "Firmware File System Specific File"
class BlockMap(FieldSet):
static_size = 8*8
def createFields(self):
yield UInt32(self, "num_blocks")
yield UInt32(self, "len")
def createDescription(self):
return "%d blocks of %s" % (
self["num_blocks"].value, humanFilesize(self["len"].value))
class FileSection(FieldSet):
COMPRESSION_TYPE = {
0: 'Not Compressed',
1: 'Standard Compression',
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["size"].value * 8
section_type = self["type"].value
if section_type in (EFI_SECTION_DXE_DEPEX, EFI_SECTION_PEI_DEPEX):
# These sections can sometimes be longer than what their size
            # claims! It's so nice to have such detailed specs and not follow
# them ...
if self.stream.readBytes(self.absolute_address +
self._size, 1) == '\0':
self._size = self._size + 16
def createFields(self):
# Header
yield UInt24(self, "size")
yield Enum(UInt8(self, "type"), EFI_SECTION_TYPE)
section_type = self["type"].value
if section_type == EFI_SECTION_COMPRESSION:
yield UInt32(self, "uncomp_len")
yield Enum(UInt8(self, "comp_type"), self.COMPRESSION_TYPE)
elif section_type == EFI_SECTION_FREEFORM_SUBTYPE_GUID:
yield GUID(self, "sub_type_guid")
elif section_type == EFI_SECTION_GUID_DEFINED:
yield GUID(self, "section_definition_guid")
yield UInt16(self, "data_offset")
yield UInt16(self, "attributes")
elif section_type == EFI_SECTION_USER_INTERFACE:
yield CString(self, "file_name", charset="UTF-16-LE")
elif section_type == EFI_SECTION_VERSION:
yield UInt16(self, "build_number")
yield CString(self, "version", charset="UTF-16-LE")
# Content
content_size = (self.size - self.current_size) // 8
if content_size == 0:
return
if section_type == EFI_SECTION_COMPRESSION:
compression_type = self["comp_type"].value
if compression_type == 1:
while not self.eof:
yield RawBytes(self, "compressed_content", content_size)
else:
while not self.eof:
yield FileSection(self, "section[]")
elif section_type == EFI_SECTION_FIRMWARE_VOLUME_IMAGE:
yield FirmwareVolume(self, "firmware_volume")
else:
yield RawBytes(self, "content", content_size,
EFI_SECTION_TYPE.get(self["type"].value,
"Unknown Section Type"))
def createDescription(self):
return EFI_SECTION_TYPE.get(self["type"].value,
"Unknown Section Type")
class File(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["size"].value * 8
def createFields(self):
# Header
yield GUID(self, "name")
yield UInt16(self, "integrity_check")
yield Enum(UInt8(self, "type"), EFI_FV_FILETYPE)
yield UInt8(self, "attributes")
yield UInt24(self, "size")
yield UInt8(self, "state")
# Content
while not self.eof:
yield FileSection(self, "section[]")
def createDescription(self):
return "%s: %s containing %d section(s)" % (
self["name"].value,
self["type"].display,
len(self.array("section")))
class FirmwareVolume(FieldSet):
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if not self._size:
self._size = self["volume_len"].value * 8
def createFields(self):
# Header
yield NullBytes(self, "zero_vector", 16)
yield GUID(self, "fs_guid")
yield UInt64(self, "volume_len")
yield String(self, "signature", 4)
yield UInt32(self, "attributes")
yield UInt16(self, "header_len")
yield UInt16(self, "checksum")
yield UInt16(self, "ext_header_offset")
yield UInt8(self, "reserved")
yield UInt8(self, "revision")
while True:
bm = BlockMap(self, "block_map[]")
yield bm
if bm['num_blocks'].value == 0 and bm['len'].value == 0:
break
# TODO must handle extended header
# Content
while not self.eof:
padding = paddingSize(self.current_size // 8, 8)
if padding:
yield PaddingBytes(self, "padding[]", padding)
yield File(self, "file[]")
def createDescription(self):
return "Firmware Volume containing %d file(s)" % len(self.array("file"))
class PIFVFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = '_FVH'
PARSER_TAGS = {
"id": "pifv",
"category": "program",
"file_ext": ("bin", ""),
"min_size": 64*8, # smallest possible header
"magic_regex": (("\0{16}.{24}%s" % MAGIC, 0), ),
"description": "EFI Platform Initialization Firmware Volume",
}
def validate(self):
if self.stream.readBytes(40*8, 4) != self.MAGIC:
return "Invalid magic number"
if self.stream.readBytes(0, 16) != "\0"*16:
return "Invalid zero vector"
return True
def createFields(self):
while not self.eof:
yield FirmwareVolume(self, "firmware_volume[]")
| 8,492 | Python | .py | 209 | 33.086124 | 80 | 0.633741 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,530 | msoffice.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/msoffice.py | """
Parsers for the different streams and fragments found in an OLE2 file.
Documents:
- goffice source code
Author: Robert Xiao, Victor Stinner
Creation: 2006-04-23
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import FieldSet, RootSeekableFieldSet, RawBytes
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.stream import StringInputStream
from lib.hachoir_parser.misc.msoffice_summary import SummaryFieldSet, CompObj
from lib.hachoir_parser.misc.word_doc import WordDocumentFieldSet
PROPERTY_NAME = {
u"\5DocumentSummaryInformation": "doc_summary",
u"\5SummaryInformation": "summary",
u"WordDocument": "word_doc",
}
class OfficeRootEntry(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"description": "Microsoft Office document subfragments",
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
return True
def createFields(self):
for index, property in enumerate(self.ole2.properties):
if index == 0:
continue
try:
name = PROPERTY_NAME[property["name"].value]
except LookupError:
name = property.name+"content"
for field in self.parseProperty(index, property, name):
yield field
def parseProperty(self, property_index, property, name_prefix):
ole2 = self.ole2
if not property["size"].value:
return
if property["size"].value >= ole2["header/threshold"].value:
return
name = "%s[]" % name_prefix
first = None
previous = None
size = 0
start = property["start"].value
chain = ole2.getChain(start, True)
blocksize = ole2.ss_size
desc_format = "Small blocks %s..%s (%s)"
while True:
try:
block = chain.next()
contiguous = False
if not first:
first = block
contiguous = True
if previous and block == (previous+1):
contiguous = True
if contiguous:
previous = block
size += blocksize
continue
except StopIteration:
block = None
self.seekSBlock(first)
desc = desc_format % (first, previous, previous-first+1)
size = min(size, property["size"].value*8)
if name_prefix in ("summary", "doc_summary"):
yield SummaryFieldSet(self, name, desc, size=size)
elif name_prefix == "word_doc":
yield WordDocumentFieldSet(self, name, desc, size=size)
elif property_index == 1:
yield CompObj(self, "comp_obj", desc, size=size)
else:
yield RawBytes(self, name, size//8, desc)
if block is None:
break
first = block
previous = block
size = ole2.sector_size
def seekSBlock(self, block):
self.seekBit(block * self.ole2.ss_size)
class FragmentGroup:
def __init__(self, parser):
self.items = []
self.parser = parser
def add(self, item):
self.items.append(item)
def createInputStream(self):
# FIXME: Use lazy stream creation
data = []
for item in self.items:
data.append( item["rawdata"].value )
data = "".join(data)
# FIXME: Use smarter code to send arguments
args = {"ole2": self.items[0].root}
tags = {"class": self.parser, "args": args}
tags = tags.iteritems()
return StringInputStream(data, "<fragment group>", tags=tags)
class CustomFragment(FieldSet):
def __init__(self, parent, name, size, parser, description=None, group=None):
FieldSet.__init__(self, parent, name, description, size=size)
if not group:
group = FragmentGroup(parser)
self.group = group
self.group.add(self)
def createFields(self):
yield RawBytes(self, "rawdata", self.size//8)
def _createInputStream(self, **args):
return self.group.createInputStream()
| 4,405 | Python | .py | 113 | 29.247788 | 93 | 0.599204 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,531 | bplist.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/bplist.py | """
Apple/NeXT Binary Property List (BPLIST) parser.
Also includes a .createXML() function which produces an XML representation of the object.
Note that it will discard unknown objects, nulls and fill values, but should work for most files.
Documents:
- CFBinaryPList.c
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Parsing.subproj/CFBinaryPList.c
- ForFoundationOnly.h (for structure formats)
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Base.subproj/ForFoundationOnly.h
- XML <-> BPList converter
http://scw.us/iPhone/plutil/plutil.pl
Author: Robert Xiao
Created: 2008-09-21
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import (RootSeekableFieldSet, FieldSet, Enum,
Bits, GenericInteger, Float32, Float64, UInt8, UInt64, Bytes, NullBytes, RawBytes, String)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import displayHandler
from lib.hachoir_core.tools import humanDatetime
from datetime import datetime, timedelta
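# File layout recap (per the CFBinaryPList sources above): the 8-byte magic
# "bplist00", then the serialized objects, then an offset table mapping object
# numbers to file offsets, and finally a fixed 32-byte trailer describing
# where that table lives.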
class BPListTrailer(FieldSet):
def createFields(self):
yield NullBytes(self, "unused", 6)
yield UInt8(self, "offsetIntSize", "Size (in bytes) of offsets in the offset table")
yield UInt8(self, "objectRefSize", "Size (in bytes) of object numbers in object references")
yield UInt64(self, "numObjects", "Number of objects in this file")
yield UInt64(self, "topObject", "Top-level object reference")
yield UInt64(self, "offsetTableOffset", "File offset to the offset table")
def createDescription(self):
return "Binary PList trailer"
class BPListOffsetTable(FieldSet):
def createFields(self):
size = self["../trailer/offsetIntSize"].value*8
for i in range(self["../trailer/numObjects"].value):
yield Bits(self, "offset[]", size)
class BPListSize(FieldSet):
def createFields(self):
yield Bits(self, "size", 4)
if self['size'].value == 0xF:
yield BPListObject(self, "fullsize")
def createValue(self):
if 'fullsize' in self:
return self['fullsize'].value
else:
return self['size'].value
class BPListObjectRef(GenericInteger):
def __init__(self, parent, name, description=None):
size = parent['/trailer/objectRefSize'].value*8
GenericInteger.__init__(self, parent, name, False, size, description)
def getRef(self):
return self.parent['/object[' + str(self.value) + ']']
def createDisplay(self):
return self.getRef().display
def createXML(self, prefix=''):
return self.getRef().createXML(prefix)
class BPListArray(FieldSet):
def __init__(self, parent, name, size, description=None):
FieldSet.__init__(self, parent, name, description=description)
self.numels = size
def createFields(self):
for i in range(self.numels):
yield BPListObjectRef(self, "ref[]")
def createValue(self):
return self.array('ref')
def createDisplay(self):
return '[' + ', '.join([x.display for x in self.value]) + ']'
def createXML(self,prefix=''):
return prefix + '<array>\n' + ''.join([x.createXML(prefix + '\t' ) + '\n' for x in self.value]) + prefix + '</array>'
class BPListDict(FieldSet):
def __init__(self, parent, name, size, description=None):
FieldSet.__init__(self, parent, name, description=description)
self.numels = size
def createFields(self):
for i in range(self.numels):
yield BPListObjectRef(self, "keyref[]")
for i in range(self.numels):
yield BPListObjectRef(self, "valref[]")
def createValue(self):
return zip(self.array('keyref'),self.array('valref'))
def createDisplay(self):
return '{' + ', '.join(['%s: %s'%(k.display,v.display) for k,v in self.value]) + '}'
def createXML(self, prefix=''):
return prefix + '<dict>\n' + ''.join(['%s\t<key>%s</key>\n%s\n'%(prefix,k.getRef().value.encode('utf-8'),v.createXML(prefix + '\t')) for k,v in self.value]) + prefix + '</dict>'
class BPListObject(FieldSet):
def createFields(self):
yield Enum(Bits(self, "marker_type", 4),
{0: "Simple",
1: "Int",
2: "Real",
3: "Date",
4: "Data",
5: "ASCII String",
6: "UTF-16-BE String",
8: "UID",
10: "Array",
13: "Dict",})
markertype = self['marker_type'].value
if markertype == 0:
# Simple (Null)
yield Enum(Bits(self, "value", 4),
{0: "Null",
8: "False",
9: "True",
15: "Fill Byte",})
if self['value'].display == "False":
self.xml=lambda prefix:prefix + "<false/>"
elif self['value'].display == "True":
self.xml=lambda prefix:prefix + "<true/>"
else:
self.xml=lambda prefix:prefix + ""
elif markertype == 1:
# Int
yield Bits(self, "size", 4, "log2 of number of bytes")
size=self['size'].value
# 8-bit (size=0), 16-bit (size=1) and 32-bit (size=2) numbers are unsigned
# 64-bit (size=3) numbers are signed
yield GenericInteger(self, "value", (size>=3), (2**size)*8)
self.xml=lambda prefix:prefix + "<integer>%s</integer>"%self['value'].value
elif markertype == 2:
# Real
yield Bits(self, "size", 4, "log2 of number of bytes")
if self['size'].value == 2: # 2**2 = 4 byte float
yield Float32(self, "value")
elif self['size'].value == 3: # 2**3 = 8 byte float
yield Float64(self, "value")
else:
# FIXME: What is the format of the real?
yield Bits(self, "value", (2**self['size'].value)*8)
self.xml=lambda prefix:prefix + "<real>%s</real>"%self['value'].value
elif markertype == 3:
# Date
yield Bits(self, "extra", 4, "Extra value, should be 3")
cvt_time=lambda v:datetime(2001,1,1) + timedelta(seconds=v)
yield displayHandler(Float64(self, "value"),lambda x:humanDatetime(cvt_time(x)))
self.xml=lambda prefix:prefix + "<date>%s</date>"%(cvt_time(self['value'].value).isoformat())
elif markertype == 4:
# Data
yield BPListSize(self, "size")
if self['size'].value:
yield Bytes(self, "value", self['size'].value)
self.xml=lambda prefix:prefix + "<data>\n%s\n%s</data>"%(self['value'].value.encode('base64').strip(),prefix)
else:
self.xml=lambda prefix:prefix + '<data></data>'
elif markertype == 5:
# ASCII String
yield BPListSize(self, "size")
if self['size'].value:
yield String(self, "value", self['size'].value, charset="ASCII")
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('iso-8859-1'))
else:
self.xml=lambda prefix:prefix + '<string></string>'
elif markertype == 6:
# UTF-16-BE String
yield BPListSize(self, "size")
if self['size'].value:
yield String(self, "value", self['size'].value*2, charset="UTF-16-BE")
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('utf-8'))
else:
self.xml=lambda prefix:prefix + '<string></string>'
elif markertype == 8:
# UID
yield Bits(self, "size", 4, "Number of bytes minus 1")
yield GenericInteger(self, "value", False, (self['size'].value + 1)*8)
self.xml=lambda prefix:prefix + "" # no equivalent?
elif markertype == 10:
# Array
yield BPListSize(self, "size")
size = self['size'].value
if size:
yield BPListArray(self, "value", size)
self.xml=lambda prefix:self['value'].createXML(prefix)
elif markertype == 13:
# Dict
yield BPListSize(self, "size")
yield BPListDict(self, "value", self['size'].value)
self.xml=lambda prefix:self['value'].createXML(prefix)
else:
yield Bits(self, "value", 4)
self.xml=lambda prefix:''
def createValue(self):
if 'value' in self:
return self['value'].value
elif self['marker_type'].value in [4,5,6]:
return u''
else:
return None
def createDisplay(self):
if 'value' in self:
return unicode(self['value'].display)
elif self['marker_type'].value in [4,5,6]:
return u''
else:
return None
def createXML(self, prefix=''):
if 'value' in self:
try:
return self.xml(prefix)
except AttributeError:
return ''
return ''
def getFieldType(self):
return '%s<%s>'%(FieldSet.getFieldType(self), self['marker_type'].display)
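# Hedged usage sketch (the file name is hypothetical; FileInputStream is
# assumed to be available from lib.hachoir_core.stream):
#   from lib.hachoir_core.stream import FileInputStream
#   parser = BPList(FileInputStream(u"Info.plist"))
#   print parser.createXML()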
class BPList(HachoirParser, RootSeekableFieldSet):
endian = BIG_ENDIAN
MAGIC = "bplist00"
PARSER_TAGS = {
"id": "bplist",
"category": "misc",
"file_ext": ("plist",),
"magic": ((MAGIC, 0),),
"min_size": 8 + 32, # bplist00 + 32-byte trailer
"description": "Apple/NeXT Binary Property List",
}
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Bytes(self, "magic", 8, "File magic (bplist00)")
if self.size:
self.seekByte(self.size//8-32, True)
else:
# FIXME: UNTESTED
while True:
try:
self.seekByte(1024)
except:
break
self.seekByte(self.size//8-32)
yield BPListTrailer(self, "trailer")
self.seekByte(self['trailer/offsetTableOffset'].value)
yield BPListOffsetTable(self, "offset_table")
for i in self.array("offset_table/offset"):
if self.current_size > i.value*8:
self.seekByte(i.value)
elif self.current_size < i.value*8:
# try to detect files with gaps or unparsed content
yield RawBytes(self, "padding[]", i.value-self.current_size//8)
yield BPListObject(self, "object[]")
def createXML(self, prefix=''):
return '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
''' + self['/object[' + str(self['/trailer/topObject'].value) + ']'].createXML(prefix) + '''
</plist>'''
| 11,349 | Python | .py | 249 | 35.11245 | 185 | 0.577824 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,532 | common.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/common.py | from lib.hachoir_core.field import StaticFieldSet, Float32
class Vertex(StaticFieldSet):
format = ((Float32, "x"), (Float32, "y"), (Float32, "z"))
def createValue(self):
return (self["x"].value, self["y"].value, self["z"].value)
class MapUV(StaticFieldSet):
format = ((Float32, "u"), (Float32, "v"))
def createValue(self):
return (self["u"].value, self["v"].value)
| 401 | Python | .py | 9 | 39.555556 | 66 | 0.641753 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,533 | file_3ds.py | midgetspy_Sick-Beard/lib/hachoir_parser/misc/file_3ds.py | """
3D Studio Max file (.3ds) parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet,
UInt16, UInt32, RawBytes, Enum, CString)
from lib.hachoir_parser.image.common import RGB
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_parser.misc.common import Vertex, MapUV
def readObject(parent):
yield CString(parent, "name", "Object name")
size = parent["size"].value * 8
while parent.current_size < size:
yield Chunk(parent, "chunk[]")
def readTextureFilename(parent):
yield CString(parent, "filename", "Texture filename")
def readVersion(parent):
yield UInt32(parent, "version", "3DS file format version")
def readMaterialName(parent):
yield CString(parent, "name", "Material name")
class Polygon(StaticFieldSet):
format = (
(UInt16, "a", "Vertex A"),
(UInt16, "b", "Vertex B"),
(UInt16, "c", "Vertex C"),
(UInt16, "flags", "Flags"))
def readMapList(parent):
yield UInt16(parent, "count", "Map count")
for index in xrange(parent["count"].value):
yield MapUV(parent, "map_uv[]", "Mapping UV")
def readColor(parent):
yield RGB(parent, "color")
def readVertexList(parent):
yield UInt16(parent, "count", "Vertex count")
for index in range(0, parent["count"].value):
yield Vertex(parent, "vertex[]", "Vertex")
def readPolygonList(parent):
count = UInt16(parent, "count", "Vertex count")
yield count
    for i in xrange(count.value):
yield Polygon(parent, "polygon[]")
size = parent["size"].value * 8
while parent.current_size < size:
yield Chunk(parent, "chunk[]")
class Chunk(FieldSet):
# List of chunk type name
type_name = {
0x0011: "Color",
0x4D4D: "Main chunk",
0x0002: "File version",
0x3D3D: "Materials and objects",
0x4000: "Object",
0x4100: "Mesh (triangular)",
0x4110: "Vertices list",
0x4120: "Polygon (faces) list",
0x4140: "Map UV list",
0x4130: "Object material",
0xAFFF: "New material",
0xA000: "Material name",
0xA010: "Material ambient",
0xA020: "Material diffuse",
0xA030: "Texture specular",
0xA200: "Texture",
0xA300: "Texture filename",
# Key frames
0xB000: "Keyframes",
0xB002: "Object node tag",
0xB006: "Light target node tag",
0xB007: "Spot light node tag",
0xB00A: "Keyframes header",
0xB009: "Keyframe current time",
0xB030: "Node identifier",
0xB010: "Node header",
0x7001: "Viewport layout"
}
chunk_id_by_type = {
0x4d4d: "main",
0x0002: "version",
0x3d3d: "obj_mat",
0xb000: "keyframes",
0xafff: "material[]",
0x4000: "object[]",
0x4110: "vertices_list",
0x4120: "polygon_list",
0x4140: "mapuv_list",
0x4100: "mesh"
}
# List of chunks which contains other chunks
sub_chunks = \
(0x4D4D, 0x4100, 0x3D3D, 0xAFFF, 0xA200,
0xB002, 0xB006, 0xB007,
0xA010, 0xA030, 0xA020, 0xB000)
# List of chunk type handlers
handlers = {
0xA000: readMaterialName,
0x4000: readObject,
0xA300: readTextureFilename,
0x0011: readColor,
0x0002: readVersion,
0x4110: readVertexList,
0x4120: readPolygonList,
0x4140: readMapList
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
# Set description
self._description = "Chunk: %s" % self["type"].display
# Set name based on type field
type = self["type"].value
if type in Chunk.chunk_id_by_type:
self._name = Chunk.chunk_id_by_type[type]
else:
self._name = "chunk_%04x" % type
# Guess chunk size
self._size = self["size"].value * 8
def createFields(self):
yield Enum(textHandler(UInt16(self, "type", "Chunk type"), hexadecimal), Chunk.type_name)
yield UInt32(self, "size", "Chunk size (in bytes)")
content_size = self["size"].value - 6
if content_size == 0:
return
type = self["type"].value
if type in Chunk.sub_chunks:
while self.current_size < self.size:
yield Chunk(self, "chunk[]")
else:
if type in Chunk.handlers:
fields = Chunk.handlers[type] (self)
for field in fields:
yield field
else:
yield RawBytes(self, "data", content_size)
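# Sketch of the on-disk chunk layout implied by Chunk.createFields() above
# (little-endian; offsets and sizes in bytes):
#
#   +0  UInt16  type     chunk type (e.g. 0x4D4D for the main chunk)
#   +2  UInt32  size     total chunk size, including this 6-byte header
#   +6  ...     payload  sub-chunks, a type-specific handler, or raw bytes
#
# which is why createFields() computes content_size as size - 6.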
class File3ds(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "3ds",
"category": "misc",
"file_ext": ("3ds",),
"mime": (u"image/x-3ds",),
"min_size": 16*8,
"description": "3D Studio Max model"
}
def validate(self):
if self.stream.readBytes(0, 2) != "MM":
return "Wrong signature"
if self["main/version/version"].value not in (2, 3):
return "Unknown format version"
return True
def createFields(self):
while not self.eof:
yield Chunk(self, "chunk[]")
| 5,329 | Python | .py | 153 | 27.150327 | 97 | 0.601514 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,534 | python.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/python.py | """
Python compiled source code parser.
Information:
- Python 2.4.2 source code:
files Python/marshal.c and Python/import.c
Author: Victor Stinner
Creation: 25 March 2005
"""
DISASSEMBLE = False
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, UInt8,
UInt16, Int32, UInt32, Int64, ParserError, Float64, Enum,
Character, Bytes, RawBytes, PascalString8, TimestampUnix32)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.bits import long2raw
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.i18n import ngettext
if DISASSEMBLE:
from dis import dis
def disassembleBytecode(field):
bytecode = field.value
dis(bytecode)
# --- String and string reference ---
def parseString(parent):
yield UInt32(parent, "length", "Length")
length = parent["length"].value
    if parent.name == "lnotab":
        bytecode_offset = 0
        line_number = parent['../firstlineno'].value
        for i in range(0, length, 2):
            bc_off_delta = UInt8(parent, 'bytecode_offset_delta[]')
            yield bc_off_delta
            bytecode_offset += bc_off_delta.value
            bc_off_delta._description = 'Bytecode Offset %i' % bytecode_offset
            line_number_delta = UInt8(parent, 'line_number_delta[]')
            yield line_number_delta
            line_number += line_number_delta.value
            line_number_delta._description = 'Line Number %i' % line_number
elif 0 < length:
yield RawBytes(parent, "text", length, "Content")
if DISASSEMBLE and parent.name == "compiled_code":
disassembleBytecode(parent["text"])
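# Worked example of the lnotab decoding in parseString() above (illustrative
# values): with firstlineno = 1 and lnotab bytes "\x00\x01\x06\x01", the
# (bytecode_offset_delta, line_number_delta) pairs are (0, 1) and (6, 1), so
# the running totals map bytecode offset 0 -> line 2 and offset 6 -> line 3.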
def parseStringRef(parent):
yield textHandler(UInt32(parent, "ref"), hexadecimal)
def createStringRefDesc(parent):
return "String ref: %s" % parent["ref"].display
# --- Integers ---
def parseInt32(parent):
yield Int32(parent, "value")
def parseInt64(parent):
yield Int64(parent, "value")
def parseLong(parent):
yield Int32(parent, "digit_count")
for index in xrange( abs(parent["digit_count"].value) ):
yield UInt16(parent, "digit[]")
# --- Float and complex ---
def parseFloat(parent):
yield PascalString8(parent, "value")
def parseBinaryFloat(parent):
yield Float64(parent, "value")
def parseComplex(parent):
yield PascalString8(parent, "real")
yield PascalString8(parent, "complex")
def parseBinaryComplex(parent):
yield Float64(parent, "real")
yield Float64(parent, "complex")
# --- Tuple and list ---
def parseTuple(parent):
yield Int32(parent, "count", "Item count")
count = parent["count"].value
if count < 0:
raise ParserError("Invalid tuple/list count")
for index in xrange(count):
yield Object(parent, "item[]")
def createTupleDesc(parent):
count = parent["count"].value
items = ngettext("%s item", "%s items", count) % count
return "%s: %s" % (parent.code_info[2], items)
# --- Dict ---
def parseDict(parent):
"""
Format is: (key1, value1, key2, value2, ..., keyn, valuen, NULL)
where each keyi and valuei is an object.
"""
parent.count = 0
while True:
key = Object(parent, "key[]")
yield key
if key["bytecode"].value == "0":
break
yield Object(parent, "value[]")
parent.count += 1
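# Worked example of the dict layout above (illustrative): a marshalled
# {'a': 1} yields key[0] (an 's' or 't' string "a"), value[0] (an 'i' Int32
# with value 1), then a final key[1] whose bytecode is '0' (NULL), which
# terminates the loop with parent.count == 1.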
def createDictDesc(parent):
return "Dict: %s" % (ngettext("%s key", "%s keys", parent.count) % parent.count)
# --- Code ---
def parseCode(parent):
if 0x3000000 <= parent.root.getVersion():
yield UInt32(parent, "arg_count", "Argument count")
yield UInt32(parent, "kwonlyargcount", "Keyword only argument count")
yield UInt32(parent, "nb_locals", "Number of local variables")
yield UInt32(parent, "stack_size", "Stack size")
yield UInt32(parent, "flags")
elif 0x2030000 <= parent.root.getVersion():
yield UInt32(parent, "arg_count", "Argument count")
yield UInt32(parent, "nb_locals", "Number of local variables")
yield UInt32(parent, "stack_size", "Stack size")
yield UInt32(parent, "flags")
else:
yield UInt16(parent, "arg_count", "Argument count")
yield UInt16(parent, "nb_locals", "Number of local variables")
yield UInt16(parent, "stack_size", "Stack size")
yield UInt16(parent, "flags")
yield Object(parent, "compiled_code")
yield Object(parent, "consts")
yield Object(parent, "names")
yield Object(parent, "varnames")
if 0x2000000 <= parent.root.getVersion():
yield Object(parent, "freevars")
yield Object(parent, "cellvars")
yield Object(parent, "filename")
yield Object(parent, "name")
if 0x2030000 <= parent.root.getVersion():
yield UInt32(parent, "firstlineno", "First line number")
else:
yield UInt16(parent, "firstlineno", "First line number")
yield Object(parent, "lnotab")
class Object(FieldSet):
bytecode_info = {
# Don't contains any data
'0': ("null", None, "NULL", None),
'N': ("none", None, "None", None),
'F': ("false", None, "False", None),
'T': ("true", None, "True", None),
'S': ("stop_iter", None, "StopIter", None),
'.': ("ellipsis", None, "ELLIPSIS", None),
'?': ("unknown", None, "Unknown", None),
'i': ("int32", parseInt32, "Int32", None),
'I': ("int64", parseInt64, "Int64", None),
'f': ("float", parseFloat, "Float", None),
'g': ("bin_float", parseBinaryFloat, "Binary float", None),
'x': ("complex", parseComplex, "Complex", None),
'y': ("bin_complex", parseBinaryComplex, "Binary complex", None),
'l': ("long", parseLong, "Long", None),
's': ("string", parseString, "String", None),
't': ("interned", parseString, "Interned", None),
'u': ("unicode", parseString, "Unicode", None),
'R': ("string_ref", parseStringRef, "String ref", createStringRefDesc),
'(': ("tuple", parseTuple, "Tuple", createTupleDesc),
'[': ("list", parseTuple, "List", createTupleDesc),
'<': ("set", parseTuple, "Set", createTupleDesc),
'>': ("frozenset", parseTuple, "Frozen set", createTupleDesc),
'{': ("dict", parseDict, "Dict", createDictDesc),
'c': ("code", parseCode, "Code", None),
}
def __init__(self, parent, name, **kw):
FieldSet.__init__(self, parent, name, **kw)
code = self["bytecode"].value
if code not in self.bytecode_info:
raise ParserError('Unknown bytecode: "%s"' % code)
self.code_info = self.bytecode_info[code]
if not name:
self._name = self.code_info[0]
if code == "l":
self.createValue = self.createValueLong
elif code in ("i", "I", "f", "g"):
self.createValue = lambda: self["value"].value
elif code == "T":
self.createValue = lambda: True
elif code == "F":
self.createValue = lambda: False
elif code in ("x", "y"):
self.createValue = self.createValueComplex
elif code in ("s", "t", "u"):
self.createValue = self.createValueString
self.createDisplay = self.createDisplayString
if code == 't':
if not hasattr(self.root,'string_table'):
self.root.string_table=[]
self.root.string_table.append(self)
elif code == 'R':
if hasattr(self.root,'string_table'):
self.createValue = self.createValueStringRef
def createValueString(self):
if "text" in self:
return self["text"].value
else:
return ""
def createDisplayString(self):
if "text" in self:
return self["text"].display
else:
return "(empty)"
def createValueLong(self):
is_negative = self["digit_count"].value < 0
count = abs(self["digit_count"].value)
total = 0
for index in xrange(count-1, -1, -1):
total <<= 15
total += self["digit[%u]" % index].value
if is_negative:
total = -total
return total
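    # Worked example for createValueLong() above (values made up): marshal
    # stores Python longs as base-2**15 digits, least significant first, so
    # digit_count = 2 with digit[0] = 1 and digit[1] = 2 decodes to
    # (2 << 15) + 1 = 65537; a negative digit_count just flips the sign.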
def createValueStringRef(self):
return self.root.string_table[self['ref'].value].value
def createDisplayStringRef(self):
return self.root.string_table[self['ref'].value].display
def createValueComplex(self):
return complex(
float(self["real"].value),
float(self["complex"].value))
def createFields(self):
yield Character(self, "bytecode", "Bytecode")
parser = self.code_info[1]
if parser:
for field in parser(self):
yield field
def createDescription(self):
create = self.code_info[3]
if create:
return create(self)
else:
return self.code_info[2]
class PythonCompiledFile(Parser):
PARSER_TAGS = {
"id": "python",
"category": "program",
"file_ext": ("pyc", "pyo"),
"min_size": 9*8,
"description": "Compiled Python script (.pyc/.pyo files)"
}
endian = LITTLE_ENDIAN
    # Dictionary which associates the pyc signature (16-bit integer)
    # to a Python version string and number (e.g. 62061 => ("2.4b1", 0x2040000)).
# This list comes from CPython source code, see "MAGIC"
# and "pyc_magic" in file Python/import.c
MAGIC = {
# Python 1.x
20121: ("1.5", 0x1050000),
# Python 2.x
50823: ("2.0", 0x2000000),
60202: ("2.1", 0x2010000),
60717: ("2.2", 0x2020000),
62011: ("2.3a0", 0x2030000),
62021: ("2.3a0", 0x2030000),
62041: ("2.4a0", 0x2040000),
62051: ("2.4a3", 0x2040000),
62061: ("2.4b1", 0x2040000),
62071: ("2.5a0", 0x2050000),
62081: ("2.5a0 (ast-branch)", 0x2050000),
62091: ("2.5a0 (with)", 0x2050000),
62092: ("2.5a0 (WITH_CLEANUP opcode)", 0x2050000),
62101: ("2.5b3", 0x2050000),
62111: ("2.5b3", 0x2050000),
62121: ("2.5c1", 0x2050000),
62131: ("2.5c2", 0x2050000),
# Python 3.x
3000: ("3.0 (3000)", 0x3000000),
3010: ("3.0 (3010)", 0x3000000),
3020: ("3.0 (3020)", 0x3000000),
3030: ("3.0 (3030)", 0x3000000),
3040: ("3.0 (3040)", 0x3000000),
3050: ("3.0 (3050)", 0x3000000),
3060: ("3.0 (3060)", 0x3000000),
3070: ("3.0 (3070)", 0x3000000),
3080: ("3.0 (3080)", 0x3000000),
3090: ("3.0 (3090)", 0x3000000),
3100: ("3.0 (3100)", 0x3000000),
3102: ("3.0 (3102)", 0x3000000),
3110: ("3.0a4", 0x3000000),
3130: ("3.0a5", 0x3000000),
3131: ("3.0a5 unicode", 0x3000000),
}
    # Dictionary which associates the pyc signature (4-byte string)
    # to a Python version string (e.g. "m\xf2\r\n" => "2.4b1")
STR_MAGIC = dict( \
(long2raw(magic | (ord('\r')<<16) | (ord('\n')<<24), LITTLE_ENDIAN), value[0]) \
for magic, value in MAGIC.iteritems())
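    # Worked example of the STR_MAGIC construction above: MAGIC key 62061
    # ("2.4b1") is 0xF26D, and 0xF26D | (ord('\r') << 16) | (ord('\n') << 24)
    # packed little-endian gives the 4-byte signature "m\xf2\r\n".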
def validate(self):
signature = self.stream.readBits(0, 16, self.endian)
if signature not in self.MAGIC:
return "Unknown version (%s)" % signature
if self.stream.readBytes(2*8, 2) != "\r\n":
return r"Wrong signature (\r\n)"
if self.stream.readBytes(8*8, 1) != 'c':
return "First object bytecode is not code"
return True
def getVersion(self):
if not hasattr(self, "version"):
signature = self.stream.readBits(0, 16, self.endian)
self.version = self.MAGIC[signature][1]
return self.version
def createFields(self):
yield Enum(Bytes(self, "signature", 4, "Python file signature and version"), self.STR_MAGIC)
yield TimestampUnix32(self, "timestamp", "Timestamp")
yield Object(self, "content")
| 11,979 | Python | .py | 295 | 32.877966 | 100 | 0.599055 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,535 | elf.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/elf.py | """
ELF (Unix/BSD executable file format) parser.
Author: Victor Stinner
Creation date: 08 may 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
String, Bytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class ElfHeader(FieldSet):
static_size = 52*8
LITTLE_ENDIAN_ID = 1
BIG_ENDIAN_ID = 2
MACHINE_NAME = {
1: u"AT&T WE 32100",
2: u"SPARC",
3: u"Intel 80386",
4: u"Motorola 68000",
5: u"Motorola 88000",
7: u"Intel 80860",
8: u"MIPS RS3000"
}
CLASS_NAME = {
1: u"32 bits",
2: u"64 bits"
}
TYPE_NAME = {
0: u"No file type",
1: u"Relocatable file",
2: u"Executable file",
3: u"Shared object file",
4: u"Core file",
0xFF00: u"Processor-specific (0xFF00)",
0xFFFF: u"Processor-specific (0xFFFF)"
}
ENDIAN_NAME = {
LITTLE_ENDIAN_ID: "Little endian",
BIG_ENDIAN_ID: "Big endian",
}
def createFields(self):
yield Bytes(self, "signature", 4, r'ELF signature ("\x7fELF")')
yield Enum(UInt8(self, "class", "Class"), self.CLASS_NAME)
yield Enum(UInt8(self, "endian", "Endian"), self.ENDIAN_NAME)
yield UInt8(self, "file_version", "File version")
yield String(self, "pad", 8, "Pad")
yield UInt8(self, "nb_ident", "Size of ident[]")
yield Enum(UInt16(self, "type", "File type"), self.TYPE_NAME)
yield Enum(UInt16(self, "machine", "Machine type"), self.MACHINE_NAME)
yield UInt32(self, "version", "ELF format version")
yield UInt32(self, "entry", "Number of entries")
yield UInt32(self, "phoff", "Program header offset")
yield UInt32(self, "shoff", "Section header offset")
yield UInt32(self, "flags", "Flags")
yield UInt16(self, "ehsize", "Elf header size (this header)")
yield UInt16(self, "phentsize", "Program header entry size")
yield UInt16(self, "phnum", "Program header entry count")
yield UInt16(self, "shentsize", "Section header entry size")
yield UInt16(self, "shnum", "Section header entre count")
yield UInt16(self, "shstrndx", "Section header strtab index")
def isValid(self):
if self["signature"].value != "\x7FELF":
return "Wrong ELF signature"
if self["class"].value not in self.CLASS_NAME:
return "Unknown class"
if self["endian"].value not in self.ENDIAN_NAME:
return "Unknown endian (%s)" % self["endian"].value
return ""
class SectionHeader32(FieldSet):
static_size = 40*8
TYPE_NAME = {
8: "BSS"
}
def createFields(self):
yield UInt32(self, "name", "Name")
yield Enum(UInt32(self, "type", "Type"), self.TYPE_NAME)
yield UInt32(self, "flags", "Flags")
yield textHandler(UInt32(self, "VMA", "Virtual memory address"), hexadecimal)
yield textHandler(UInt32(self, "LMA", "Logical memory address (in file)"), hexadecimal)
yield textHandler(UInt32(self, "size", "Size"), hexadecimal)
yield UInt32(self, "link", "Link")
yield UInt32(self, "info", "Information")
yield UInt32(self, "addr_align", "Address alignment")
yield UInt32(self, "entry_size", "Entry size")
def createDescription(self):
return "Section header (name: %s, type: %s)" % \
(self["name"].value, self["type"].display)
class ProgramHeader32(FieldSet):
TYPE_NAME = {
3: "Dynamic library"
}
static_size = 32*8
def createFields(self):
yield Enum(UInt16(self, "type", "Type"), ProgramHeader32.TYPE_NAME)
yield UInt16(self, "flags", "Flags")
yield UInt32(self, "offset", "Offset")
yield textHandler(UInt32(self, "vaddr", "V. address"), hexadecimal)
yield textHandler(UInt32(self, "paddr", "P. address"), hexadecimal)
yield UInt32(self, "file_size", "File size")
yield UInt32(self, "mem_size", "Memory size")
yield UInt32(self, "align", "Alignment")
yield UInt32(self, "xxx", "???")
def createDescription(self):
return "Program Header (%s)" % self["type"].display
def sortSection(a, b):
return int(a["offset"] - b["offset"])
#class Sections(FieldSet):
# def createFields?(self, stream, parent, sections):
# for section in sections:
# ofs = section["offset"]
# size = section["file_size"]
# if size != 0:
# sub = stream.createSub(ofs, size)
# #yield DeflateFilter(self, "section[]", sub, size, Section, "Section"))
# chunk = self.doRead("section[]", "Section", (Section,), {"stream": sub})
# else:
# chunk = self.doRead("section[]", "Section", (FormatChunk, "string[0]"))
# chunk.description = "ELF section (in file: %s..%s)" % (ofs, ofs+size)
class ElfFile(Parser):
PARSER_TAGS = {
"id": "elf",
"category": "program",
"file_ext": ("so", ""),
"min_size": ElfHeader.static_size, # At least one program header
"mime": (
u"application/x-executable",
u"application/x-object",
u"application/x-sharedlib",
u"application/x-executable-file",
u"application/x-coredump"),
"magic": (("\x7FELF", 0),),
"description": "ELF Unix/BSD program/library"
}
endian = LITTLE_ENDIAN
def validate(self):
err = self["header"].isValid()
if err:
return err
return True
def createFields(self):
# Choose the right endian depending on endian specified in header
if self.stream.readBits(5*8, 8, BIG_ENDIAN) == ElfHeader.BIG_ENDIAN_ID:
self.endian = BIG_ENDIAN
else:
self.endian = LITTLE_ENDIAN
# Parse header and program headers
yield ElfHeader(self, "header", "Header")
for index in xrange(self["header/phnum"].value):
yield ProgramHeader32(self, "prg_header[]")
if False:
raise ParserError("TODO: Parse sections...")
#sections = self.array("prg_header")
#size = self["header/shoff"].value - self.current_size//8
#chunk = self.doRead("data", "Data", (DeflateFilter, stream, size, Sections, sections))
            #chunk.description = "Sections (use an evil hack to share the same data between different parts)"
#assert self.current_size//8 == self["header/shoff"].value
else:
raw = self.seekByte(self["header/shoff"].value, "raw[]", relative=False)
if raw:
yield raw
for index in xrange(self["header/shnum"].value):
yield SectionHeader32(self, "section_header[]")
def createDescription(self):
return "ELF Unix/BSD program/library: %s" % (
self["header/class"].display)
| 7,120 | Python | .py | 166 | 35.036145 | 108 | 0.59974 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,536 | exe_ne.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/exe_ne.py | from lib.hachoir_core.field import (FieldSet,
Bit, UInt8, UInt16, UInt32, Bytes,
PaddingBits, PaddingBytes, NullBits, NullBytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
class NE_Header(FieldSet):
static_size = 64*8
def createFields(self):
yield Bytes(self, "signature", 2, "New executable signature (NE)")
yield UInt8(self, "link_ver", "Linker version number")
yield UInt8(self, "link_rev", "Linker revision number")
yield UInt16(self, "entry_table_ofst", "Offset to the entry table")
yield UInt16(self, "entry_table_size", "Length (in bytes) of the entry table")
yield PaddingBytes(self, "reserved[]", 4)
yield Bit(self, "is_dll", "Is a dynamic-link library (DLL)?")
yield Bit(self, "is_win_app", "Is a Windows application?")
yield PaddingBits(self, "reserved[]", 9)
yield Bit(self, "first_seg_code", "First segment contains code that loads the application?")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "link_error", "Load even if linker detects errors?")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "is_lib", "Is a library module?")
yield UInt16(self, "auto_data_seg", "Automatic data segment number")
yield filesizeHandler(UInt16(self, "local_heap_size", "Initial size (in bytes) of the local heap"))
yield filesizeHandler(UInt16(self, "stack_size", "Initial size (in bytes) of the stack"))
yield textHandler(UInt32(self, "cs_ip", "Value of CS:IP"), hexadecimal)
yield textHandler(UInt32(self, "ss_sp", "Value of SS:SP"), hexadecimal)
yield UInt16(self, "nb_entry_seg_tab", "Number of entries in the segment table")
yield UInt16(self, "nb_entry_modref_tab", "Number of entries in the module-reference table")
yield filesizeHandler(UInt16(self, "size_nonres_name_tab", "Number of bytes in the nonresident-name table"))
yield UInt16(self, "seg_tab_ofs", "Segment table offset")
yield UInt16(self, "rsrc_ofs", "Resource offset")
yield UInt16(self, "res_name_tab_ofs", "Resident-name table offset")
yield UInt16(self, "mod_ref_tab_ofs", "Module-reference table offset")
yield UInt16(self, "import_tab_ofs", "Imported-name table offset")
yield UInt32(self, "non_res_name_tab_ofs", "Nonresident-name table offset")
yield UInt16(self, "nb_mov_ent_pt", "Number of movable entry points")
yield UInt16(self, "log2_sector_size", "Log2 of the segment sector size")
yield UInt16(self, "nb_rsrc_seg", "Number of resource segments")
yield Bit(self, "unknown_os_format", "Operating system format is unknown")
yield PaddingBits(self, "reserved[]", 1)
yield Bit(self, "os_windows", "Operating system is Microsoft Windows")
yield NullBits(self, "reserved[]", 6)
yield Bit(self, "is_win20_prot", "Is Windows 2.x application running in version 3.x protected mode")
yield Bit(self, "is_win20_font", "Is Windows 2.x application supporting proportional fonts")
yield Bit(self, "fast_load", "Contains a fast-load area?")
yield NullBits(self, "reserved[]", 4)
yield UInt16(self, "fastload_ofs", "Fast-load area offset (in sector)")
yield UInt16(self, "fastload_size", "Fast-load area length (in sector)")
yield NullBytes(self, "reserved[]", 2)
yield textHandler(UInt16(self, "win_version", "Expected Windows version number"), hexadecimal)
| 3,551 | Python | .py | 50 | 62.62 | 116 | 0.674592 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,537 | java.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/java.py | """
Compiled Java classes parser.
Author: Thomas de Grenier de Latour (TGL) <[email protected]>
Creation: 2006/11/01
Last-update: 2006/11/06
Introduction:
* This parser is for compiled Java classes, aka .class files. What is nice
with this format is that it is well documented in the official Java VM specs.
* Some fields, and most field sets, have dynamic sizes, and there is no offset
to directly jump from an header to a given section, or anything like that.
It means that accessing a field at the end of the file requires that you've
already parsed almost the whole file. That's not very efficient, but it's
okay given the usual size of .class files (usually a few KB).
 * Most fields are just indexes of some "constant pool" entries, which hold
   most constant data of the class. And constant pool entries reference other
   constant pool entries, etc. Hence, a raw display of these fields only shows
   integers and is not really understandable. Because of that, this parser
   comes with two important custom field classes:
    - CPInfo are constant pool entries. They have a type ("Utf8", "Methodref",
      etc.), and some contents fields depending on this type. They also have a
      "__str__()" method, which returns a synthetic view of their contents.
    - CPIndex are constant pool indexes (UInt16). It is possible to specify
      what type of CPInfo they are allowed to point to. They also have a
      custom display method, usually printing something like "-> foo", where
      foo is the str() of their target CPInfo.
References:
* The Java Virtual Machine Specification, 2nd edition, chapter 4, in HTML:
http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html
   => That's the spec I've been implementing so far. I think it is format
version 46.0 (JDK 1.2).
* The Java Virtual Machine Specification, 2nd edition, chapter 4, in PDF:
http://java.sun.com/docs/books/vmspec/2nd-edition/ClassFileFormat.pdf
=> don't trust the URL, this PDF version is more recent than the HTML one.
      It highlights some recent additions to the format (I don't know the
exact version though), which are not yet implemented in this parser.
* The Java Virtual Machine Specification, chapter 4:
http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html
=> describes an older format, probably version 45.3 (JDK 1.1).
TODO/FIXME:
 * Google for some existing free .class file parsers, to get more info on
   the differences between the various format versions, etc.
 * Write/compile some good test cases.
 * Rework pretty-printing of CPIndex fields. This str() thing stinks.
* Add support of formats other than 46.0 (45.3 seems to already be ok, but
there are things to add for later formats).
* Make parsing robust: currently, the parser will die on asserts as soon as
something seems wrong. It should rather be tolerant, print errors/warnings,
and try its best to continue. Check how error-handling is done in other
parsers.
* Gettextize the whole thing.
 * Check whether Float32/64 are really the same as Java floats/doubles.
   PEP-0754 says that handling of +/-infinity and NaN is very
   implementation-dependent. Also check how these values are displayed.
* Make the parser edition-proof. For instance, editing a constant-pool string
  should update the length field of its entry, etc. Sounds like a lot of work.
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (
ParserError, FieldSet, StaticFieldSet,
Enum, RawBytes, PascalString16, Float32, Float64,
Int8, UInt8, Int16, UInt16, Int32, UInt32, Int64,
Bit, NullBits )
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.tools import paddingSize
###############################################################################
def parse_flags(flags, flags_dict, show_unknown_flags=True, separator=" "):
"""
Parses an integer representing a set of flags. The known flags are
    stored with their bit-mask in a dictionary. Returns a string.
"""
flags_list = []
mask = 0x01
while mask <= flags:
if flags & mask:
if mask in flags_dict:
flags_list.append(flags_dict[mask])
elif show_unknown_flags:
flags_list.append("???")
mask = mask << 1
return separator.join(flags_list)
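# Example (a sketch, with a made-up flags dictionary):
#
#   >>> parse_flags(0x05, {0x01: "public", 0x02: "private", 0x04: "static"})
#   'public static'
#
# Set bits missing from the dictionary are rendered as "???" unless
# show_unknown_flags is False.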
###############################################################################
code_to_type_name = {
'B': "byte",
'C': "char",
'D': "double",
'F': "float",
'I': "int",
'J': "long",
'S': "short",
'Z': "boolean",
'V': "void",
}
def eat_descriptor(descr):
"""
Read head of a field/method descriptor. Returns a pair of strings, where
the first one is a human-readable string representation of the first found
type, and the second one is the tail of the parameter.
"""
array_dim = 0
while descr[0] == '[':
array_dim += 1
descr = descr[1:]
if (descr[0] == 'L'):
        end = descr.find(';')
        if end == -1:
            raise ParserError("Not a valid descriptor string: " + descr)
type = descr[1:end]
descr = descr[end:]
else:
global code_to_type_name
try:
type = code_to_type_name[descr[0]]
except KeyError:
raise ParserError("Not a valid descriptor string: %s" % descr)
return (type.replace("/", ".") + array_dim * "[]", descr[1:])
def parse_field_descriptor(descr, name=None):
"""
Parse a field descriptor (single type), and returns it as human-readable
string representation.
"""
assert descr
(type, tail) = eat_descriptor(descr)
assert not tail
if name:
return type + " " + name
else:
return type
def parse_method_descriptor(descr, name=None):
"""
Parse a method descriptor (params type and return type), and returns it
as human-readable string representation.
"""
assert descr and (descr[0] == '(')
descr = descr[1:]
params_list = []
while descr[0] != ')':
(param, descr) = eat_descriptor(descr)
params_list.append(param)
(type, tail) = eat_descriptor(descr[1:])
assert not tail
params = ", ".join(params_list)
if name:
return "%s %s(%s)" % (type, name, params)
else:
return "%s (%s)" % (type, params)
def parse_any_descriptor(descr, name=None):
"""
Parse either a field or method descriptor, and returns it as human-
readable string representation.
"""
assert descr
if descr[0] == '(':
return parse_method_descriptor(descr, name)
else:
return parse_field_descriptor(descr, name)
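# Worked examples for the descriptor parsers above (doctest-style, with
# descriptors taken from the JVM spec grammar):
#
#   >>> parse_field_descriptor("[[Ljava/lang/Object;", "grid")
#   'java.lang.Object[][] grid'
#   >>> parse_method_descriptor("(ILjava/lang/String;)V", "foo")
#   'void foo(int, java.lang.String)'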
###############################################################################
class FieldArray(FieldSet):
"""
Holds a fixed length array of fields which all have the same type. This
type may be variable-length. Each field will be named "foo[x]" (with x
starting at 0).
"""
def __init__(self, parent, name, elements_class, length,
**elements_extra_args):
"""Create a FieldArray of <length> fields of class <elements_class>,
named "<name>[x]". The **elements_extra_args will be passed to the
constructor of each field when yielded."""
FieldSet.__init__(self, parent, name)
self.array_elements_class = elements_class
self.array_length = length
self.array_elements_extra_args = elements_extra_args
def createFields(self):
for i in range(0, self.array_length):
yield self.array_elements_class(self, "%s[%d]" % (self.name, i),
**self.array_elements_extra_args)
class ConstantPool(FieldSet):
"""
ConstantPool is similar to a FieldArray of CPInfo fields, but:
- numbering starts at 1 instead of zero
- some indexes are skipped (after Long or Double entries)
"""
def __init__(self, parent, name, length):
FieldSet.__init__(self, parent, name)
self.constant_pool_length = length
def createFields(self):
i = 1
while i < self.constant_pool_length:
name = "%s[%d]" % (self.name, i)
yield CPInfo(self, name)
i += 1
if self[name].constant_type in ("Long", "Double"):
i += 1
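# Worked example of the index skipping above: with constant_pool_count = 6 and
# a Long entry at index 1, the generated fields are constant_pool[1] (the
# Long), constant_pool[3], constant_pool[4] and constant_pool[5]; index 2 is
# reserved by the JVM spec and never materialized.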
###############################################################################
class CPIndex(UInt16):
"""
Holds index of a constant pool entry.
"""
def __init__(self, parent, name, description=None, target_types=None,
target_text_handler=(lambda x: x), allow_zero=False):
"""
Initialize a CPIndex.
        - target_types is the tuple of expected types for the target CPInfo
          (if None, then there will be no type check)
        - target_text_handler is a string transformation function used for
          pretty printing the target str() result
        - allow_zero states whether a null index is allowed (sometimes, the
          constant pool index is optional)
"""
UInt16.__init__(self, parent, name, description)
if isinstance(target_types, str):
self.target_types = (target_types,)
else:
self.target_types = target_types
self.allow_zero = allow_zero
self.target_text_handler = target_text_handler
self.getOriginalDisplay = lambda: self.value
def createDisplay(self):
cp_entry = self.get_cp_entry()
if self.allow_zero and not cp_entry:
return "ZERO"
assert cp_entry
return "-> " + self.target_text_handler(str(cp_entry))
def get_cp_entry(self):
"""
Returns the target CPInfo field.
"""
assert self.value < self["/constant_pool_count"].value
if self.allow_zero and not self.value: return None
cp_entry = self["/constant_pool/constant_pool[%d]" % self.value]
assert isinstance(cp_entry, CPInfo)
if self.target_types:
assert cp_entry.constant_type in self.target_types
return cp_entry
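    # Display sketch (hypothetical pool contents): if constant_pool[5] is a
    # Utf8 entry holding "java/lang/Object", a CPIndex with value 5 and
    # target_types="Utf8" renders as '-> java/lang/Object' (plus whatever
    # transformation target_text_handler applies).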
###############################################################################
class JavaOpcode(FieldSet):
OPSIZE = 0
def __init__(self, parent, name, op, desc):
FieldSet.__init__(self, parent, name)
if self.OPSIZE != 0: self._size = self.OPSIZE*8
self.op = op
self.desc = desc
def createDisplay(self):
return self.op
def createDescription(self):
return self.desc
def createValue(self):
return self.createDisplay()
class OpcodeNoArgs(JavaOpcode):
OPSIZE = 1
def createFields(self):
yield UInt8(self, "opcode")
class OpcodeCPIndex(JavaOpcode):
OPSIZE = 3
def createFields(self):
yield UInt8(self, "opcode")
yield CPIndex(self, "index")
def createDisplay(self):
return "%s(%i)"%(self.op, self["index"].value)
class OpcodeCPIndexShort(JavaOpcode):
OPSIZE = 2
def createFields(self):
yield UInt8(self, "opcode")
yield UInt8(self, "index")
def createDisplay(self):
return "%s(%i)"%(self.op, self["index"].value)
class OpcodeIndex(JavaOpcode):
OPSIZE = 2
def createFields(self):
yield UInt8(self, "opcode")
yield UInt8(self, "index")
def createDisplay(self):
return "%s(%i)"%(self.op, self["index"].value)
class OpcodeShortJump(JavaOpcode):
OPSIZE = 3
def createFields(self):
yield UInt8(self, "opcode")
yield Int16(self, "offset")
def createDisplay(self):
return "%s(%s)"%(self.op, self["offset"].value)
class OpcodeLongJump(JavaOpcode):
OPSIZE = 5
def createFields(self):
yield UInt8(self, "opcode")
yield Int32(self, "offset")
def createDisplay(self):
return "%s(%s)"%(self.op, self["offset"].value)
class OpcodeSpecial_bipush(JavaOpcode):
OPSIZE = 2
def createFields(self):
yield UInt8(self, "opcode")
yield Int8(self, "value")
def createDisplay(self):
return "%s(%s)"%(self.op, self["value"].value)
class OpcodeSpecial_sipush(JavaOpcode):
OPSIZE = 3
def createFields(self):
yield UInt8(self, "opcode")
yield Int16(self, "value")
def createDisplay(self):
return "%s(%s)"%(self.op, self["value"].value)
class OpcodeSpecial_iinc(JavaOpcode):
OPSIZE = 3
def createFields(self):
yield UInt8(self, "opcode")
yield UInt8(self, "index")
yield Int8(self, "value")
def createDisplay(self):
return "%s(%i,%i)"%(self.op, self["index"].value, self["value"].value)
class OpcodeSpecial_wide(JavaOpcode):
def createFields(self):
yield UInt8(self, "opcode")
new_op = UInt8(self, "new_opcode")
yield new_op
op = new_op._description = JavaBytecode.OPCODE_TABLE.get(new_op.value, ["reserved", None, "Reserved"])[0]
yield UInt16(self, "index")
if op == "iinc":
yield Int16(self, "value")
self.createDisplay = lambda self: "%s(%i,%i)"%(self.op, self["index"].value, self["value"].value)
else:
self.createDisplay = lambda self: "%s(%i)"%(self.op, self["index"].value)
class OpcodeSpecial_invokeinterface(JavaOpcode):
OPSIZE = 5
def createFields(self):
yield UInt8(self, "opcode")
yield CPIndex(self, "index")
yield UInt8(self, "count")
yield UInt8(self, "zero", "Must be zero.")
def createDisplay(self):
return "%s(%i,%i,%i)"%(self.op, self["index"].value, self["count"].value, self["zero"].value)
class OpcodeSpecial_newarray(JavaOpcode):
OPSIZE = 2
def createFields(self):
yield UInt8(self, "opcode")
yield Enum(UInt8(self, "atype"), {4: "boolean",
5: "char",
6: "float",
7: "double",
8: "byte",
9: "short",
10:"int",
11:"long"})
def createDisplay(self):
return "%s(%s)"%(self.op, self["atype"].createDisplay())
class OpcodeSpecial_multianewarray(JavaOpcode):
OPSIZE = 4
def createFields(self):
yield UInt8(self, "opcode")
yield CPIndex(self, "index")
yield UInt8(self, "dimensions")
def createDisplay(self):
return "%s(%i,%i)"%(self.op, self["index"].value, self["dimensions"].value)
class OpcodeSpecial_tableswitch(JavaOpcode):
def createFields(self):
yield UInt8(self, "opcode")
pad = paddingSize(self.address+8, 32)
if pad:
yield NullBits(self, "padding", pad)
yield Int32(self, "default")
low = Int32(self, "low")
yield low
high = Int32(self, "high")
yield high
for i in range(high.value-low.value+1):
yield Int32(self, "offset[]")
def createDisplay(self):
return "%s(%i,%i,%i,...)"%(self.op, self["default"].value, self["low"].value, self["high"].value)
class OpcodeSpecial_lookupswitch(JavaOpcode):
def createFields(self):
yield UInt8(self, "opcode")
pad = paddingSize(self.address+8, 32)
if pad:
yield NullBits(self, "padding", pad)
yield Int32(self, "default")
n = Int32(self, "npairs")
yield n
for i in range(n.value):
yield Int32(self, "match[]")
yield Int32(self, "offset[]")
def createDisplay(self):
return "%s(%i,%i,...)"%(self.op, self["default"].value, self["npairs"].value)
class JavaBytecode(FieldSet):
OPCODE_TABLE = {
0x00: ("nop", OpcodeNoArgs, "performs no operation. Stack: [No change]"),
0x01: ("aconst_null", OpcodeNoArgs, "pushes a 'null' reference onto the stack. Stack: -> null"),
0x02: ("iconst_m1", OpcodeNoArgs, "loads the int value -1 onto the stack. Stack: -> -1"),
0x03: ("iconst_0", OpcodeNoArgs, "loads the int value 0 onto the stack. Stack: -> 0"),
0x04: ("iconst_1", OpcodeNoArgs, "loads the int value 1 onto the stack. Stack: -> 1"),
0x05: ("iconst_2", OpcodeNoArgs, "loads the int value 2 onto the stack. Stack: -> 2"),
0x06: ("iconst_3", OpcodeNoArgs, "loads the int value 3 onto the stack. Stack: -> 3"),
0x07: ("iconst_4", OpcodeNoArgs, "loads the int value 4 onto the stack. Stack: -> 4"),
0x08: ("iconst_5", OpcodeNoArgs, "loads the int value 5 onto the stack. Stack: -> 5"),
0x09: ("lconst_0", OpcodeNoArgs, "pushes the long 0 onto the stack. Stack: -> 0L"),
0x0a: ("lconst_1", OpcodeNoArgs, "pushes the long 1 onto the stack. Stack: -> 1L"),
0x0b: ("fconst_0", OpcodeNoArgs, "pushes '0.0f' onto the stack. Stack: -> 0.0f"),
0x0c: ("fconst_1", OpcodeNoArgs, "pushes '1.0f' onto the stack. Stack: -> 1.0f"),
0x0d: ("fconst_2", OpcodeNoArgs, "pushes '2.0f' onto the stack. Stack: -> 2.0f"),
0x0e: ("dconst_0", OpcodeNoArgs, "pushes the constant '0.0' onto the stack. Stack: -> 0.0"),
0x0f: ("dconst_1", OpcodeNoArgs, "pushes the constant '1.0' onto the stack. Stack: -> 1.0"),
0x10: ("bipush", OpcodeSpecial_bipush, "pushes the signed 8-bit integer argument onto the stack. Stack: -> value"),
0x11: ("sipush", OpcodeSpecial_sipush, "pushes the signed 16-bit integer argument onto the stack. Stack: -> value"),
0x12: ("ldc", OpcodeCPIndexShort, "pushes a constant from a constant pool (String, int, float or class type) onto the stack. Stack: -> value"),
0x13: ("ldc_w", OpcodeCPIndex, "pushes a constant from a constant pool (String, int, float or class type) onto the stack. Stack: -> value"),
0x14: ("ldc2_w", OpcodeCPIndex, "pushes a constant from a constant pool (double or long) onto the stack. Stack: -> value"),
0x15: ("iload", OpcodeIndex, "loads an int 'value' from a local variable '#index'. Stack: -> value"),
0x16: ("lload", OpcodeIndex, "loads a long value from a local variable '#index'. Stack: -> value"),
0x17: ("fload", OpcodeIndex, "loads a float 'value' from a local variable '#index'. Stack: -> value"),
0x18: ("dload", OpcodeIndex, "loads a double 'value' from a local variable '#index'. Stack: -> value"),
0x19: ("aload", OpcodeIndex, "loads a reference onto the stack from a local variable '#index'. Stack: -> objectref"),
0x1a: ("iload_0", OpcodeNoArgs, "loads an int 'value' from variable 0. Stack: -> value"),
0x1b: ("iload_1", OpcodeNoArgs, "loads an int 'value' from variable 1. Stack: -> value"),
0x1c: ("iload_2", OpcodeNoArgs, "loads an int 'value' from variable 2. Stack: -> value"),
0x1d: ("iload_3", OpcodeNoArgs, "loads an int 'value' from variable 3. Stack: -> value"),
0x1e: ("lload_0", OpcodeNoArgs, "load a long value from a local variable 0. Stack: -> value"),
0x1f: ("lload_1", OpcodeNoArgs, "load a long value from a local variable 1. Stack: -> value"),
0x20: ("lload_2", OpcodeNoArgs, "load a long value from a local variable 2. Stack: -> value"),
0x21: ("lload_3", OpcodeNoArgs, "load a long value from a local variable 3. Stack: -> value"),
0x22: ("fload_0", OpcodeNoArgs, "loads a float 'value' from local variable 0. Stack: -> value"),
0x23: ("fload_1", OpcodeNoArgs, "loads a float 'value' from local variable 1. Stack: -> value"),
0x24: ("fload_2", OpcodeNoArgs, "loads a float 'value' from local variable 2. Stack: -> value"),
0x25: ("fload_3", OpcodeNoArgs, "loads a float 'value' from local variable 3. Stack: -> value"),
0x26: ("dload_0", OpcodeNoArgs, "loads a double from local variable 0. Stack: -> value"),
0x27: ("dload_1", OpcodeNoArgs, "loads a double from local variable 1. Stack: -> value"),
0x28: ("dload_2", OpcodeNoArgs, "loads a double from local variable 2. Stack: -> value"),
0x29: ("dload_3", OpcodeNoArgs, "loads a double from local variable 3. Stack: -> value"),
0x2a: ("aload_0", OpcodeNoArgs, "loads a reference onto the stack from local variable 0. Stack: -> objectref"),
0x2b: ("aload_1", OpcodeNoArgs, "loads a reference onto the stack from local variable 1. Stack: -> objectref"),
0x2c: ("aload_2", OpcodeNoArgs, "loads a reference onto the stack from local variable 2. Stack: -> objectref"),
0x2d: ("aload_3", OpcodeNoArgs, "loads a reference onto the stack from local variable 3. Stack: -> objectref"),
0x2e: ("iaload", OpcodeNoArgs, "loads an int from an array. Stack: arrayref, index -> value"),
0x2f: ("laload", OpcodeNoArgs, "load a long from an array. Stack: arrayref, index -> value"),
0x30: ("faload", OpcodeNoArgs, "loads a float from an array. Stack: arrayref, index -> value"),
0x31: ("daload", OpcodeNoArgs, "loads a double from an array. Stack: arrayref, index -> value"),
0x32: ("aaload", OpcodeNoArgs, "loads onto the stack a reference from an array. Stack: arrayref, index -> value"),
0x33: ("baload", OpcodeNoArgs, "loads a byte or Boolean value from an array. Stack: arrayref, index -> value"),
0x34: ("caload", OpcodeNoArgs, "loads a char from an array. Stack: arrayref, index -> value"),
0x35: ("saload", OpcodeNoArgs, "load short from array. Stack: arrayref, index -> value"),
0x36: ("istore", OpcodeIndex, "store int 'value' into variable '#index'. Stack: value ->"),
0x37: ("lstore", OpcodeIndex, "store a long 'value' in a local variable '#index'. Stack: value ->"),
0x38: ("fstore", OpcodeIndex, "stores a float 'value' into a local variable '#index'. Stack: value ->"),
0x39: ("dstore", OpcodeIndex, "stores a double 'value' into a local variable '#index'. Stack: value ->"),
0x3a: ("astore", OpcodeIndex, "stores a reference into a local variable '#index'. Stack: objectref ->"),
0x3b: ("istore_0", OpcodeNoArgs, "store int 'value' into variable 0. Stack: value ->"),
0x3c: ("istore_1", OpcodeNoArgs, "store int 'value' into variable 1. Stack: value ->"),
0x3d: ("istore_2", OpcodeNoArgs, "store int 'value' into variable 2. Stack: value ->"),
0x3e: ("istore_3", OpcodeNoArgs, "store int 'value' into variable 3. Stack: value ->"),
0x3f: ("lstore_0", OpcodeNoArgs, "store a long 'value' in a local variable 0. Stack: value ->"),
0x40: ("lstore_1", OpcodeNoArgs, "store a long 'value' in a local variable 1. Stack: value ->"),
0x41: ("lstore_2", OpcodeNoArgs, "store a long 'value' in a local variable 2. Stack: value ->"),
0x42: ("lstore_3", OpcodeNoArgs, "store a long 'value' in a local variable 3. Stack: value ->"),
0x43: ("fstore_0", OpcodeNoArgs, "stores a float 'value' into local variable 0. Stack: value ->"),
0x44: ("fstore_1", OpcodeNoArgs, "stores a float 'value' into local variable 1. Stack: value ->"),
0x45: ("fstore_2", OpcodeNoArgs, "stores a float 'value' into local variable 2. Stack: value ->"),
0x46: ("fstore_3", OpcodeNoArgs, "stores a float 'value' into local variable 3. Stack: value ->"),
0x47: ("dstore_0", OpcodeNoArgs, "stores a double into local variable 0. Stack: value ->"),
0x48: ("dstore_1", OpcodeNoArgs, "stores a double into local variable 1. Stack: value ->"),
0x49: ("dstore_2", OpcodeNoArgs, "stores a double into local variable 2. Stack: value ->"),
0x4a: ("dstore_3", OpcodeNoArgs, "stores a double into local variable 3. Stack: value ->"),
0x4b: ("astore_0", OpcodeNoArgs, "stores a reference into local variable 0. Stack: objectref ->"),
0x4c: ("astore_1", OpcodeNoArgs, "stores a reference into local variable 1. Stack: objectref ->"),
0x4d: ("astore_2", OpcodeNoArgs, "stores a reference into local variable 2. Stack: objectref ->"),
0x4e: ("astore_3", OpcodeNoArgs, "stores a reference into local variable 3. Stack: objectref ->"),
0x4f: ("iastore", OpcodeNoArgs, "stores an int into an array. Stack: arrayref, index, value ->"),
0x50: ("lastore", OpcodeNoArgs, "store a long to an array. Stack: arrayref, index, value ->"),
0x51: ("fastore", OpcodeNoArgs, "stores a float in an array. Stack: arreyref, index, value ->"),
0x52: ("dastore", OpcodeNoArgs, "stores a double into an array. Stack: arrayref, index, value ->"),
0x53: ("aastore", OpcodeNoArgs, "stores into a reference to an array. Stack: arrayref, index, value ->"),
0x54: ("bastore", OpcodeNoArgs, "stores a byte or Boolean value into an array. Stack: arrayref, index, value ->"),
0x55: ("castore", OpcodeNoArgs, "stores a char into an array. Stack: arrayref, index, value ->"),
0x56: ("sastore", OpcodeNoArgs, "store short to array. Stack: arrayref, index, value ->"),
0x57: ("pop", OpcodeNoArgs, "discards the top value on the stack. Stack: value ->"),
0x58: ("pop2", OpcodeNoArgs, "discards the top two values on the stack (or one value, if it is a double or long). Stack: {value2, value1} ->"),
0x59: ("dup", OpcodeNoArgs, "duplicates the value on top of the stack. Stack: value -> value, value"),
0x5a: ("dup_x1", OpcodeNoArgs, "inserts a copy of the top value into the stack two values from the top. Stack: value2, value1 -> value1, value2, value1"),
0x5b: ("dup_x2", OpcodeNoArgs, "inserts a copy of the top value into the stack two (if value2 is double or long it takes up the entry of value3, too) or three values (if value2 is neither double nor long) from the top. Stack: value3, value2, value1 -> value1, value3, value2, value1"),
0x5c: ("dup2", OpcodeNoArgs, "duplicate top two stack words (two values, if value1 is not double nor long; a single value, if value1 is double or long). Stack: {value2, value1} -> {value2, value1}, {value2, value1}"),
0x5d: ("dup2_x1", OpcodeNoArgs, "duplicate two words and insert beneath third word. Stack: value3, {value2, value1} -> {value2, value1}, value3, {value2, value1}"),
0x5e: ("dup2_x2", OpcodeNoArgs, "duplicate two words and insert beneath fourth word. Stack: {value4, value3}, {value2, value1} -> {value2, value1}, {value4, value3}, {value2, value1}"),
0x5f: ("swap", OpcodeNoArgs, "swaps two top words on the stack (note that value1 and value2 must not be double or long). Stack: value2, value1 -> value1, value2"),
0x60: ("iadd", OpcodeNoArgs, "adds two ints together. Stack: value1, value2 -> result"),
0x61: ("ladd", OpcodeNoArgs, "add two longs. Stack: value1, value2 -> result"),
0x62: ("fadd", OpcodeNoArgs, "adds two floats. Stack: value1, value2 -> result"),
0x63: ("dadd", OpcodeNoArgs, "adds two doubles. Stack: value1, value2 -> result"),
0x64: ("isub", OpcodeNoArgs, "int subtract. Stack: value1, value2 -> result"),
0x65: ("lsub", OpcodeNoArgs, "subtract two longs. Stack: value1, value2 -> result"),
0x66: ("fsub", OpcodeNoArgs, "subtracts two floats. Stack: value1, value2 -> result"),
0x67: ("dsub", OpcodeNoArgs, "subtracts a double from another. Stack: value1, value2 -> result"),
0x68: ("imul", OpcodeNoArgs, "multiply two integers. Stack: value1, value2 -> result"),
0x69: ("lmul", OpcodeNoArgs, "multiplies two longs. Stack: value1, value2 -> result"),
0x6a: ("fmul", OpcodeNoArgs, "multiplies two floats. Stack: value1, value2 -> result"),
0x6b: ("dmul", OpcodeNoArgs, "multiplies two doubles. Stack: value1, value2 -> result"),
0x6c: ("idiv", OpcodeNoArgs, "divides two integers. Stack: value1, value2 -> result"),
0x6d: ("ldiv", OpcodeNoArgs, "divide two longs. Stack: value1, value2 -> result"),
0x6e: ("fdiv", OpcodeNoArgs, "divides two floats. Stack: value1, value2 -> result"),
0x6f: ("ddiv", OpcodeNoArgs, "divides two doubles. Stack: value1, value2 -> result"),
0x70: ("irem", OpcodeNoArgs, "logical int remainder. Stack: value1, value2 -> result"),
0x71: ("lrem", OpcodeNoArgs, "remainder of division of two longs. Stack: value1, value2 -> result"),
0x72: ("frem", OpcodeNoArgs, "gets the remainder from a division between two floats. Stack: value1, value2 -> result"),
0x73: ("drem", OpcodeNoArgs, "gets the remainder from a division between two doubles. Stack: value1, value2 -> result"),
0x74: ("ineg", OpcodeNoArgs, "negate int. Stack: value -> result"),
0x75: ("lneg", OpcodeNoArgs, "negates a long. Stack: value -> result"),
0x76: ("fneg", OpcodeNoArgs, "negates a float. Stack: value -> result"),
0x77: ("dneg", OpcodeNoArgs, "negates a double. Stack: value -> result"),
0x78: ("ishl", OpcodeNoArgs, "int shift left. Stack: value1, value2 -> result"),
0x79: ("lshl", OpcodeNoArgs, "bitwise shift left of a long 'value1' by 'value2' positions. Stack: value1, value2 -> result"),
0x7a: ("ishr", OpcodeNoArgs, "int shift right. Stack: value1, value2 -> result"),
0x7b: ("lshr", OpcodeNoArgs, "bitwise shift right of a long 'value1' by 'value2' positions. Stack: value1, value2 -> result"),
0x7c: ("iushr", OpcodeNoArgs, "int shift right. Stack: value1, value2 -> result"),
0x7d: ("lushr", OpcodeNoArgs, "bitwise shift right of a long 'value1' by 'value2' positions, unsigned. Stack: value1, value2 -> result"),
0x7e: ("iand", OpcodeNoArgs, "performs a logical and on two integers. Stack: value1, value2 -> result"),
0x7f: ("land", OpcodeNoArgs, "bitwise and of two longs. Stack: value1, value2 -> result"),
0x80: ("ior", OpcodeNoArgs, "logical int or. Stack: value1, value2 -> result"),
0x81: ("lor", OpcodeNoArgs, "bitwise or of two longs. Stack: value1, value2 -> result"),
0x82: ("ixor", OpcodeNoArgs, "int xor. Stack: value1, value2 -> result"),
0x83: ("lxor", OpcodeNoArgs, "bitwise exclusive or of two longs. Stack: value1, value2 -> result"),
0x84: ("iinc", OpcodeSpecial_iinc, "increment local variable '#index' by signed byte 'const'. Stack: [No change]"),
0x85: ("i2l", OpcodeNoArgs, "converts an int into a long. Stack: value -> result"),
0x86: ("i2f", OpcodeNoArgs, "converts an int into a float. Stack: value -> result"),
0x87: ("i2d", OpcodeNoArgs, "converts an int into a double. Stack: value -> result"),
0x88: ("l2i", OpcodeNoArgs, "converts a long to an int. Stack: value -> result"),
0x89: ("l2f", OpcodeNoArgs, "converts a long to a float. Stack: value -> result"),
0x8a: ("l2d", OpcodeNoArgs, "converts a long to a double. Stack: value -> result"),
0x8b: ("f2i", OpcodeNoArgs, "converts a float to an int. Stack: value -> result"),
0x8c: ("f2l", OpcodeNoArgs, "converts a float to a long. Stack: value -> result"),
0x8d: ("f2d", OpcodeNoArgs, "converts a float to a double. Stack: value -> result"),
0x8e: ("d2i", OpcodeNoArgs, "converts a double to an int. Stack: value -> result"),
0x8f: ("d2l", OpcodeNoArgs, "converts a double to a long. Stack: value -> result"),
0x90: ("d2f", OpcodeNoArgs, "converts a double to a float. Stack: value -> result"),
0x91: ("i2b", OpcodeNoArgs, "converts an int into a byte. Stack: value -> result"),
0x92: ("i2c", OpcodeNoArgs, "converts an int into a character. Stack: value -> result"),
0x93: ("i2s", OpcodeNoArgs, "converts an int into a short. Stack: value -> result"),
0x94: ("lcmp", OpcodeNoArgs, "compares two longs values. Stack: value1, value2 -> result"),
0x95: ("fcmpl", OpcodeNoArgs, "compares two floats. Stack: value1, value2 -> result"),
0x96: ("fcmpg", OpcodeNoArgs, "compares two floats. Stack: value1, value2 -> result"),
0x97: ("dcmpl", OpcodeNoArgs, "compares two doubles. Stack: value1, value2 -> result"),
0x98: ("dcmpg", OpcodeNoArgs, "compares two doubles. Stack: value1, value2 -> result"),
0x99: ("ifeq", OpcodeShortJump, "if 'value' is 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
0x9a: ("ifne", OpcodeShortJump, "if 'value' is not 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
0x9c: ("ifge", OpcodeShortJump, "if 'value' is greater than or equal to 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
0x9d: ("ifgt", OpcodeShortJump, "if 'value' is greater than 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
0x9e: ("ifle", OpcodeShortJump, "if 'value' is less than or equal to 0, branch to the 16-bit instruction offset argument. Stack: value ->"),
0x9f: ("if_icmpeq", OpcodeShortJump, "if ints are equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa0: ("if_icmpne", OpcodeShortJump, "if ints are not equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa1: ("if_icmplt", OpcodeShortJump, "if 'value1' is less than 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa2: ("if_icmpge", OpcodeShortJump, "if 'value1' is greater than or equal to 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa3: ("if_icmpgt", OpcodeShortJump, "if 'value1' is greater than 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa4: ("if_icmple", OpcodeShortJump, "if 'value1' is less than or equal to 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa5: ("if_acmpeq", OpcodeShortJump, "if references are equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa6: ("if_acmpne", OpcodeShortJump, "if references are not equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"),
0xa7: ("goto", OpcodeShortJump, "goes to the 16-bit instruction offset argument. Stack: [no change]"),
0xa8: ("jsr", OpcodeShortJump, "jump to subroutine at the 16-bit instruction offset argument and place the return address on the stack. Stack: -> address"),
0xa9: ("ret", OpcodeIndex, "continue execution from address taken from a local variable '#index'. Stack: [No change]"),
0xaa: ("tableswitch", OpcodeSpecial_tableswitch, "continue execution from an address in the table at offset 'index'. Stack: index ->"),
0xab: ("lookupswitch", OpcodeSpecial_lookupswitch, "a target address is looked up from a table using a key and execution continues from the instruction at that address. Stack: key ->"),
0xac: ("ireturn", OpcodeNoArgs, "returns an integer from a method. Stack: value -> [empty]"),
0xad: ("lreturn", OpcodeNoArgs, "returns a long value. Stack: value -> [empty]"),
0xae: ("freturn", OpcodeNoArgs, "returns a float. Stack: value -> [empty]"),
0xaf: ("dreturn", OpcodeNoArgs, "returns a double from a method. Stack: value -> [empty]"),
0xb0: ("areturn", OpcodeNoArgs, "returns a reference from a method. Stack: objectref -> [empty]"),
0xb1: ("return", OpcodeNoArgs, "return void from method. Stack: -> [empty]"),
0xb2: ("getstatic", OpcodeCPIndex, "gets a static field 'value' of a class, where the field is identified by field reference in the constant pool. Stack: -> value"),
0xb3: ("putstatic", OpcodeCPIndex, "set static field to 'value' in a class, where the field is identified by a field reference in constant pool. Stack: value ->"),
0xb4: ("getfield", OpcodeCPIndex, "gets a field 'value' of an object 'objectref', where the field is identified by field reference <argument> in the constant pool. Stack: objectref -> value"),
0xb5: ("putfield", OpcodeCPIndex, "set field to 'value' in an object 'objectref', where the field is identified by a field reference <argument> in constant pool. Stack: objectref, value ->"),
0xb6: ("invokevirtual", OpcodeCPIndex, "invoke virtual method on object 'objectref', where the method is identified by method reference <argument> in constant pool. Stack: objectref, [arg1, arg2, ...] ->"),
0xb7: ("invokespecial", OpcodeCPIndex, "invoke instance method on object 'objectref', where the method is identified by method reference <argument> in constant pool. Stack: objectref, [arg1, arg2, ...] ->"),
0xb8: ("invokestatic", OpcodeCPIndex, "invoke a static method, where the method is identified by method reference <argument> in the constant pool. Stack: [arg1, arg2, ...] ->"),
0xb9: ("invokeinterface", OpcodeSpecial_invokeinterface, "invokes an interface method on object 'objectref', where the interface method is identified by method reference <argument> in constant pool. Stack: objectref, [arg1, arg2, ...] ->"),
0xba: ("xxxunusedxxx", OpcodeNoArgs, "this opcode is reserved for historical reasons. Stack: "),
0xbb: ("new", OpcodeCPIndex, "creates new object of type identified by class reference <argument> in constant pool. Stack: -> objectref"),
0xbc: ("newarray", OpcodeSpecial_newarray, "creates new array with 'count' elements of primitive type given in the argument. Stack: count -> arrayref"),
0xbd: ("anewarray", OpcodeCPIndex, "creates a new array of references of length 'count' and component type identified by the class reference <argument> in the constant pool. Stack: count -> arrayref"),
0xbe: ("arraylength", OpcodeNoArgs, "gets the length of an array. Stack: arrayref -> length"),
0xbf: ("athrow", OpcodeNoArgs, "throws an error or exception (notice that the rest of the stack is cleared, leaving only a reference to the Throwable). Stack: objectref -> [empty], objectref"),
0xc0: ("checkcast", OpcodeCPIndex, "checks whether an 'objectref' is of a certain type, the class reference of which is in the constant pool. Stack: objectref -> objectref"),
0xc1: ("instanceof", OpcodeCPIndex, "determines if an object 'objectref' is of a given type, identified by class reference <argument> in constant pool. Stack: objectref -> result"),
0xc2: ("monitorenter", OpcodeNoArgs, "enter monitor for object (\"grab the lock\" - start of synchronized() section). Stack: objectref -> "),
0xc3: ("monitorexit", OpcodeNoArgs, "exit monitor for object (\"release the lock\" - end of synchronized() section). Stack: objectref -> "),
0xc4: ("wide", OpcodeSpecial_wide, "execute 'opcode', where 'opcode' is either iload, fload, aload, lload, dload, istore, fstore, astore, lstore, dstore, or ret, but assume the 'index' is 16 bit; or execute iinc, where the 'index' is 16 bits and the constant to increment by is a signed 16 bit short. Stack: [same as for corresponding instructions]"),
0xc5: ("multianewarray", OpcodeSpecial_multianewarray, "create a new array of 'dimensions' dimensions with elements of type identified by class reference in constant pool; the sizes of each dimension is identified by 'count1', ['count2', etc]. Stack: count1, [count2,...] -> arrayref"),
0xc6: ("ifnull", OpcodeShortJump, "if 'value' is null, branch to the 16-bit instruction offset argument. Stack: value ->"),
0xc7: ("ifnonnull", OpcodeShortJump, "if 'value' is not null, branch to the 16-bit instruction offset argument. Stack: value ->"),
0xc8: ("goto_w", OpcodeLongJump, "goes to another instruction at the 32-bit branch offset argument. Stack: [no change]"),
0xc9: ("jsr_w", OpcodeLongJump, "jump to subroutine at the 32-bit branch offset argument and place the return address on the stack. Stack: -> address"),
0xca: ("breakpoint", OpcodeNoArgs, "reserved for breakpoints in Java debuggers; should not appear in any class file."),
0xfe: ("impdep1", OpcodeNoArgs, "reserved for implementation-dependent operations within debuggers; should not appear in any class file."),
0xff: ("impdep2", OpcodeNoArgs, "reserved for implementation-dependent operations within debuggers; should not appear in any class file.")}
def __init__(self, parent, name, length):
FieldSet.__init__(self, parent, name)
self._size = length*8
def createFields(self):
while self.current_size < self.size:
bytecode = ord(self.parent.stream.readBytes(self.absolute_address+self.current_size, 1))
            op, cls, desc = self.OPCODE_TABLE.get(bytecode, ("<reserved_opcode>", OpcodeNoArgs, "Reserved opcode."))
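            # The handler class parses the opcode byte plus its operands;
            # e.g. OpcodeCPIndex is assumed to consume a 16-bit constant
            # pool index after opcodes such as invokevirtual (0xb6)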
yield cls(self, "bytecode[]", op, desc)
###############################################################################
class CPInfo(FieldSet):
"""
Holds a constant pool entry. Entries all have a type, and various contents
fields depending on their type.
"""
def createFields(self):
yield Enum(UInt8(self, "tag"), self.root.CONSTANT_TYPES)
if self["tag"].value not in self.root.CONSTANT_TYPES:
raise ParserError("Java: unknown constant type (%s)" % self["tag"].value)
self.constant_type = self.root.CONSTANT_TYPES[self["tag"].value]
if self.constant_type == "Utf8":
yield PascalString16(self, "bytes", charset="UTF-8")
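            # Raw layout per the JVM spec: tag byte 0x01, a 16-bit big-endian
            # length, then that many bytes of (modified) UTF-8 data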
elif self.constant_type == "Integer":
yield Int32(self, "bytes")
elif self.constant_type == "Float":
yield Float32(self, "bytes")
elif self.constant_type == "Long":
yield Int64(self, "bytes")
elif self.constant_type == "Double":
yield Float64(self, "bytes")
elif self.constant_type == "Class":
yield CPIndex(self, "name_index", "Class or interface name", target_types="Utf8")
elif self.constant_type == "String":
yield CPIndex(self, "string_index", target_types="Utf8")
elif self.constant_type == "Fieldref":
yield CPIndex(self, "class_index", "Field class or interface name", target_types="Class")
yield CPIndex(self, "name_and_type_index", target_types="NameAndType")
elif self.constant_type == "Methodref":
yield CPIndex(self, "class_index", "Method class name", target_types="Class")
yield CPIndex(self, "name_and_type_index", target_types="NameAndType")
elif self.constant_type == "InterfaceMethodref":
yield CPIndex(self, "class_index", "Method interface name", target_types="Class")
yield CPIndex(self, "name_and_type_index", target_types="NameAndType")
elif self.constant_type == "NameAndType":
yield CPIndex(self, "name_index", target_types="Utf8")
yield CPIndex(self, "descriptor_index", target_types="Utf8")
else:
raise ParserError("Not a valid constant pool element type: "
+ self["tag"].value)
def __str__(self):
"""
Returns a human-readable string representation of the constant pool
entry. It is used for pretty-printing of the CPIndex fields pointing
to it.
"""
if self.constant_type == "Utf8":
return self["bytes"].value
elif self.constant_type in ("Integer", "Float", "Long", "Double"):
return self["bytes"].display
elif self.constant_type == "Class":
class_name = str(self["name_index"].get_cp_entry())
return class_name.replace("/",".")
elif self.constant_type == "String":
return str(self["string_index"].get_cp_entry())
elif self.constant_type == "Fieldref":
return "%s (from %s)" % (self["name_and_type_index"], self["class_index"])
elif self.constant_type == "Methodref":
return "%s (from %s)" % (self["name_and_type_index"], self["class_index"])
elif self.constant_type == "InterfaceMethodref":
return "%s (from %s)" % (self["name_and_type_index"], self["class_index"])
elif self.constant_type == "NameAndType":
return parse_any_descriptor(
str(self["descriptor_index"].get_cp_entry()),
name=str(self["name_index"].get_cp_entry()))
else:
# FIXME: Return "<error>" instead of raising an exception?
raise ParserError("Not a valid constant pool element type: "
+ self["tag"].value)
###############################################################################
# field_info {
# u2 access_flags;
# u2 name_index;
# u2 descriptor_index;
# u2 attributes_count;
# attribute_info attributes[attributes_count];
# }
class FieldInfo(FieldSet):
def createFields(self):
# Access flags (16 bits)
yield NullBits(self, "reserved[]", 8)
yield Bit(self, "transient")
yield Bit(self, "volatile")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "final")
yield Bit(self, "static")
yield Bit(self, "protected")
yield Bit(self, "private")
yield Bit(self, "public")
yield CPIndex(self, "name_index", "Field name", target_types="Utf8")
yield CPIndex(self, "descriptor_index", "Field descriptor", target_types="Utf8",
target_text_handler=parse_field_descriptor)
yield UInt16(self, "attributes_count", "Number of field attributes")
if self["attributes_count"].value > 0:
yield FieldArray(self, "attributes", AttributeInfo,
self["attributes_count"].value)
###############################################################################
# method_info {
# u2 access_flags;
# u2 name_index;
# u2 descriptor_index;
# u2 attributes_count;
# attribute_info attributes[attributes_count];
# }
class MethodInfo(FieldSet):
def createFields(self):
# Access flags (16 bits)
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "strict")
yield Bit(self, "abstract")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "native")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "synchronized")
yield Bit(self, "final")
yield Bit(self, "static")
yield Bit(self, "protected")
yield Bit(self, "private")
yield Bit(self, "public")
yield CPIndex(self, "name_index", "Method name", target_types="Utf8")
yield CPIndex(self, "descriptor_index", "Method descriptor",
target_types="Utf8",
target_text_handler=parse_method_descriptor)
yield UInt16(self, "attributes_count", "Number of method attributes")
if self["attributes_count"].value > 0:
yield FieldArray(self, "attributes", AttributeInfo,
self["attributes_count"].value)
###############################################################################
# attribute_info {
# u2 attribute_name_index;
# u4 attribute_length;
# u1 info[attribute_length];
# }
# [...]
class AttributeInfo(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
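        # Total size: the 2-byte attribute_name_index plus the 4-byte
        # attribute_length (6 bytes of header) plus the payload itself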
self._size = (self["attribute_length"].value + 6) * 8
def createFields(self):
yield CPIndex(self, "attribute_name_index", "Attribute name", target_types="Utf8")
yield UInt32(self, "attribute_length", "Length of the attribute")
attr_name = str(self["attribute_name_index"].get_cp_entry())
# ConstantValue_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# u2 constantvalue_index;
# }
if attr_name == "ConstantValue":
if self["attribute_length"].value != 2:
raise ParserError("Java: Invalid attribute %s length (%s)" \
% (self.path, self["attribute_length"].value))
yield CPIndex(self, "constantvalue_index",
target_types=("Long","Float","Double","Integer","String"))
# Code_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# u2 max_stack;
# u2 max_locals;
# u4 code_length;
# u1 code[code_length];
# u2 exception_table_length;
# { u2 start_pc;
# u2 end_pc;
# u2 handler_pc;
# u2 catch_type;
# } exception_table[exception_table_length];
# u2 attributes_count;
# attribute_info attributes[attributes_count];
# }
elif attr_name == "Code":
yield UInt16(self, "max_stack")
yield UInt16(self, "max_locals")
yield UInt32(self, "code_length")
if self["code_length"].value > 0:
yield JavaBytecode(self, "code", self["code_length"].value)
yield UInt16(self, "exception_table_length")
if self["exception_table_length"].value > 0:
yield FieldArray(self, "exception_table", ExceptionTableEntry,
self["exception_table_length"].value)
yield UInt16(self, "attributes_count")
if self["attributes_count"].value > 0:
yield FieldArray(self, "attributes", AttributeInfo,
self["attributes_count"].value)
# Exceptions_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# u2 number_of_exceptions;
# u2 exception_index_table[number_of_exceptions];
# }
elif (attr_name == "Exceptions"):
yield UInt16(self, "number_of_exceptions")
yield FieldArray(self, "exception_index_table", CPIndex,
self["number_of_exceptions"].value, target_types="Class")
assert self["attribute_length"].value == \
2 + self["number_of_exceptions"].value * 2
# InnerClasses_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# u2 number_of_classes;
# { u2 inner_class_info_index;
# u2 outer_class_info_index;
# u2 inner_name_index;
# u2 inner_class_access_flags;
# } classes[number_of_classes];
# }
elif (attr_name == "InnerClasses"):
yield UInt16(self, "number_of_classes")
if self["number_of_classes"].value > 0:
yield FieldArray(self, "classes", InnerClassesEntry,
self["number_of_classes"].value)
assert self["attribute_length"].value == \
2 + self["number_of_classes"].value * 8
# Synthetic_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# }
elif (attr_name == "Synthetic"):
assert self["attribute_length"].value == 0
# SourceFile_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# u2 sourcefile_index;
# }
elif (attr_name == "SourceFile"):
assert self["attribute_length"].value == 2
yield CPIndex(self, "sourcefile_index", target_types="Utf8")
# LineNumberTable_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# u2 line_number_table_length;
# { u2 start_pc;
# u2 line_number;
# } line_number_table[line_number_table_length];
# }
elif (attr_name == "LineNumberTable"):
yield UInt16(self, "line_number_table_length")
if self["line_number_table_length"].value > 0:
yield FieldArray(self, "line_number_table",
LineNumberTableEntry,
self["line_number_table_length"].value)
assert self["attribute_length"].value == \
2 + self["line_number_table_length"].value * 4
# LocalVariableTable_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# u2 local_variable_table_length;
# { u2 start_pc;
# u2 length;
# u2 name_index;
# u2 descriptor_index;
# u2 index;
# } local_variable_table[local_variable_table_length];
# }
elif (attr_name == "LocalVariableTable"):
yield UInt16(self, "local_variable_table_length")
if self["local_variable_table_length"].value > 0:
yield FieldArray(self, "local_variable_table",
LocalVariableTableEntry,
self["local_variable_table_length"].value)
assert self["attribute_length"].value == \
2 + self["local_variable_table_length"].value * 10
# Deprecated_attribute {
# u2 attribute_name_index;
# u4 attribute_length;
# }
elif (attr_name == "Deprecated"):
assert self["attribute_length"].value == 0
        # Unknown attribute type. They are allowed by the JVM specs, but we
# can't say much about them...
elif self["attribute_length"].value > 0:
yield RawBytes(self, "info", self["attribute_length"].value)
class ExceptionTableEntry(FieldSet):
static_size = 48 + CPIndex.static_size
def createFields(self):
yield textHandler(UInt16(self, "start_pc"), hexadecimal)
yield textHandler(UInt16(self, "end_pc"), hexadecimal)
yield textHandler(UInt16(self, "handler_pc"), hexadecimal)
yield CPIndex(self, "catch_type", target_types="Class")
class InnerClassesEntry(StaticFieldSet):
format = (
(CPIndex, "inner_class_info_index",
{"target_types": "Class", "allow_zero": True}),
(CPIndex, "outer_class_info_index",
{"target_types": "Class", "allow_zero": True}),
(CPIndex, "inner_name_index",
{"target_types": "Utf8", "allow_zero": True}),
# Inner class access flags (16 bits)
(NullBits, "reserved[]", 5),
(Bit, "abstract"),
(Bit, "interface"),
(NullBits, "reserved[]", 3),
(Bit, "super"),
(Bit, "final"),
(Bit, "static"),
(Bit, "protected"),
(Bit, "private"),
(Bit, "public"),
)
class LineNumberTableEntry(StaticFieldSet):
format = (
(UInt16, "start_pc"),
(UInt16, "line_number")
)
class LocalVariableTableEntry(StaticFieldSet):
format = (
(UInt16, "start_pc"),
(UInt16, "length"),
(CPIndex, "name_index", {"target_types": "Utf8"}),
(CPIndex, "descriptor_index", {"target_types": "Utf8",
"target_text_handler": parse_field_descriptor}),
(UInt16, "index")
)
###############################################################################
# ClassFile {
# u4 magic;
# u2 minor_version;
# u2 major_version;
# u2 constant_pool_count;
# cp_info constant_pool[constant_pool_count-1];
# u2 access_flags;
# u2 this_class;
# u2 super_class;
# u2 interfaces_count;
# u2 interfaces[interfaces_count];
# u2 fields_count;
# field_info fields[fields_count];
# u2 methods_count;
# method_info methods[methods_count];
# u2 attributes_count;
# attribute_info attributes[attributes_count];
# }
class JavaCompiledClassFile(Parser):
"""
Root of the .class parser.
"""
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "java_class",
"category": "program",
"file_ext": ("class",),
"mime": (u"application/java-vm",),
"min_size": (32 + 3*16),
"description": "Compiled Java class"
}
MAGIC = 0xCAFEBABE
KNOWN_VERSIONS = {
"45.3": "JDK 1.1",
"46.0": "JDK 1.2",
"47.0": "JDK 1.3",
"48.0": "JDK 1.4",
"49.0": "JDK 1.5",
"50.0": "JDK 1.6"
}
# Constants go here since they will probably depend on the detected format
# version at some point. Though, if they happen to be really backward
# compatible, they may become module globals.
CONSTANT_TYPES = {
1: "Utf8",
3: "Integer",
4: "Float",
5: "Long",
6: "Double",
7: "Class",
8: "String",
9: "Fieldref",
10: "Methodref",
11: "InterfaceMethodref",
12: "NameAndType"
}
def validate(self):
if self["magic"].value != self.MAGIC:
return "Wrong magic signature!"
version = "%d.%d" % (self["major_version"].value, self["minor_version"].value)
if version not in self.KNOWN_VERSIONS:
return "Unknown version (%s)" % version
return True
def createDescription(self):
version = "%d.%d" % (self["major_version"].value, self["minor_version"].value)
if version in self.KNOWN_VERSIONS:
return "Compiled Java class, %s" % self.KNOWN_VERSIONS[version]
else:
return "Compiled Java class, version %s" % version
def createFields(self):
yield textHandler(UInt32(self, "magic", "Java compiled class signature"),
hexadecimal)
yield UInt16(self, "minor_version", "Class format minor version")
yield UInt16(self, "major_version", "Class format major version")
yield UInt16(self, "constant_pool_count", "Size of the constant pool")
if self["constant_pool_count"].value > 1:
#yield FieldArray(self, "constant_pool", CPInfo,
# (self["constant_pool_count"].value - 1), first_index=1)
# Mmmh... can't use FieldArray actually, because ConstantPool
# requires some specific hacks (skipping some indexes after Long
# and Double entries).
yield ConstantPool(self, "constant_pool",
(self["constant_pool_count"].value))
        # Class access flags (16 bits)
yield NullBits(self, "reserved[]", 5)
yield Bit(self, "abstract")
yield Bit(self, "interface")
yield NullBits(self, "reserved[]", 3)
yield Bit(self, "super")
yield Bit(self, "final")
yield Bit(self, "static")
yield Bit(self, "protected")
yield Bit(self, "private")
yield Bit(self, "public")
yield CPIndex(self, "this_class", "Class name", target_types="Class")
yield CPIndex(self, "super_class", "Super class name", target_types="Class")
yield UInt16(self, "interfaces_count", "Number of implemented interfaces")
if self["interfaces_count"].value > 0:
yield FieldArray(self, "interfaces", CPIndex,
self["interfaces_count"].value, target_types="Class")
yield UInt16(self, "fields_count", "Number of fields")
if self["fields_count"].value > 0:
yield FieldArray(self, "fields", FieldInfo,
self["fields_count"].value)
yield UInt16(self, "methods_count", "Number of methods")
if self["methods_count"].value > 0:
yield FieldArray(self, "methods", MethodInfo,
self["methods_count"].value)
yield UInt16(self, "attributes_count", "Number of attributes")
if self["attributes_count"].value > 0:
yield FieldArray(self, "attributes", AttributeInfo,
self["attributes_count"].value)
# vim: set expandtab tabstop=4 shiftwidth=4 autoindent smartindent:
| 57,999 | Python | .py | 1,020 | 49.896078 | 351 | 0.639083 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,538 | prc.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/prc.py | """
PRC (Palm resource) parser.
Author: Sebastien Ponce
Creation date: 29 october 2008
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt16, UInt32, TimestampMac32,
String, RawBytes)
from lib.hachoir_core.endian import BIG_ENDIAN
class PRCHeader(FieldSet):
static_size = 78*8
def createFields(self):
yield String(self, "name", 32, "Name")
yield UInt16(self, "flags", "Flags")
yield UInt16(self, "version", "Version")
yield TimestampMac32(self, "create_time", "Creation time")
yield TimestampMac32(self, "mod_time", "Modification time")
yield TimestampMac32(self, "backup_time", "Backup time")
yield UInt32(self, "mod_num", "mod num")
yield UInt32(self, "app_info", "app info")
yield UInt32(self, "sort_info", "sort info")
yield UInt32(self, "type", "type")
yield UInt32(self, "id", "id")
yield UInt32(self, "unique_id_seed", "unique_id_seed")
yield UInt32(self, "next_record_list", "next_record_list")
yield UInt16(self, "num_records", "num_records")
class ResourceHeader(FieldSet):
static_size = 10*8
def createFields(self):
yield String(self, "name", 4, "Name of the resource")
yield UInt16(self, "flags", "ID number of the resource")
yield UInt32(self, "offset", "Pointer to the resource data")
def createDescription(self):
return "Resource Header (%s)" % self["name"]
class PRCFile(Parser):
PARSER_TAGS = {
"id": "prc",
"category": "program",
"file_ext": ("prc", ""),
"min_size": ResourceHeader.static_size, # At least one program header
"mime": (
u"application/x-pilot-prc",
u"application/x-palmpilot"),
"description": "Palm Resource File"
}
endian = BIG_ENDIAN
def validate(self):
# FIXME: Implement the validation function!
return False
def createFields(self):
# Parse header and program headers
yield PRCHeader(self, "header", "Header")
lens = []
firstOne = True
poff = 0
for index in xrange(self["header/num_records"].value):
r = ResourceHeader(self, "res_header[]")
if firstOne:
firstOne = False
else:
lens.append(r["offset"].value - poff)
poff = r["offset"].value
yield r
lens.append(self.size/8 - poff)
yield UInt16(self, "placeholder", "Place holder bytes")
for i in range(len(lens)):
yield RawBytes(self, "res[]", lens[i], '"'+self["res_header["+str(i)+"]/name"].value+"\" Resource")
def createDescription(self):
return "Palm Resource file"
| 2,778 | Python | .py | 70 | 31.828571 | 111 | 0.613501 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,539 | exe.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/exe.py | """
Microsoft Windows Portable Executable (PE) file parser.
Informations:
- Microsoft Portable Executable and Common Object File Format Specification:
http://www.microsoft.com/whdc/system/platform/firmware/PECOFF.mspx
Author: Victor Stinner
Creation date: 2006-08-13
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.field import (FieldSet, RootSeekableFieldSet,
UInt16, UInt32, String,
RawBytes, PaddingBytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_parser.program.exe_ne import NE_Header
from lib.hachoir_parser.program.exe_pe import PE_Header, PE_OptHeader, SectionHeader
from lib.hachoir_parser.program.exe_res import PE_Resource, NE_VersionInfoNode
MAX_NB_SECTION = 50
class MSDosHeader(FieldSet):
static_size = 64*8
def createFields(self):
yield String(self, "header", 2, "File header (MZ)", charset="ASCII")
yield UInt16(self, "size_mod_512", "File size in bytes modulo 512")
yield UInt16(self, "size_div_512", "File size in bytes divide by 512")
yield UInt16(self, "reloc_entries", "Number of relocation entries")
yield UInt16(self, "code_offset", "Offset to the code in the file (divided by 16)")
yield UInt16(self, "needed_memory", "Memory needed to run (divided by 16)")
yield UInt16(self, "max_memory", "Maximum memory needed to run (divided by 16)")
yield textHandler(UInt32(self, "init_ss_sp", "Initial value of SP:SS registers"), hexadecimal)
yield UInt16(self, "checksum", "Checksum")
yield textHandler(UInt32(self, "init_cs_ip", "Initial value of CS:IP registers"), hexadecimal)
yield UInt16(self, "reloc_offset", "Offset in file to relocation table")
yield UInt16(self, "overlay_number", "Overlay number")
yield PaddingBytes(self, "reserved[]", 8, "Reserved")
yield UInt16(self, "oem_id", "OEM id")
yield UInt16(self, "oem_info", "OEM info")
yield PaddingBytes(self, "reserved[]", 20, "Reserved")
yield UInt32(self, "next_offset", "Offset to next header (PE or NE)")
def isValid(self):
if 512 <= self["size_mod_512"].value:
return "Invalid field 'size_mod_512' value"
if self["code_offset"].value < 4:
return "Invalid code offset"
looks_pe = self["size_div_512"].value < 4
if looks_pe:
if self["checksum"].value != 0:
return "Invalid value of checksum"
if not (80 <= self["next_offset"].value <= 1024):
return "Invalid value of next_offset"
return ""
class ExeFile(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "exe",
"category": "program",
"file_ext": ("exe", "dll", "ocx"),
"mime": (u"application/x-dosexec",),
"min_size": 64*8,
#"magic": (("MZ", 0),),
"magic_regex": (("MZ.[\0\1].{4}[^\0\1\2\3]", 0),),
"description": "Microsoft Windows Portable Executable"
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, 2) != 'MZ':
return "Wrong header"
err = self["msdos"].isValid()
if err:
return "Invalid MSDOS header: "+err
if self.isPE():
if MAX_NB_SECTION < self["pe_header/nb_section"].value:
return "Invalid number of section (%s)" \
% self["pe_header/nb_section"].value
return True
def createFields(self):
yield MSDosHeader(self, "msdos", "MS-DOS program header")
if self.isPE() or self.isNE():
offset = self["msdos/next_offset"].value
self.seekByte(offset, relative=False)
if self.isPE():
for field in self.parsePortableExecutable():
yield field
elif self.isNE():
for field in self.parseNE_Executable():
yield field
else:
offset = self["msdos/code_offset"].value * 16
self.seekByte(offset, relative=False)
def parseNE_Executable(self):
yield NE_Header(self, "ne_header")
# FIXME: Compute resource offset instead of using searchBytes()
        # Ugly hack to find the version info structure
start = self.current_size
addr = self.stream.searchBytes('VS_VERSION_INFO', start)
if addr:
self.seekBit(addr-32)
yield NE_VersionInfoNode(self, "info")
def parsePortableExecutable(self):
# Read PE header
yield PE_Header(self, "pe_header")
# Read PE optional header
size = self["pe_header/opt_hdr_size"].value
rsrc_rva = None
if size:
yield PE_OptHeader(self, "pe_opt_header", size=size*8)
if "pe_opt_header/resource/rva" in self:
rsrc_rva = self["pe_opt_header/resource/rva"].value
# Read section headers
sections = []
for index in xrange(self["pe_header/nb_section"].value):
section = SectionHeader(self, "section_hdr[]")
yield section
if section["phys_size"].value:
sections.append(section)
# Read sections
sections.sort(key=lambda field: field["phys_off"].value)
for section in sections:
self.seekByte(section["phys_off"].value)
size = section["phys_size"].value
if size:
name = section.createSectionName()
if rsrc_rva is not None and section["rva"].value == rsrc_rva:
yield PE_Resource(self, name, section, size=size*8)
else:
yield RawBytes(self, name, size)
def isPE(self):
if not hasattr(self, "_is_pe"):
self._is_pe = False
offset = self["msdos/next_offset"].value * 8
if 2*8 <= offset \
and (offset+PE_Header.static_size) <= self.size \
and self.stream.readBytes(offset, 4) == 'PE\0\0':
self._is_pe = True
return self._is_pe
def isNE(self):
if not hasattr(self, "_is_ne"):
self._is_ne = False
offset = self["msdos/next_offset"].value * 8
if 64*8 <= offset \
and (offset+NE_Header.static_size) <= self.size \
and self.stream.readBytes(offset, 2) == 'NE':
self._is_ne = True
return self._is_ne
def getResource(self):
# MS-DOS program: no resource
if not self.isPE():
return None
# Check if PE has resource or not
if "pe_opt_header/resource/size" in self:
if not self["pe_opt_header/resource/size"].value:
return None
if "section_rsrc" in self:
return self["section_rsrc"]
return None
def createDescription(self):
if self.isPE():
if self["pe_header/is_dll"].value:
text = u"Microsoft Windows DLL"
else:
text = u"Microsoft Windows Portable Executable"
info = [self["pe_header/cpu"].display]
if "pe_opt_header" in self:
hdr = self["pe_opt_header"]
info.append(hdr["subsystem"].display)
if self["pe_header/is_stripped"].value:
info.append(u"stripped")
return u"%s: %s" % (text, ", ".join(info))
elif self.isNE():
return u"New-style Executable (NE) for Microsoft MS Windows 3.x"
else:
return u"MS-DOS executable"
def createContentSize(self):
if self.isPE():
size = 0
for index in xrange(self["pe_header/nb_section"].value):
section = self["section_hdr[%u]" % index]
section_size = section["phys_size"].value
if not section_size:
continue
section_size = (section_size + section["phys_off"].value) * 8
if size:
size = max(size, section_size)
else:
size = section_size
if size:
return size
else:
return None
elif self.isNE():
# TODO: Guess NE size
return None
else:
size = self["msdos/size_mod_512"].value + (self["msdos/size_div_512"].value-1) * 512
if size < 0:
return None
return size*8
| 8,701 | Python | .py | 198 | 33.388889 | 102 | 0.583579 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,540 | __init__.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/__init__.py | from lib.hachoir_parser.program.elf import ElfFile
from lib.hachoir_parser.program.exe import ExeFile
from lib.hachoir_parser.program.python import PythonCompiledFile
from lib.hachoir_parser.program.java import JavaCompiledClassFile
from lib.hachoir_parser.program.prc import PRCFile
| 285 | Python | .py | 5 | 55.8 | 65 | 0.874552 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,541 | exe_res.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/exe_res.py | """
Parser for resources of Microsoft Windows Portable Executable (PE) files.
Documentation:
- Wine project
VS_FIXEDFILEINFO structure, file include/winver.h
Author: Victor Stinner
Creation date: 2007-01-19
"""
from lib.hachoir_core.field import (FieldSet, ParserError, Enum,
Bit, Bits, SeekableFieldSet,
UInt16, UInt32, TimestampUnix32,
RawBytes, PaddingBytes, NullBytes, NullBits,
CString, String)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.tools import createDict, paddingSize, alignValue, makePrintable
from lib.hachoir_core.error import HACHOIR_ERRORS
from lib.hachoir_parser.common.win32 import BitmapInfoHeader
MAX_DEPTH = 5
MAX_INDEX_PER_HEADER = 300
MAX_NAME_PER_HEADER = MAX_INDEX_PER_HEADER
class Version(FieldSet):
static_size = 32
def createFields(self):
yield textHandler(UInt16(self, "minor", "Minor version number"), hexadecimal)
yield textHandler(UInt16(self, "major", "Major version number"), hexadecimal)
def createValue(self):
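        # e.g. major=5, minor=1 gives the value 5.0001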
return self["major"].value + float(self["minor"].value) / 10000
MAJOR_OS_NAME = {
1: "DOS",
2: "OS/2 16-bit",
3: "OS/2 32-bit",
4: "Windows NT",
}
MINOR_OS_BASE = 0
MINOR_OS_NAME = {
0: "Base",
1: "Windows 16-bit",
2: "Presentation Manager 16-bit",
3: "Presentation Manager 32-bit",
4: "Windows 32-bit",
}
FILETYPE_DRIVER = 3
FILETYPE_FONT = 4
FILETYPE_NAME = {
1: "Application",
2: "DLL",
3: "Driver",
4: "Font",
5: "VXD",
7: "Static library",
}
DRIVER_SUBTYPE_NAME = {
1: "Printer",
2: "Keyboard",
3: "Language",
4: "Display",
5: "Mouse",
6: "Network",
7: "System",
8: "Installable",
9: "Sound",
10: "Communications",
}
FONT_SUBTYPE_NAME = {
1: "Raster",
2: "Vector",
3: "TrueType",
}
class VersionInfoBinary(FieldSet):
def createFields(self):
yield textHandler(UInt32(self, "magic", "File information magic (0xFEEF04BD)"), hexadecimal)
if self["magic"].value != 0xFEEF04BD:
raise ParserError("EXE resource: invalid file info magic")
yield Version(self, "struct_ver", "Structure version (1.0)")
yield Version(self, "file_ver_ms", "File version MS")
yield Version(self, "file_ver_ls", "File version LS")
yield Version(self, "product_ver_ms", "Product version MS")
yield Version(self, "product_ver_ls", "Product version LS")
yield textHandler(UInt32(self, "file_flags_mask"), hexadecimal)
yield Bit(self, "debug")
yield Bit(self, "prerelease")
yield Bit(self, "patched")
yield Bit(self, "private_build")
yield Bit(self, "info_inferred")
yield Bit(self, "special_build")
yield NullBits(self, "reserved", 26)
yield Enum(textHandler(UInt16(self, "file_os_major"), hexadecimal), MAJOR_OS_NAME)
yield Enum(textHandler(UInt16(self, "file_os_minor"), hexadecimal), MINOR_OS_NAME)
yield Enum(textHandler(UInt32(self, "file_type"), hexadecimal), FILETYPE_NAME)
field = textHandler(UInt32(self, "file_subfile"), hexadecimal)
if field.value == FILETYPE_DRIVER:
field = Enum(field, DRIVER_SUBTYPE_NAME)
elif field.value == FILETYPE_FONT:
field = Enum(field, FONT_SUBTYPE_NAME)
yield field
yield TimestampUnix32(self, "date_ms")
yield TimestampUnix32(self, "date_ls")
class VersionInfoNode(FieldSet):
TYPE_STRING = 1
TYPE_NAME = {
0: "binary",
1: "string",
}
def __init__(self, parent, name, is_32bit=True):
FieldSet.__init__(self, parent, name)
self._size = alignValue(self["size"].value, 4) * 8
self.is_32bit = is_32bit
def createFields(self):
yield UInt16(self, "size", "Node size (in bytes)")
yield UInt16(self, "data_size")
yield Enum(UInt16(self, "type"), self.TYPE_NAME)
yield CString(self, "name", charset="UTF-16-LE")
size = paddingSize(self.current_size//8, 4)
if size:
yield NullBytes(self, "padding[]", size)
size = self["data_size"].value
if size:
if self["type"].value == self.TYPE_STRING:
if self.is_32bit:
size *= 2
yield String(self, "value", size, charset="UTF-16-LE", truncate="\0")
elif self["name"].value == "VS_VERSION_INFO":
yield VersionInfoBinary(self, "value", size=size*8)
if self["value/file_flags_mask"].value == 0:
self.is_32bit = False
else:
yield RawBytes(self, "value", size)
while 12 <= (self.size - self.current_size) // 8:
yield VersionInfoNode(self, "node[]", self.is_32bit)
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding[]", size)
def createDescription(self):
text = "Version info node: %s" % self["name"].value
if self["type"].value == self.TYPE_STRING and "value" in self:
text += "=%s" % self["value"].value
return text
def parseVersionInfo(parent):
yield VersionInfoNode(parent, "node[]")
def parseIcon(parent):
yield BitmapInfoHeader(parent, "bmp_header")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "raw", size)
class WindowsString(FieldSet):
def createFields(self):
yield UInt16(self, "length", "Number of 16-bit characters")
size = self["length"].value * 2
if size:
yield String(self, "text", size, charset="UTF-16-LE")
def createValue(self):
if "text" in self:
return self["text"].value
else:
return u""
def createDisplay(self):
return makePrintable(self.value, "UTF-8", to_unicode=True, quote='"')
def parseStringTable(parent):
while not parent.eof:
yield WindowsString(parent, "string[]")
RESOURCE_TYPE = {
1: ("cursor[]", "Cursor", None),
2: ("bitmap[]", "Bitmap", None),
3: ("icon[]", "Icon", parseIcon),
4: ("menu[]", "Menu", None),
5: ("dialog[]", "Dialog", None),
6: ("string_table[]", "String table", parseStringTable),
7: ("font_dir[]", "Font directory", None),
8: ("font[]", "Font", None),
9: ("accelerators[]", "Accelerators", None),
10: ("raw_res[]", "Unformatted resource data", None),
11: ("message_table[]", "Message table", None),
12: ("group_cursor[]", "Group cursor", None),
14: ("group_icon[]", "Group icon", None),
16: ("version_info", "Version information", parseVersionInfo),
}
class Entry(FieldSet):
static_size = 16*8
def __init__(self, parent, name, inode=None):
FieldSet.__init__(self, parent, name)
self.inode = inode
def createFields(self):
yield textHandler(UInt32(self, "rva"), hexadecimal)
yield filesizeHandler(UInt32(self, "size"))
yield UInt32(self, "codepage")
yield NullBytes(self, "reserved", 4)
def createDescription(self):
return "Entry #%u: offset=%s size=%s" % (
self.inode["offset"].value, self["rva"].display, self["size"].display)
class NameOffset(FieldSet):
def createFields(self):
yield UInt32(self, "name")
yield Bits(self, "offset", 31)
yield Bit(self, "is_name")
class IndexOffset(FieldSet):
TYPE_DESC = createDict(RESOURCE_TYPE, 1)
def __init__(self, parent, name, res_type=None):
FieldSet.__init__(self, parent, name)
self.res_type = res_type
def createFields(self):
yield Enum(UInt32(self, "type"), self.TYPE_DESC)
yield Bits(self, "offset", 31)
yield Bit(self, "is_subdir")
def createDescription(self):
if self["is_subdir"].value:
return "Sub-directory: %s at %s" % (self["type"].display, self["offset"].value)
else:
return "Index: ID %s at %s" % (self["type"].display, self["offset"].value)
class ResourceContent(FieldSet):
def __init__(self, parent, name, entry, size=None):
FieldSet.__init__(self, parent, name, size=entry["size"].value*8)
self.entry = entry
res_type = self.getResType()
if res_type in RESOURCE_TYPE:
self._name, description, self._parser = RESOURCE_TYPE[res_type]
else:
self._parser = None
def getResID(self):
return self.entry.inode["offset"].value
def getResType(self):
return self.entry.inode.res_type
def createFields(self):
if self._parser:
for field in self._parser(self):
yield field
else:
yield RawBytes(self, "content", self.size//8)
def createDescription(self):
return "Resource #%u content: type=%s" % (
self.getResID(), self.getResType())
class Header(FieldSet):
static_size = 16*8
def createFields(self):
yield NullBytes(self, "options", 4)
yield TimestampUnix32(self, "creation_date")
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "nb_name", "Number of named entries")
yield UInt16(self, "nb_index", "Number of indexed entries")
def createDescription(self):
text = "Resource header"
info = []
if self["nb_name"].value:
info.append("%u name" % self["nb_name"].value)
if self["nb_index"].value:
info.append("%u index" % self["nb_index"].value)
if self["creation_date"].value:
info.append(self["creation_date"].display)
if info:
return "%s: %s" % (text, ", ".join(info))
else:
return text
class Name(FieldSet):
def createFields(self):
yield UInt16(self, "length")
size = min(self["length"].value, 255)
if size:
yield String(self, "name", size, charset="UTF-16LE")
class Directory(FieldSet):
def __init__(self, parent, name, res_type=None):
FieldSet.__init__(self, parent, name)
nb_entries = self["header/nb_name"].value + self["header/nb_index"].value
self._size = Header.static_size + nb_entries * 64
self.res_type = res_type
def createFields(self):
yield Header(self, "header")
if MAX_NAME_PER_HEADER < self["header/nb_name"].value:
raise ParserError("EXE resource: invalid number of name (%s)"
% self["header/nb_name"].value)
if MAX_INDEX_PER_HEADER < self["header/nb_index"].value:
raise ParserError("EXE resource: invalid number of index (%s)"
% self["header/nb_index"].value)
hdr = self["header"]
for index in xrange(hdr["nb_name"].value):
yield NameOffset(self, "name[]")
for index in xrange(hdr["nb_index"].value):
yield IndexOffset(self, "index[]", self.res_type)
def createDescription(self):
return self["header"].description
class PE_Resource(SeekableFieldSet):
def __init__(self, parent, name, section, size):
SeekableFieldSet.__init__(self, parent, name, size=size)
self.section = section
def parseSub(self, directory, name, depth):
indexes = []
for index in directory.array("index"):
if index["is_subdir"].value:
indexes.append(index)
#indexes.sort(key=lambda index: index["offset"].value)
for index in indexes:
self.seekByte(index["offset"].value)
if depth == 1:
res_type = index["type"].value
else:
res_type = directory.res_type
yield Directory(self, name, res_type)
def createFields(self):
# Parse directories
depth = 0
subdir = Directory(self, "root")
yield subdir
subdirs = [subdir]
alldirs = [subdir]
while subdirs:
depth += 1
if MAX_DEPTH < depth:
self.error("EXE resource: depth too high (%s), stop parsing directories" % depth)
break
newsubdirs = []
for index, subdir in enumerate(subdirs):
name = "directory[%u][%u][]" % (depth, index)
try:
for field in self.parseSub(subdir, name, depth):
if field.__class__ == Directory:
newsubdirs.append(field)
yield field
except HACHOIR_ERRORS, err:
self.error("Unable to create directory %s: %s" % (name, err))
subdirs = newsubdirs
alldirs.extend(subdirs)
# Create resource list
resources = []
for directory in alldirs:
for index in directory.array("index"):
if not index["is_subdir"].value:
resources.append(index)
# Parse entries
entries = []
for resource in resources:
offset = resource["offset"].value
if offset is None:
continue
self.seekByte(offset)
entry = Entry(self, "entry[]", inode=resource)
yield entry
entries.append(entry)
entries.sort(key=lambda entry: entry["rva"].value)
# Parse resource content
for entry in entries:
try:
offset = self.section.rva2file(entry["rva"].value)
padding = self.seekByte(offset, relative=False)
if padding:
yield padding
yield ResourceContent(self, "content[]", entry)
except HACHOIR_ERRORS, err:
self.warning("Error when parsing entry %s: %s" % (entry.path, err))
size = (self.size - self.current_size) // 8
if size:
yield PaddingBytes(self, "padding_end", size)
class NE_VersionInfoNode(FieldSet):
TYPE_STRING = 1
TYPE_NAME = {
0: "binary",
1: "string",
}
def __init__(self, parent, name):
FieldSet.__init__(self, parent, name)
self._size = alignValue(self["size"].value, 4) * 8
def createFields(self):
yield UInt16(self, "size", "Node size (in bytes)")
yield UInt16(self, "data_size")
yield CString(self, "name", charset="ISO-8859-1")
size = paddingSize(self.current_size//8, 4)
if size:
yield NullBytes(self, "padding[]", size)
size = self["data_size"].value
if size:
if self["name"].value == "VS_VERSION_INFO":
yield VersionInfoBinary(self, "value", size=size*8)
else:
yield String(self, "value", size, charset="ISO-8859-1")
while 12 <= (self.size - self.current_size) // 8:
yield NE_VersionInfoNode(self, "node[]")
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding[]", size)
def createDescription(self):
text = "Version info node: %s" % self["name"].value
# if self["type"].value == self.TYPE_STRING and "value" in self:
# text += "=%s" % self["value"].value
return text
| 15,312 | Python | .py | 382 | 31.455497 | 100 | 0.593529 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,542 | exe_pe.py | midgetspy_Sick-Beard/lib/hachoir_parser/program/exe_pe.py | from lib.hachoir_core.field import (FieldSet, ParserError,
Bit, UInt8, UInt16, UInt32, TimestampUnix32,
Bytes, String, Enum,
PaddingBytes, PaddingBits, NullBytes, NullBits)
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.error import HACHOIR_ERRORS
class SectionHeader(FieldSet):
static_size = 40 * 8
def createFields(self):
yield String(self, "name", 8, charset="ASCII", strip="\0 ")
yield filesizeHandler(UInt32(self, "mem_size", "Size in memory"))
yield textHandler(UInt32(self, "rva", "RVA (location) in memory"), hexadecimal)
yield filesizeHandler(UInt32(self, "phys_size", "Physical size (on disk)"))
yield filesizeHandler(UInt32(self, "phys_off", "Physical location (on disk)"))
yield PaddingBytes(self, "reserved", 12)
# 0x0000000#
yield NullBits(self, "reserved[]", 4)
# 0x000000#0
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "has_code", "Contains code")
yield Bit(self, "has_init_data", "Contains initialized data")
yield Bit(self, "has_uninit_data", "Contains uninitialized data")
# 0x00000#00
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "has_comment", "Contains comments?")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "remove", "Contents will not become part of image")
# 0x0000#000
yield Bit(self, "has_comdata", "Contains comdat?")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "no_defer_spec_exc", "Reset speculative exceptions handling bits in the TLB entries")
yield Bit(self, "gp_rel", "Content can be accessed relative to GP")
# 0x000#0000
yield NullBits(self, "reserved[]", 4)
# 0x00#00000
yield NullBits(self, "reserved[]", 4)
# 0x0#000000
yield Bit(self, "ext_reloc", "Contains extended relocations?")
yield Bit(self, "discarded", "Can be discarded?")
yield Bit(self, "is_not_cached", "Is not cachable?")
yield Bit(self, "is_not_paged", "Is not pageable?")
# 0x#0000000
yield Bit(self, "is_shareable", "Is shareable?")
yield Bit(self, "is_executable", "Is executable?")
yield Bit(self, "is_readable", "Is readable?")
yield Bit(self, "is_writable", "Is writable?")
def rva2file(self, rva):
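        # Map a relative virtual address to a file offset; e.g. with
        # rva=0x2000 and phys_off=0x400, RVA 0x2050 maps to offset 0x450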
return self["phys_off"].value + (rva - self["rva"].value)
def createDescription(self):
rva = self["rva"].value
size = self["mem_size"].value
info = [
"rva=0x%08x..0x%08x" % (rva, rva+size),
"size=%s" % self["mem_size"].display,
]
if self["is_executable"].value:
info.append("exec")
if self["is_readable"].value:
info.append("read")
if self["is_writable"].value:
info.append("write")
return 'Section "%s": %s' % (self["name"].value, ", ".join(info))
def createSectionName(self):
try:
name = str(self["name"].value.strip("."))
if name:
return "section_%s" % name
except HACHOIR_ERRORS, err:
self.warning(unicode(err))
return "section[]"
class DataDirectory(FieldSet):
def createFields(self):
yield textHandler(UInt32(self, "rva", "Virtual address"), hexadecimal)
yield filesizeHandler(UInt32(self, "size"))
def createDescription(self):
if self["size"].value:
return "Directory at %s (%s)" % (
self["rva"].display, self["size"].display)
else:
return "(empty directory)"
class PE_Header(FieldSet):
static_size = 24*8
    cpu_name = {
        0x014C: u"Intel 80386",
        0x014D: u"Intel 80486",
        0x014E: u"Intel Pentium",
        0x0160: u"R3000 (MIPS), big endian",
        0x0162: u"R3000 (MIPS), little endian",
        0x0166: u"R4000 (MIPS), little endian",
        0x0168: u"R10000 (MIPS), little endian",
        0x0184: u"DEC Alpha AXP",
        0x01a2: u"Hitachi SH3",
        0x01a6: u"Hitachi SH4",
        0x01c0: u"ARM",
        0x01f0: u"IBM Power PC, little endian",
        0x0200: u"Intel IA64",
        0x0266: u"MIPS",
        0x0268: u"Motorola 68000",
        0x0284: u"Alpha AXP 64 bits",
        0x0366: u"MIPS with FPU",
        0x0466: u"MIPS16 with FPU",
    }
def createFields(self):
yield Bytes(self, "header", 4, r"PE header signature (PE\0\0)")
if self["header"].value != "PE\0\0":
raise ParserError("Invalid PE header signature")
yield Enum(UInt16(self, "cpu", "CPU type"), self.cpu_name)
yield UInt16(self, "nb_section", "Number of sections")
yield TimestampUnix32(self, "creation_date", "Creation date")
yield UInt32(self, "ptr_to_sym", "Pointer to symbol table")
yield UInt32(self, "nb_symbols", "Number of symbols")
yield UInt16(self, "opt_hdr_size", "Optional header size")
yield Bit(self, "reloc_stripped", "If true, don't contain base relocations.")
yield Bit(self, "exec_image", "Executable image?")
yield Bit(self, "line_nb_stripped", "COFF line numbers stripped?")
yield Bit(self, "local_sym_stripped", "COFF symbol table entries stripped?")
yield Bit(self, "aggr_ws", "Aggressively trim working set")
yield Bit(self, "large_addr", "Application can handle addresses greater than 2 GB")
yield NullBits(self, "reserved", 1)
yield Bit(self, "reverse_lo", "Little endian: LSB precedes MSB in memory")
yield Bit(self, "32bit", "Machine based on 32-bit-word architecture")
yield Bit(self, "is_stripped", "Debugging information removed?")
yield Bit(self, "swap", "If image is on removable media, copy and run from swap file")
yield PaddingBits(self, "reserved2", 1)
yield Bit(self, "is_system", "It's a system file")
yield Bit(self, "is_dll", "It's a dynamic-link library (DLL)")
yield Bit(self, "up", "File should be run only on a UP machine")
yield Bit(self, "reverse_hi", "Big endian: MSB precedes LSB in memory")
class PE_OptHeader(FieldSet):
SUBSYSTEM_NAME = {
1: u"Native",
2: u"Windows GUI",
3: u"Windows CUI",
5: u"OS/2 CUI",
7: u"POSIX CUI",
8: u"Native Windows",
9: u"Windows CE GUI",
10: u"EFI application",
11: u"EFI boot service driver",
12: u"EFI runtime driver",
13: u"EFI ROM",
14: u"XBOX",
16: u"Windows boot application",
}
DIRECTORY_NAME = {
0: "export",
1: "import",
2: "resource",
3: "exception",
4: "certificate",
5: "relocation",
6: "debug",
7: "description",
8: "global_ptr",
9: "tls", # Thread local storage
10: "load_config",
11: "bound_import",
12: "import_address",
}
def createFields(self):
yield UInt16(self, "signature", "PE optional header signature (0x010b)")
# TODO: Support PE32+ (signature=0x020b)
if self["signature"].value != 0x010b:
raise ParserError("Invalid PE optional header signature")
yield UInt8(self, "maj_lnk_ver", "Major linker version")
yield UInt8(self, "min_lnk_ver", "Minor linker version")
yield filesizeHandler(UInt32(self, "size_code", "Size of code"))
yield filesizeHandler(UInt32(self, "size_init_data", "Size of initialized data"))
yield filesizeHandler(UInt32(self, "size_uninit_data", "Size of uninitialized data"))
yield textHandler(UInt32(self, "entry_point", "Address (RVA) of the code entry point"), hexadecimal)
yield textHandler(UInt32(self, "base_code", "Base (RVA) of code"), hexadecimal)
yield textHandler(UInt32(self, "base_data", "Base (RVA) of data"), hexadecimal)
yield textHandler(UInt32(self, "image_base", "Image base (RVA)"), hexadecimal)
yield filesizeHandler(UInt32(self, "sect_align", "Section alignment"))
yield filesizeHandler(UInt32(self, "file_align", "File alignment"))
yield UInt16(self, "maj_os_ver", "Major OS version")
yield UInt16(self, "min_os_ver", "Minor OS version")
yield UInt16(self, "maj_img_ver", "Major image version")
yield UInt16(self, "min_img_ver", "Minor image version")
yield UInt16(self, "maj_subsys_ver", "Major subsystem version")
yield UInt16(self, "min_subsys_ver", "Minor subsystem version")
yield NullBytes(self, "reserved", 4)
yield filesizeHandler(UInt32(self, "size_img", "Size of image"))
yield filesizeHandler(UInt32(self, "size_hdr", "Size of headers"))
yield textHandler(UInt32(self, "checksum"), hexadecimal)
yield Enum(UInt16(self, "subsystem"), self.SUBSYSTEM_NAME)
yield UInt16(self, "dll_flags")
yield filesizeHandler(UInt32(self, "size_stack_reserve"))
yield filesizeHandler(UInt32(self, "size_stack_commit"))
yield filesizeHandler(UInt32(self, "size_heap_reserve"))
yield filesizeHandler(UInt32(self, "size_heap_commit"))
yield UInt32(self, "loader_flags")
yield UInt32(self, "nb_directory", "Number of RVA and sizes")
for index in xrange(self["nb_directory"].value):
try:
name = self.DIRECTORY_NAME[index]
except KeyError:
name = "data_dir[%u]" % index
yield DataDirectory(self, name)
def createDescription(self):
return "PE optional header: %s, entry point %s" % (
self["subsystem"].display,
self["entry_point"].display)
| 9,941 | Python | .py | 208 | 38.802885 | 109 | 0.613683 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,543 | cab.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/cab.py | """
Microsoft Cabinet (CAB) archive.
Author: Victor Stinner
Creation date: 31 january 2007
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, Enum,
CString, String,
UInt16, UInt32, Bit, Bits, PaddingBits, NullBits,
DateTimeMSDOS32, RawBytes)
from lib.hachoir_parser.common.msdos import MSDOSFileAttr16
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.endian import LITTLE_ENDIAN
MAX_NB_FOLDER = 30
COMPRESSION_NONE = 0
COMPRESSION_NAME = {
0: "Uncompressed",
1: "Deflate",
2: "Quantum",
3: "LZX",
}
class Folder(FieldSet):
def createFields(self):
yield UInt32(self, "off_data", "Offset of data")
yield UInt16(self, "cf_data")
yield Enum(Bits(self, "compr_method", 4, "Compression method"), COMPRESSION_NAME)
yield Bits(self, "compr_level", 5, "Compression level")
yield PaddingBits(self, "padding", 7)
def createDescription(self):
text= "Folder: compression %s" % self["compr_method"].display
if self["compr_method"].value != COMPRESSION_NONE:
text += " (level %u)" % self["compr_level"].value
return text
class File(FieldSet):
def createFields(self):
yield filesizeHandler(UInt32(self, "filesize", "Uncompressed file size"))
yield UInt32(self, "offset", "File offset after decompression")
yield UInt16(self, "iFolder", "file control id")
yield DateTimeMSDOS32(self, "timestamp")
yield MSDOSFileAttr16(self, "attributes")
yield CString(self, "filename", charset="ASCII")
def createDescription(self):
return "File %s (%s)" % (
self["filename"].display, self["filesize"].display)
class Reserved(FieldSet):
def createFields(self):
yield UInt32(self, "size")
size = self["size"].value
if size:
yield RawBytes(self, "data", size)
class Flags(FieldSet):
static_size = 16
def createFields(self):
yield Bit(self, "has_previous")
yield Bit(self, "has_next")
yield Bit(self, "has_reserved")
yield NullBits(self, "padding", 13)
class CabFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = "MSCF"
PARSER_TAGS = {
"id": "cab",
"category": "archive",
"file_ext": ("cab",),
"mime": (u"application/vnd.ms-cab-compressed",),
"magic": ((MAGIC, 0),),
"min_size": 1*8, # header + file entry
"description": "Microsoft Cabinet archive"
}
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
if self["cab_version"].value != 0x0103:
return "Unknown version (%s)" % self["cab_version"].display
if not (1 <= self["nb_folder"].value <= MAX_NB_FOLDER):
return "Invalid number of folder (%s)" % self["nb_folder"].value
return True
def createFields(self):
yield String(self, "magic", 4, "Magic (MSCF)", charset="ASCII")
yield textHandler(UInt32(self, "hdr_checksum", "Header checksum (0 if not used)"), hexadecimal)
yield filesizeHandler(UInt32(self, "filesize", "Cabinet file size"))
yield textHandler(UInt32(self, "fld_checksum", "Folders checksum (0 if not used)"), hexadecimal)
yield UInt32(self, "off_file", "Offset of first file")
yield textHandler(UInt32(self, "files_checksum", "Files checksum (0 if not used)"), hexadecimal)
yield textHandler(UInt16(self, "cab_version", "Cabinet version"), hexadecimal)
yield UInt16(self, "nb_folder", "Number of folders")
yield UInt16(self, "nb_files", "Number of files")
yield Flags(self, "flags")
yield UInt16(self, "setid")
yield UInt16(self, "number", "Zero-based cabinet number")
# --- TODO: Support flags
if self["flags/has_reserved"].value:
yield Reserved(self, "reserved")
#(3) Previous cabinet name, if CAB_HEADER.flags & CAB_FLAG_HASPREV
#(4) Previous disk name, if CAB_HEADER.flags & CAB_FLAG_HASPREV
#(5) Next cabinet name, if CAB_HEADER.flags & CAB_FLAG_HASNEXT
#(6) Next disk name, if CAB_HEADER.flags & CAB_FLAG_HASNEXT
# ----
for index in xrange(self["nb_folder"].value):
yield Folder(self, "folder[]")
for index in xrange(self["nb_files"].value):
yield File(self, "file[]")
end = self.seekBit(self.size, "endraw")
if end:
yield end
def createContentSize(self):
return self["filesize"].value * 8
| 4,627 | Python | .py | 107 | 35.906542 | 104 | 0.637939 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,544 | sevenzip.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/sevenzip.py | """
7zip file parser
Informations:
- File 7zformat.txt of 7-zip SDK:
http://www.7-zip.org/sdk.html
Author: Olivier SCHWAB
Creation date: 6 december 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (Field, FieldSet, ParserError,
GenericVector,
Enum, UInt8, UInt32, UInt64,
Bytes, RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
class SZUInt64(Field):
"""
    Variable length UInt64: the number of leading set bits in the first byte
    gives the number of extra bytes to read, while its remaining low bits
    supply the most significant bits of the value. The extra bytes hold the
    lower bits, least significant byte first.
"""
def __init__(self, parent, name, max_size=None, description=None):
Field.__init__(self, parent, name, size=8, description=description)
value = 0
addr = self.absolute_address
mask = 0x80
firstByte = parent.stream.readBits(addr, 8, LITTLE_ENDIAN)
for i in xrange(8):
addr += 8
if not (firstByte & mask):
value += ((firstByte & (mask-1)) << (8*i))
break
value |= (parent.stream.readBits(addr, 8, LITTLE_ENDIAN) << (8*i))
mask >>= 1
self._size += 8
self.createValue = lambda: value
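# Worked example with illustrative bytes: the sequence 0x81 0x2A has one
# leading set bit in its first byte, so one extra byte follows; the remaining
# bits of the first byte (0x01) become the high byte, giving
# (0x01 << 8) | 0x2A = 0x12A.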
ID_END, ID_HEADER, ID_ARCHIVE_PROPS, ID_ADD_STREAM_INFO, ID_MAIN_STREAM_INFO, \
ID_FILES_INFO, ID_PACK_INFO, ID_UNPACK_INFO, ID_SUBSTREAMS_INFO, ID_SIZE, \
ID_CRC, ID_FOLDER, ID_CODERS_UNPACK_SIZE, ID_NUM_UNPACK_STREAMS, \
ID_EMPTY_STREAM, ID_EMPTY_FILE, ID_ANTI, ID_NAME, ID_CREATION_TIME, \
ID_LAST_ACCESS_TIME, ID_LAST_WRITE_TIME, ID_WIN_ATTR, ID_COMMENT, \
ID_ENCODED_HEADER = xrange(24)
ID_INFO = {
ID_END : "End",
ID_HEADER : "Header embedding another one",
ID_ARCHIVE_PROPS : "Archive Properties",
ID_ADD_STREAM_INFO : "Additional Streams Info",
ID_MAIN_STREAM_INFO : "Main Streams Info",
ID_FILES_INFO : "Files Info",
ID_PACK_INFO : "Pack Info",
ID_UNPACK_INFO : "Unpack Info",
ID_SUBSTREAMS_INFO : "Substreams Info",
ID_SIZE : "Size",
ID_CRC : "CRC",
ID_FOLDER : "Folder",
ID_CODERS_UNPACK_SIZE: "Coders Unpacked size",
ID_NUM_UNPACK_STREAMS: "Number of Unpacked Streams",
ID_EMPTY_STREAM : "Empty Stream",
ID_EMPTY_FILE : "Empty File",
ID_ANTI : "Anti",
ID_NAME : "Name",
ID_CREATION_TIME : "Creation Time",
ID_LAST_ACCESS_TIME : "Last Access Time",
ID_LAST_WRITE_TIME : "Last Write Time",
ID_WIN_ATTR : "Win Attributes",
ID_COMMENT : "Comment",
ID_ENCODED_HEADER : "Header holding encoded data info",
}
class SkippedData(FieldSet):
def createFields(self):
yield Enum(UInt8(self, "id[]"), ID_INFO)
size = SZUInt64(self, "size")
yield size
if size.value > 0:
yield RawBytes(self, "data", size.value)
def waitForID(s, wait_id, wait_name="waited_id[]"):
while not s.eof:
addr = s.absolute_address+s.current_size
uid = s.stream.readBits(addr, 8, LITTLE_ENDIAN)
if uid == wait_id:
yield Enum(UInt8(s, wait_name), ID_INFO)
s.info("Found ID %s (%u)" % (ID_INFO[uid], uid))
return
s.info("Skipping ID %u!=%u" % (uid, wait_id))
yield SkippedData(s, "skipped_id[]", "%u != %u" % (uid, wait_id))
class HashDigest(FieldSet):
def __init__(self, parent, name, num_digests, desc=None):
FieldSet.__init__(self, parent, name, desc)
self.num_digests = num_digests
def createFields(self):
yield Enum(UInt8(self, "id"), ID_INFO)
        # Peek at the "defined" booleans, which start right after the
        # one-byte id field
        bytes = self.stream.readBytes(
            self.absolute_address + self.current_size, self.num_digests)
if self.num_digests > 0:
yield GenericVector(self, "defined[]", self.num_digests, UInt8, "bool")
for index in xrange(self.num_digests):
if bytes[index]:
yield textHandler(UInt32(self, "hash[]",
"Hash for digest %u" % index), hexadecimal)
class PackInfo(FieldSet):
def createFields(self):
yield Enum(UInt8(self, "id"), ID_INFO)
# Very important, helps determine where the data is
yield SZUInt64(self, "pack_pos", "Position of the packs")
num = SZUInt64(self, "num_pack_streams")
yield num
num = num.value
for field in waitForID(self, ID_SIZE, "size_marker"):
yield field
        for index in xrange(num):
            yield SZUInt64(self, "pack_size[]")
        while not self.eof:
            addr = self.absolute_address+self.current_size
            uid = self.stream.readBits(addr, 8, LITTLE_ENDIAN)
            if uid == ID_END:
                yield Enum(UInt8(self, "end_marker"), ID_INFO)
                break
            elif uid == ID_CRC:
                # One digest per pack stream
                yield HashDigest(self, "hash_digest", num)
            else:
                yield SkippedData(self, "skipped_data")
def lzmaParams(value):
    # The LZMA properties byte encodes (pb * 5 + lp) * 9 + lc
    param = value.value
    remainder = param / 9
    # Literal coder context bits
    lc = param % 9
    # Position state bits
    pb = remainder / 5
    # Literal coder position bits
    lp = remainder % 5
    return "lc=%u lp=%u pb=%u" % (lc, lp, pb)
class CoderID(FieldSet):
CODECS = {
# Only 2 methods ... and what about PPMD ?
"\0" : "copy",
"\3\1\1": "lzma",
}
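    # For reference (not decoded by this parser), other method IDs defined
    # by the 7z SDK include 03 04 01 (PPMd), 04 01 08 (Deflate) and
    # 04 02 02 (BZip2)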
def createFields(self):
byte = UInt8(self, "id_size")
yield byte
byte = byte.value
self.info("ID=%u" % byte)
size = byte & 0xF
if size > 0:
name = self.stream.readBytes(self.absolute_address+self.current_size, size)
if name in self.CODECS:
name = self.CODECS[name]
self.info("Codec is %s" % name)
else:
self.info("Undetermined codec %s" % name)
name = "unknown"
yield RawBytes(self, name, size)
#yield textHandler(Bytes(self, "id", size), lambda: name)
if byte & 0x10:
yield SZUInt64(self, "num_stream_in")
yield SZUInt64(self, "num_stream_out")
self.info("Streams: IN=%u OUT=%u" % \
(self["num_stream_in"].value, self["num_stream_out"].value))
if byte & 0x20:
size = SZUInt64(self, "properties_size[]")
yield size
if size.value == 5:
#[email protected]
yield textHandler(UInt8(self, "parameters"), lzmaParams)
yield filesizeHandler(UInt32(self, "dictionary_size"))
elif size.value > 0:
yield RawBytes(self, "properties[]", size.value)
class CoderInfo(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
self.in_streams = 1
self.out_streams = 1
def createFields(self):
# The real ID
addr = self.absolute_address + self.current_size
b = self.parent.stream.readBits(addr, 8, LITTLE_ENDIAN)
cid = CoderID(self, "coder_id")
yield cid
if b&0x10: # Work repeated, ...
self.in_streams = cid["num_stream_in"].value
self.out_streams = cid["num_stream_out"].value
# Skip other IDs
while b&0x80:
addr = self.absolute_address + self.current_size
b = self.parent.stream.readBits(addr, 8, LITTLE_ENDIAN)
yield CoderID(self, "unused_codec_id[]")
class BindPairInfo(FieldSet):
def createFields(self):
        # Stored as 64-bit values, though effectively cast to 32 bits
yield SZUInt64(self, "in_index")
yield SZUInt64(self, "out_index")
self.info("Indexes: IN=%u OUT=%u" % \
(self["in_index"].value, self["out_index"].value))
class FolderItem(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
self.in_streams = 0
self.out_streams = 0
def createFields(self):
yield SZUInt64(self, "num_coders")
num = self["num_coders"].value
self.info("Folder: %u codecs" % num)
# Coders info
for index in xrange(num):
ci = CoderInfo(self, "coder_info[]")
yield ci
self.in_streams += ci.in_streams
self.out_streams += ci.out_streams
        # Bind pairs
self.info("out streams: %u" % self.out_streams)
for index in xrange(self.out_streams-1):
yield BindPairInfo(self, "bind_pair[]")
# Packed streams
# @todo: Actually find mapping
packed_streams = self.in_streams - self.out_streams + 1
        if packed_streams != 1:
            for index in xrange(packed_streams):
                yield SZUInt64(self, "pack_stream[]")
class UnpackInfo(FieldSet):
def createFields(self):
yield Enum(UInt8(self, "id"), ID_INFO)
# Wait for synch
for field in waitForID(self, ID_FOLDER, "folder_marker"):
yield field
yield SZUInt64(self, "num_folders")
# Get generic info
num = self["num_folders"].value
self.info("%u folders" % num)
yield UInt8(self, "is_external")
# Read folder items
for folder_index in xrange(num):
yield FolderItem(self, "folder_item[]")
# Get unpack sizes for each coder of each folder
for field in waitForID(self, ID_CODERS_UNPACK_SIZE, "coders_unpsize_marker"):
yield field
for folder_index in xrange(num):
folder_item = self["folder_item[%u]" % folder_index]
for index in xrange(folder_item.out_streams):
#yield UInt8(self, "unpack_size[]")
yield SZUInt64(self, "unpack_size[]")
# Extract digests
while not self.eof:
addr = self.absolute_address+self.current_size
uid = self.stream.readBits(addr, 8, LITTLE_ENDIAN)
if uid == ID_END:
yield Enum(UInt8(self, "end_marker"), ID_INFO)
break
elif uid == ID_CRC:
yield HashDigest(self, "hash_digest", num)
else:
yield SkippedData(self, "skip_data")
class SubStreamInfo(FieldSet):
def createFields(self):
yield Enum(UInt8(self, "id"), ID_INFO)
raise ParserError("SubStreamInfo not implemented yet")
class EncodedHeader(FieldSet):
def createFields(self):
yield Enum(UInt8(self, "id"), ID_INFO)
while not self.eof:
addr = self.absolute_address+self.current_size
uid = self.stream.readBits(addr, 8, LITTLE_ENDIAN)
if uid == ID_END:
yield Enum(UInt8(self, "end_marker"), ID_INFO)
break
elif uid == ID_PACK_INFO:
yield PackInfo(self, "pack_info", ID_INFO[ID_PACK_INFO])
elif uid == ID_UNPACK_INFO:
yield UnpackInfo(self, "unpack_info", ID_INFO[ID_UNPACK_INFO])
elif uid == ID_SUBSTREAMS_INFO:
yield SubStreamInfo(self, "substreams_info", ID_INFO[ID_SUBSTREAMS_INFO])
else:
self.info("Unexpected ID (%i)" % uid)
break
class IDHeader(FieldSet):
def createFields(self):
yield Enum(UInt8(self, "id"), ID_INFO)
ParserError("IDHeader not implemented")
class NextHeader(FieldSet):
def __init__(self, parent, name, desc="Next header"):
FieldSet.__init__(self, parent, name, desc)
self._size = 8*self["/signature/start_hdr/next_hdr_size"].value
    # Alternative, simpler parser: less work, and as much interpretable
    # information as the version below... what an obnoxious format
def createFields2(self):
yield Enum(UInt8(self, "header_type"), ID_INFO)
yield RawBytes(self, "header_data", self._size-1)
def createFields(self):
uid = self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN)
if uid == ID_HEADER:
yield IDHeader(self, "header", ID_INFO[ID_HEADER])
elif uid == ID_ENCODED_HEADER:
yield EncodedHeader(self, "encoded_hdr", ID_INFO[ID_ENCODED_HEADER])
# Game Over: this is usually encoded using LZMA, not copy
# See SzReadAndDecodePackedStreams/SzDecode being called with the
# data position from "/next_hdr/encoded_hdr/pack_info/pack_pos"
# We should process further, yet we can't...
else:
ParserError("Unexpected ID %u" % uid)
size = self._size - self.current_size
if size > 0:
yield RawBytes(self, "next_hdr_data", size//8, "Next header's data")
class Body(FieldSet):
def __init__(self, parent, name, desc="Body data"):
FieldSet.__init__(self, parent, name, desc)
self._size = 8*self["/signature/start_hdr/next_hdr_offset"].value
def createFields(self):
if "encoded_hdr" in self["/next_hdr/"]:
pack_size = sum([s.value for s in self.array("/next_hdr/encoded_hdr/pack_info/pack_size")])
body_size = self["/next_hdr/encoded_hdr/pack_info/pack_pos"].value
yield RawBytes(self, "compressed_data", body_size, "Compressed data")
# Here we could check if copy method was used to "compress" it,
# but this never happens, so just output "compressed file info"
yield RawBytes(self, "compressed_file_info", pack_size,
"Compressed file information")
size = (self._size//8) - pack_size - body_size
if size > 0:
yield RawBytes(self, "unknown_data", size)
elif "header" in self["/next_hdr"]:
yield RawBytes(self, "compressed_data", self._size//8, "Compressed data")
class StartHeader(FieldSet):
static_size = 160
def createFields(self):
yield textHandler(UInt64(self, "next_hdr_offset",
"Next header offset"), hexadecimal)
yield UInt64(self, "next_hdr_size", "Next header size")
yield textHandler(UInt32(self, "next_hdr_crc",
"Next header CRC"), hexadecimal)
class SignatureHeader(FieldSet):
    static_size = 96 + StartHeader.static_size  # 96 bits = 6-byte magic + 2 version bytes + CRC32
def createFields(self):
yield Bytes(self, "signature", 6, "Signature Header")
yield UInt8(self, "major_ver", "Archive major version")
yield UInt8(self, "minor_ver", "Archive minor version")
yield textHandler(UInt32(self, "start_hdr_crc",
"Start header CRC"), hexadecimal)
yield StartHeader(self, "start_hdr", "Start header")
class SevenZipParser(Parser):
PARSER_TAGS = {
"id": "7zip",
"category": "archive",
"file_ext": ("7z",),
"mime": (u"application/x-7z-compressed",),
"min_size": 32*8,
"magic": (("7z\xbc\xaf\x27\x1c", 0),),
"description": "Compressed archive in 7z format"
}
endian = LITTLE_ENDIAN
def createFields(self):
yield SignatureHeader(self, "signature", "Signature Header")
yield Body(self, "body_data")
yield NextHeader(self, "next_hdr")
def validate(self):
if self.stream.readBytes(0,6) != "7z\xbc\xaf'\x1c":
return "Invalid signature"
return True
def createContentSize(self):
size = self["/signature/start_hdr/next_hdr_offset"].value
size += self["/signature/start_hdr/next_hdr_size"].value
size += 12 # Signature size
size += 20 # Start header size
return size*8
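# Usage sketch (an assumption, not part of this module: it presumes this
# vendored hachoir tree is importable as below, and the filename is made up):
#   from lib.hachoir_parser import createParser
#   parser = createParser(u"archive.7z")  # returns a SevenZipParser for 7z input
#   print parser["/signature/start_hdr/next_hdr_size"].value
# createContentSize() mirrors the on-disk layout: 12-byte signature header +
# 20-byte start header + body + next header.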
| 15,608 | Python | .py | 361 | 34.066482 | 103 | 0.592951 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,545 | bzip2_parser.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/bzip2_parser.py | """
BZIP2 archive file
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (ParserError, String,
Bytes, Character, UInt8, UInt32, CompressedField)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
try:
from bz2 import BZ2Decompressor
class Bunzip2:
def __init__(self, stream):
self.bzip2 = BZ2Decompressor()
def __call__(self, size, data=''):
try:
return self.bzip2.decompress(data)
except EOFError:
return ''
    has_bzip2 = True
except ImportError:
    has_bzip2 = False
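# CompressedField (used below) instantiates the class above with the input
# stream, then calls it as f(size, data) for each chunk; only 'data' matters
# here. A standalone check of the stdlib API this relies on (not part of the
# parser):
#   import bz2
#   d = bz2.BZ2Decompressor()
#   assert d.decompress(bz2.compress("hello")) == "hello"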
class Bzip2Parser(Parser):
PARSER_TAGS = {
"id": "bzip2",
"category": "archive",
"file_ext": ("bz2",),
"mime": (u"application/x-bzip2",),
"min_size": 10*8,
"magic": (('BZh', 0),),
"description": "bzip2 archive"
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 3) != 'BZh':
return "Wrong file signature"
if not("1" <= self["blocksize"].value <= "9"):
return "Wrong blocksize"
return True
def createFields(self):
yield String(self, "id", 3, "Identifier (BZh)", charset="ASCII")
yield Character(self, "blocksize", "Block size (KB of memory needed to uncompress)")
yield UInt8(self, "blockheader", "Block header")
if self["blockheader"].value == 0x17:
yield String(self, "id2", 4, "Identifier2 (re8P)", charset="ASCII")
yield UInt8(self, "id3", "Identifier3 (0x90)")
elif self["blockheader"].value == 0x31:
yield String(self, "id2", 5, "Identifier 2 (AY&SY)", charset="ASCII")
if self["id2"].value != "AY&SY":
raise ParserError("Invalid identifier 2 (AY&SY)!")
else:
raise ParserError("Invalid block header!")
yield textHandler(UInt32(self, "crc32", "CRC32"), hexadecimal)
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
size = (self._size - self.current_size)/8
if size:
for tag, filename in self.stream.tags:
if tag == "filename" and filename.endswith(".bz2"):
filename = filename[:-4]
break
else:
filename = None
data = Bytes(self, "file", size)
            if has_bzip2:
CompressedField(self, Bunzip2)
def createInputStream(**args):
if filename:
args.setdefault("tags",[]).append(("filename", filename))
return self._createInputStream(**args)
data._createInputStream = createInputStream
yield data
| 2,896 | Python | .py | 72 | 30.152778 | 92 | 0.577019 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,546 | tar.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/tar.py | """
Tar archive parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
Enum, UInt8, SubFile, String, NullBytes)
from lib.hachoir_core.tools import humanFilesize, paddingSize, timestampUNIX
from lib.hachoir_core.endian import BIG_ENDIAN
import re
class FileEntry(FieldSet):
type_name = {
# 48 is "0", 49 is "1", ...
0: u"Normal disk file (old format)",
48: u"Normal disk file",
49: u"Link to previously dumped file",
50: u"Symbolic link",
51: u"Character special file",
52: u"Block special file",
53: u"Directory",
54: u"FIFO special file",
55: u"Contiguous file"
}
def getOctal(self, name):
return self.octal2int(self[name].value)
def getDatetime(self):
"""
Create modification date as Unicode string, may raise ValueError.
"""
timestamp = self.getOctal("mtime")
return timestampUNIX(timestamp)
def createFields(self):
yield String(self, "name", 100, "Name", strip="\0", charset="ISO-8859-1")
yield String(self, "mode", 8, "Mode", strip=" \0", charset="ASCII")
yield String(self, "uid", 8, "User ID", strip=" \0", charset="ASCII")
yield String(self, "gid", 8, "Group ID", strip=" \0", charset="ASCII")
yield String(self, "size", 12, "Size", strip=" \0", charset="ASCII")
yield String(self, "mtime", 12, "Modification time", strip=" \0", charset="ASCII")
yield String(self, "check_sum", 8, "Check sum", strip=" \0", charset="ASCII")
yield Enum(UInt8(self, "type", "Type"), self.type_name)
yield String(self, "lname", 100, "Link name", strip=" \0", charset="ISO-8859-1")
yield String(self, "magic", 8, "Magic", strip=" \0", charset="ASCII")
yield String(self, "uname", 32, "User name", strip=" \0", charset="ISO-8859-1")
yield String(self, "gname", 32, "Group name", strip=" \0", charset="ISO-8859-1")
yield String(self, "devmajor", 8, "Dev major", strip=" \0", charset="ASCII")
yield String(self, "devminor", 8, "Dev minor", strip=" \0", charset="ASCII")
yield NullBytes(self, "padding", 167, "Padding (zero)")
filesize = self.getOctal("size")
if filesize:
yield SubFile(self, "content", filesize, filename=self["name"].value)
size = paddingSize(self.current_size//8, 512)
if size:
yield NullBytes(self, "padding_end", size, "Padding (512 align)")
def convertOctal(self, chunk):
return self.octal2int(chunk.value)
def isEmpty(self):
return self["name"].value == ""
def octal2int(self, text):
try:
return int(text, 8)
except ValueError:
return 0
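    # Example: tar encodes numbers as octal ASCII, so a stripped mode field of
    # "0000644" gives int("0000644", 8) == 420, i.e. rw-r--r--.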
def createDescription(self):
if self.isEmpty():
desc = "(terminator, empty header)"
else:
filename = self["name"].value
filesize = humanFilesize(self.getOctal("size"))
desc = "(%s: %s, %s)" % \
(filename, self["type"].display, filesize)
return "Tar File " + desc
class TarFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "tar",
"category": "archive",
"file_ext": ("tar",),
"mime": (u"application/x-tar", u"application/x-gtar"),
"min_size": 512*8,
"magic": (("ustar \0", 257*8),),
"subfile": "skip",
"description": "TAR archive",
}
    # POSIX tar stores "ustar" at offset 257; very old archives have spaces/NULs
    _sign = re.compile("ustar *\0|[ \0]*$")
def validate(self):
if not self._sign.match(self.stream.readBytes(257*8, 8)):
return "Invalid magic number"
if self[0].name == "terminator":
return "Don't contain any file"
try:
int(self["file[0]/uid"].value, 8)
int(self["file[0]/gid"].value, 8)
int(self["file[0]/size"].value, 8)
except ValueError:
return "Invalid file size"
return True
def createFields(self):
while not self.eof:
field = FileEntry(self, "file[]")
if field.isEmpty():
yield NullBytes(self, "terminator", 512)
break
yield field
if self.current_size < self._size:
yield self.seekBit(self._size, "end")
def createContentSize(self):
return self["terminator"].address + self["terminator"].size
| 4,459 | Python | .py | 107 | 33.065421 | 90 | 0.582238 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,547 | gzip_parser.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/gzip_parser.py | """
GZIP archive parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (
UInt8, UInt16, UInt32, Enum, TimestampUnix32,
Bit, CString, SubFile,
NullBits, Bytes, RawBytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.deflate import Deflate
class GzipParser(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "gzip",
"category": "archive",
"file_ext": ("gz",),
"mime": (u"application/x-gzip",),
"min_size": 18*8,
#"magic": (('\x1F\x8B\x08', 0),),
"magic_regex": (
            # magic(2) + compression=deflate(1) + flags/mtime(5) + extra flags(1) + OS(1)
('\x1F\x8B\x08.{5}[\0\2\4\6][\x00-\x0D]', 0),
),
"description": u"gzip archive",
}
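    # Illustrative header bytes matched by magic_regex (per RFC 1952):
    #   1F 8B 08 00 | 00 00 00 00 | 00 03
    #   magic cm fl |    mtime    | xfl os   (cm=8: deflate, os=3: Unix)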
os_name = {
0: u"FAT filesystem",
1: u"Amiga",
2: u"VMS (or OpenVMS)",
3: u"Unix",
4: u"VM/CMS",
5: u"Atari TOS",
6: u"HPFS filesystem (OS/2, NT)",
7: u"Macintosh",
8: u"Z-System",
9: u"CP/M",
10: u"TOPS-20",
11: u"NTFS filesystem (NT)",
12: u"QDOS",
13: u"Acorn RISCOS",
}
COMPRESSION_NAME = {
8: u"deflate",
}
def validate(self):
if self["signature"].value != '\x1F\x8B':
return "Invalid signature"
if self["compression"].value not in self.COMPRESSION_NAME:
return "Unknown compression method (%u)" % self["compression"].value
if self["reserved[0]"].value != 0:
return "Invalid reserved[0] value"
if self["reserved[1]"].value != 0:
return "Invalid reserved[1] value"
if self["reserved[2]"].value != 0:
return "Invalid reserved[2] value"
return True
def createFields(self):
# Gzip header
yield Bytes(self, "signature", 2, r"GZip file signature (\x1F\x8B)")
yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME)
# Flags
yield Bit(self, "is_text", "File content is probably ASCII text")
yield Bit(self, "has_crc16", "Header CRC16")
yield Bit(self, "has_extra", "Extra informations (variable size)")
yield Bit(self, "has_filename", "Contains filename?")
yield Bit(self, "has_comment", "Contains comment?")
yield NullBits(self, "reserved[]", 3)
yield TimestampUnix32(self, "mtime", "Modification time")
# Extra flags
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "slowest", "Compressor used maximum compression (slowest)")
yield Bit(self, "fastest", "Compressor used the fastest compression")
yield NullBits(self, "reserved[]", 5)
yield Enum(UInt8(self, "os", "Operating system"), self.os_name)
# Optional fields
if self["has_extra"].value:
yield UInt16(self, "extra_length", "Extra length")
yield RawBytes(self, "extra", self["extra_length"].value, "Extra")
if self["has_filename"].value:
yield CString(self, "filename", "Filename", charset="ISO-8859-1")
if self["has_comment"].value:
yield CString(self, "comment", "Comment")
if self["has_crc16"].value:
yield textHandler(UInt16(self, "hdr_crc16", "CRC16 of the header"),
hexadecimal)
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError()
# Read file
size = (self._size - self.current_size) // 8 - 8 # -8: crc32+size
if 0 < size:
if self["has_filename"].value:
filename = self["filename"].value
else:
for tag, filename in self.stream.tags:
if tag == "filename" and filename.endswith(".gz"):
filename = filename[:-3]
break
else:
filename = None
yield Deflate(SubFile(self, "file", size, filename=filename))
# Footer
yield textHandler(UInt32(self, "crc32",
"Uncompressed data content CRC32"), hexadecimal)
yield filesizeHandler(UInt32(self, "size", "Uncompressed size"))
def createDescription(self):
desc = u"gzip archive"
info = []
if "filename" in self:
info.append('filename "%s"' % self["filename"].value)
if "size" in self:
info.append("was %s" % self["size"].display)
if self["mtime"].value:
info.append(self["mtime"].display)
return "%s: %s" % (desc, ", ".join(info))
| 4,762 | Python | .py | 116 | 31.543103 | 91 | 0.572415 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,548 | ar.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/ar.py | """
GNU ar archive : archive file (.a) and Debian (.deb) archive.
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
String, RawBytes, UnixLine)
from lib.hachoir_core.endian import BIG_ENDIAN
class ArchiveFileEntry(FieldSet):
def createFields(self):
yield UnixLine(self, "header", "Header")
info = self["header"].value.split()
if len(info) != 7:
raise ParserError("Invalid file entry header")
size = int(info[5])
if 0 < size:
yield RawBytes(self, "content", size, "File data")
def createDescription(self):
return "File entry (%s)" % self["header"].value.split()[0]
class ArchiveFile(Parser):
endian = BIG_ENDIAN
MAGIC = '!<arch>\n'
PARSER_TAGS = {
"id": "unix_archive",
"category": "archive",
"file_ext": ("a", "deb"),
"mime":
(u"application/x-debian-package",
u"application/x-archive",
u"application/x-dpkg"),
"min_size": (8 + 13)*8, # file signature + smallest file as possible
"magic": ((MAGIC, 0),),
"description": "Unix archive"
}
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic string"
return True
def createFields(self):
yield String(self, "id", 8, "Unix archive identifier (\"<!arch>\")", charset="ASCII")
while not self.eof:
data = self.stream.readBytes(self.current_size, 1)
if data == "\n":
yield RawBytes(self, "empty_line[]", 1, "Empty line")
else:
yield ArchiveFileEntry(self, "file[]", "File")
| 1,734 | Python | .py | 45 | 30.4 | 93 | 0.58918 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,549 | ace.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/ace.py | """
ACE parser
From wotsit.org and the SDK header (bitflags)
Partial study of a new block type (5) I've called "new_recovery", as its
syntax is very close to the former one (of type 2).
Status: can only fully read file and header blocks.
Author: Christophe Gisquet <[email protected]>
Creation date: 19 January 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, NullBits, RawBytes, Enum,
UInt8, UInt16, UInt32,
PascalString8, PascalString16, String,
TimeDateMSDOS32)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.msdos import MSDOSFileAttr32
MAGIC = "**ACE**"
OS_MSDOS = 0
OS_WIN32 = 2
HOST_OS = {
0: "MS-DOS",
1: "OS/2",
2: "Win32",
3: "Unix",
4: "MAC-OS",
5: "Win NT",
6: "Primos",
7: "APPLE GS",
8: "ATARI",
9: "VAX VMS",
10: "AMIGA",
11: "NEXT",
}
COMPRESSION_TYPE = {
0: "Store",
1: "Lempel-Ziv 77",
2: "ACE v2.0",
}
COMPRESSION_MODE = {
0: "fastest",
1: "fast",
2: "normal",
3: "good",
4: "best",
}
# TODO: Computing the CRC16 would also prove useful
#def markerValidate(self):
# return not self["extend"].value and self["signature"].value == MAGIC and \
# self["host_os"].value<12
class MarkerFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(Bit, "has_comment", "Whether the archive has a comment"),
(NullBits, "unused", 7, "Reserved bits"),
(Bit, "sfx", "SFX"),
(Bit, "limited_dict", "Junior SFX with 256K dictionary"),
(Bit, "multi_volume", "Part of a set of ACE archives"),
(Bit, "has_av_string", "This header holds an AV-string"),
(Bit, "recovery_record", "Recovery record preset"),
(Bit, "locked", "Archive is locked"),
(Bit, "solid", "Archive uses solid compression")
)
def markerFlags(self):
yield MarkerFlags(self, "flags", "Marker flags")
def markerHeader(self):
yield String(self, "signature", 7, "Signature")
yield UInt8(self, "ver_extract", "Version needed to extract archive")
yield UInt8(self, "ver_created", "Version used to create archive")
yield Enum(UInt8(self, "host_os", "OS where the files were compressed"), HOST_OS)
yield UInt8(self, "vol_num", "Volume number")
yield TimeDateMSDOS32(self, "time", "Date and time (MS DOS format)")
yield Bits(self, "reserved", 64, "Reserved size for future extensions")
flags = self["flags"]
if flags["has_av_string"].value:
yield PascalString8(self, "av_string", "AV String")
if flags["has_comment"].value:
size = filesizeHandler(UInt16(self, "comment_size", "Comment size"))
yield size
if size.value > 0:
yield RawBytes(self, "compressed_comment", size.value, \
"Compressed comment")
class FileFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(Bit, "has_comment", "Presence of file comment"),
(Bits, "unused", 10, "Unused bit flags"),
(Bit, "encrypted", "File encrypted with password"),
(Bit, "previous", "File continued from previous volume"),
(Bit, "next", "File continues on the next volume"),
(Bit, "solid", "File compressed using previously archived files")
)
def fileFlags(self):
yield FileFlags(self, "flags", "File flags")
def fileHeader(self):
yield filesizeHandler(UInt32(self, "compressed_size", "Size of the compressed file"))
yield filesizeHandler(UInt32(self, "uncompressed_size", "Uncompressed file size"))
yield TimeDateMSDOS32(self, "ftime", "Date and time (MS DOS format)")
if self["/header/host_os"].value in (OS_MSDOS, OS_WIN32):
yield MSDOSFileAttr32(self, "file_attr", "File attributes")
else:
yield textHandler(UInt32(self, "file_attr", "File attributes"), hexadecimal)
yield textHandler(UInt32(self, "file_crc32", "CRC32 checksum over the compressed file)"), hexadecimal)
yield Enum(UInt8(self, "compression_type", "Type of compression"), COMPRESSION_TYPE)
yield Enum(UInt8(self, "compression_mode", "Quality of compression"), COMPRESSION_MODE)
yield textHandler(UInt16(self, "parameters", "Compression parameters"), hexadecimal)
yield textHandler(UInt16(self, "reserved", "Reserved data"), hexadecimal)
# Filename
yield PascalString16(self, "filename", "Filename")
# Comment
if self["flags/has_comment"].value:
yield filesizeHandler(UInt16(self, "comment_size", "Size of the compressed comment"))
if self["comment_size"].value > 0:
yield RawBytes(self, "comment_data", self["comment_size"].value, "Comment data")
def fileBody(self):
size = self["compressed_size"].value
if size > 0:
yield RawBytes(self, "compressed_data", size, "Compressed data")
def fileDesc(self):
return "File entry: %s (%s)" % (self["filename"].value, self["compressed_size"].display)
def recoveryHeader(self):
yield filesizeHandler(UInt32(self, "rec_blk_size", "Size of recovery data"))
    self.body_size = self["rec_blk_size"].value
yield String(self, "signature", 7, "Signature, normally '**ACE**'")
yield textHandler(UInt32(self, "relative_start",
"Relative start (to this block) of the data this block is mode of"),
hexadecimal)
yield UInt32(self, "num_blocks", "Number of blocks the data is split into")
yield UInt32(self, "size_blocks", "Size of these blocks")
yield UInt16(self, "crc16_blocks", "CRC16 over recovery data")
# size_blocks blocks of size size_blocks follow
# The ultimate data is the xor data of all those blocks
size = self["size_blocks"].value
for index in xrange(self["num_blocks"].value):
yield RawBytes(self, "data[]", size, "Recovery block %i" % index)
yield RawBytes(self, "xor_data", size, "The XOR value of the above data blocks")
def recoveryDesc(self):
return "Recovery block, size=%u" % self["body_size"].display
def newRecoveryHeader(self):
"""
This header is described nowhere
"""
if self["flags/extend"].value:
yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
self.body_size = self["body_size"].value
yield textHandler(UInt32(self, "unknown[]", "Unknown field, probably 0"),
hexadecimal)
yield String(self, "signature", 7, "Signature, normally '**ACE**'")
yield textHandler(UInt32(self, "relative_start",
"Offset (=crc16's) of this block in the file"), hexadecimal)
yield textHandler(UInt32(self, "unknown[]",
"Unknown field, probably 0"), hexadecimal)
class BaseFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(NullBits, "unused", 15, "Unused bit flags")
)
def parseFlags(self):
yield BaseFlags(self, "flags", "Unknown flags")
def parseHeader(self):
if self["flags/extend"].value:
yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
self.body_size = self["body_size"].value
def parseBody(self):
if self.body_size > 0:
yield RawBytes(self, "body_data", self.body_size, "Body data, unhandled")
class Block(FieldSet):
TAG_INFO = {
0: ("header", "Archiver header", markerFlags, markerHeader, None),
1: ("file[]", fileDesc, fileFlags, fileHeader, fileBody),
2: ("recovery[]", recoveryDesc, recoveryHeader, None, None),
5: ("new_recovery[]", None, None, newRecoveryHeader, None)
}
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
self.body_size = 0
self.desc_func = None
type = self["block_type"].value
if type in self.TAG_INFO:
self._name, desc, self.parseFlags, self.parseHeader, self.parseBody = self.TAG_INFO[type]
if desc:
if isinstance(desc, str):
self._description = desc
else:
self.desc_func = desc
else:
            self.warning("Processing as unknown block of type %u" % type)
if not self.parseFlags:
self.parseFlags = parseFlags
if not self.parseHeader:
self.parseHeader = parseHeader
if not self.parseBody:
self.parseBody = parseBody
def createFields(self):
yield textHandler(UInt16(self, "crc16", "Archive CRC16 (from byte 4 on)"), hexadecimal)
yield filesizeHandler(UInt16(self, "head_size", "Block size (from byte 4 on)"))
yield UInt8(self, "block_type", "Block type")
# Flags
for flag in self.parseFlags(self):
yield flag
# Rest of the header
for field in self.parseHeader(self):
yield field
size = self["head_size"].value - (self.current_size//8) + (2+2)
if size > 0:
yield RawBytes(self, "extra_data", size, "Extra header data, unhandled")
# Body in itself
for field in self.parseBody(self):
yield field
def createDescription(self):
if self.desc_func:
return self.desc_func(self)
else:
return "Block: %s" % self["type"].display
class AceFile(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "ace",
"category": "archive",
"file_ext": ("ace",),
"mime": (u"application/x-ace-compressed",),
"min_size": 50*8,
"description": "ACE archive"
}
def validate(self):
if self.stream.readBytes(7*8, len(MAGIC)) != MAGIC:
return "Invalid magic"
return True
def createFields(self):
while not self.eof:
yield Block(self, "block[]")
| 9,964 | Python | .py | 232 | 36.37931 | 106 | 0.646798 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,550 | rar.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/rar.py | """
RAR parser
Status: can only read higher-level structures
Author: Christophe Gisquet
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, Enum,
UInt8, UInt16, UInt32, UInt64,
String, TimeDateMSDOS32,
NullBytes, NullBits, RawBytes)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.msdos import MSDOSFileAttr32
MAX_FILESIZE = 1000 * 1024 * 1024
BLOCK_NAME = {
0x72: "Marker",
0x73: "Archive",
0x74: "File",
0x75: "Comment",
0x76: "Extra info",
0x77: "Subblock",
0x78: "Recovery record",
0x79: "Archive authenticity",
0x7A: "New-format subblock",
0x7B: "Archive end",
}
COMPRESSION_NAME = {
0x30: "Storing",
0x31: "Fastest compression",
0x32: "Fast compression",
0x33: "Normal compression",
0x34: "Good compression",
0x35: "Best compression"
}
OS_MSDOS = 0
OS_WIN32 = 2
OS_NAME = {
0: "MS DOS",
1: "OS/2",
2: "Win32",
3: "Unix",
}
DICTIONARY_SIZE = {
0: "Dictionary size 64 Kb",
1: "Dictionary size 128 Kb",
2: "Dictionary size 256 Kb",
3: "Dictionary size 512 Kb",
4: "Dictionary size 1024 Kb",
7: "File is a directory",
}
def formatRARVersion(field):
"""
Decodes the RAR version stored on 1 byte
"""
return "%u.%u" % divmod(field.value, 10)
def commonFlags(s):
yield Bit(s, "has_added_size", "Additional field indicating additional size")
yield Bit(s, "is_ignorable", "Old versions of RAR should ignore this block when copying data")
class ArchiveFlags(StaticFieldSet):
format = (
(Bit, "vol", "Archive volume"),
(Bit, "has_comment", "Whether there is a comment"),
(Bit, "is_locked", "Archive volume"),
(Bit, "is_solid", "Whether files can be extracted separately"),
(Bit, "new_numbering", "New numbering, or compressed comment"), # From unrar
(Bit, "has_authenticity_information", "The integrity/authenticity of the archive can be checked"),
(Bit, "is_protected", "The integrity/authenticity of the archive can be checked"),
(Bit, "is_passworded", "Needs a password to be decrypted"),
(Bit, "is_first_vol", "Whether it is the first volume"),
(Bit, "is_encrypted", "Whether the encryption version is present"),
(NullBits, "internal", 6, "Reserved for 'internal use'")
)
def archiveFlags(s):
yield ArchiveFlags(s, "flags", "Archiver block flags")
def archiveHeader(s):
yield NullBytes(s, "reserved[]", 2, "Reserved word")
yield NullBytes(s, "reserved[]", 4, "Reserved dword")
def commentHeader(s):
yield filesizeHandler(UInt16(s, "total_size", "Comment header size + comment size"))
yield filesizeHandler(UInt16(s, "uncompressed_size", "Uncompressed comment size"))
yield UInt8(s, "required_version", "RAR version needed to extract comment")
yield UInt8(s, "packing_method", "Comment packing method")
yield UInt16(s, "comment_crc16", "Comment CRC")
def commentBody(s):
size = s["total_size"].value - s.current_size
if size > 0:
yield RawBytes(s, "comment_data", size, "Compressed comment data")
def signatureHeader(s):
yield TimeDateMSDOS32(s, "creation_time")
yield filesizeHandler(UInt16(s, "arc_name_size"))
yield filesizeHandler(UInt16(s, "user_name_size"))
def recoveryHeader(s):
yield filesizeHandler(UInt32(s, "total_size"))
yield textHandler(UInt8(s, "version"), hexadecimal)
yield UInt16(s, "rec_sectors")
yield UInt32(s, "total_blocks")
yield RawBytes(s, "mark", 8)
def avInfoHeader(s):
yield filesizeHandler(UInt16(s, "total_size", "Total block size"))
yield UInt8(s, "version", "Version needed to decompress", handler=hexadecimal)
yield UInt8(s, "method", "Compression method", handler=hexadecimal)
yield UInt8(s, "av_version", "Version for AV", handler=hexadecimal)
yield UInt32(s, "av_crc", "AV info CRC32", handler=hexadecimal)
def avInfoBody(s):
size = s["total_size"].value - s.current_size
if size > 0:
yield RawBytes(s, "av_info_data", size, "AV info")
class FileFlags(FieldSet):
static_size = 16
def createFields(self):
yield Bit(self, "continued_from", "File continued from previous volume")
yield Bit(self, "continued_in", "File continued in next volume")
yield Bit(self, "is_encrypted", "File encrypted with password")
yield Bit(self, "has_comment", "File comment present")
yield Bit(self, "is_solid", "Information from previous files is used (solid flag)")
        # The following fields are what keeps this from being a StaticFieldSet
yield Enum(Bits(self, "dictionary_size", 3, "Dictionary size"), DICTIONARY_SIZE)
for bit in commonFlags(self):
yield bit
yield Bit(self, "is_large", "file64 operations needed")
yield Bit(self, "is_unicode", "Filename also encoded using Unicode")
yield Bit(self, "has_salt", "Has salt for encryption")
yield Bit(self, "uses_file_version", "File versioning is used")
yield Bit(self, "has_ext_time", "Extra time ??")
yield Bit(self, "has_ext_flags", "Extra flag ??")
def fileFlags(s):
yield FileFlags(s, "flags", "File block flags")
class ExtTime(FieldSet):
def createFields(self):
yield textHandler(UInt16(self, "time_flags", "Flags for extended time"), hexadecimal)
flags = self["time_flags"].value
for index in xrange(4):
rmode = flags >> ((3-index)*4)
if rmode & 8:
if index:
yield TimeDateMSDOS32(self, "dos_time[]", "DOS Time")
if rmode & 3:
yield RawBytes(self, "remainder[]", rmode & 3, "Time remainder")
def specialHeader(s, is_file):
yield filesizeHandler(UInt32(s, "compressed_size", "Compressed size (bytes)"))
yield filesizeHandler(UInt32(s, "uncompressed_size", "Uncompressed size (bytes)"))
yield Enum(UInt8(s, "host_os", "Operating system used for archiving"), OS_NAME)
yield textHandler(UInt32(s, "crc32", "File CRC32"), hexadecimal)
yield TimeDateMSDOS32(s, "ftime", "Date and time (MS DOS format)")
yield textHandler(UInt8(s, "version", "RAR version needed to extract file"), formatRARVersion)
yield Enum(UInt8(s, "method", "Packing method"), COMPRESSION_NAME)
yield filesizeHandler(UInt16(s, "filename_length", "File name size"))
if s["host_os"].value in (OS_MSDOS, OS_WIN32):
yield MSDOSFileAttr32(s, "file_attr", "File attributes")
else:
yield textHandler(UInt32(s, "file_attr", "File attributes"), hexadecimal)
# Start additional field from unrar
if s["flags/is_large"].value:
yield filesizeHandler(UInt64(s, "large_size", "Extended 64bits filesize"))
# End additional field
size = s["filename_length"].value
if size > 0:
if s["flags/is_unicode"].value:
charset = "UTF-8"
else:
charset = "ISO-8859-15"
yield String(s, "filename", size, "Filename", charset=charset)
# Start additional fields from unrar - file only
if is_file:
if s["flags/has_salt"].value:
yield textHandler(UInt8(s, "salt", "Salt"), hexadecimal)
if s["flags/has_ext_time"].value:
yield ExtTime(s, "extra_time", "Extra time info")
def fileHeader(s):
return specialHeader(s, True)
def fileBody(s):
# File compressed data
size = s["compressed_size"].value
if s["flags/is_large"].value:
size += s["large_size"].value
if size > 0:
yield RawBytes(s, "compressed_data", size, "File compressed data")
def fileDescription(s):
return "File entry: %s (%s)" % \
(s["filename"].display, s["compressed_size"].display)
def newSubHeader(s):
return specialHeader(s, False)
class EndFlags(StaticFieldSet):
format = (
(Bit, "has_next_vol", "Whether there is another next volume"),
(Bit, "has_data_crc", "Whether a CRC value is present"),
(Bit, "rev_space"),
(Bit, "has_vol_number", "Whether the volume number is present"),
(Bits, "unused[]", 4),
(Bit, "has_added_size", "Additional field indicating additional size"),
(Bit, "is_ignorable", "Old versions of RAR should ignore this block when copying data"),
(Bits, "unused[]", 6),
)
def endFlags(s):
yield EndFlags(s, "flags", "End block flags")
class BlockFlags(FieldSet):
static_size = 16
def createFields(self):
yield textHandler(Bits(self, "unused[]", 8, "Unused flag bits"), hexadecimal)
yield Bit(self, "has_added_size", "Additional field indicating additional size")
yield Bit(self, "is_ignorable", "Old versions of RAR should ignore this block when copying data")
yield Bits(self, "unused[]", 6)
class Block(FieldSet):
BLOCK_INFO = {
# None means 'use default function'
0x72: ("marker", "Archive header", None, None, None),
0x73: ("archive_start", "Archive info", archiveFlags, archiveHeader, None),
0x74: ("file[]", fileDescription, fileFlags, fileHeader, fileBody),
0x75: ("comment[]", "Stray comment", None, commentHeader, commentBody),
0x76: ("av_info[]", "Extra information", None, avInfoHeader, avInfoBody),
0x77: ("sub_block[]", "Stray subblock", None, newSubHeader, fileBody),
0x78: ("recovery[]", "Recovery block", None, recoveryHeader, None),
0x79: ("signature", "Signature block", None, signatureHeader, None),
0x7A: ("new_sub_block[]", "Stray new-format subblock", fileFlags,
newSubHeader, fileBody),
0x7B: ("archive_end", "Archive end block", endFlags, None, None),
}
def __init__(self, parent, name):
FieldSet.__init__(self, parent, name)
t = self["block_type"].value
if t in self.BLOCK_INFO:
self._name, desc, parseFlags, parseHeader, parseBody = self.BLOCK_INFO[t]
if callable(desc):
self.createDescription = lambda: desc(self)
elif desc:
self._description = desc
if parseFlags : self.parseFlags = lambda: parseFlags(self)
if parseHeader : self.parseHeader = lambda: parseHeader(self)
if parseBody : self.parseBody = lambda: parseBody(self)
else:
self.info("Processing as unknown block block of type %u" % type)
self._size = 8*self["block_size"].value
if t == 0x74 or t == 0x7A:
self._size += 8*self["compressed_size"].value
if "is_large" in self["flags"] and self["flags/is_large"].value:
self._size += 8*self["large_size"].value
elif "has_added_size" in self:
self._size += 8*self["added_size"].value
# TODO: check if any other member is needed here
def createFields(self):
yield textHandler(UInt16(self, "crc16", "Block CRC16"), hexadecimal)
yield textHandler(UInt8(self, "block_type", "Block type"), hexadecimal)
# Parse flags
for field in self.parseFlags():
yield field
# Get block size
yield filesizeHandler(UInt16(self, "block_size", "Block size"))
# Parse remaining header
for field in self.parseHeader():
yield field
        # Finish header with stuff of unknown size
size = self["block_size"].value - (self.current_size//8)
if size > 0:
yield RawBytes(self, "unknown", size, "Unknow data (UInt32 probably)")
# Parse body
for field in self.parseBody():
yield field
def createDescription(self):
return "Block entry: %s" % self["type"].display
def parseFlags(self):
yield BlockFlags(self, "flags", "Block header flags")
def parseHeader(self):
if "has_added_size" in self["flags"] and \
self["flags/has_added_size"].value:
yield filesizeHandler(UInt32(self, "added_size",
"Supplementary block size"))
def parseBody(self):
"""
Parse what is left of the block
"""
size = self["block_size"].value - (self.current_size//8)
if "has_added_size" in self["flags"] and self["flags/has_added_size"].value:
size += self["added_size"].value
if size > 0:
yield RawBytes(self, "body", size, "Body data")
class RarFile(Parser):
MAGIC = "Rar!\x1A\x07\x00"
PARSER_TAGS = {
"id": "rar",
"category": "archive",
"file_ext": ("rar",),
"mime": (u"application/x-rar-compressed", ),
"min_size": 7*8,
"magic": ((MAGIC, 0),),
"description": "Roshal archive (RAR)",
}
endian = LITTLE_ENDIAN
def validate(self):
magic = self.MAGIC
if self.stream.readBytes(0, len(magic)) != magic:
return "Invalid magic"
return True
def createFields(self):
while not self.eof:
yield Block(self, "block[]")
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
pos = self.stream.searchBytes("\xC4\x3D\x7B\x00\x40\x07\x00", start, end)
if pos is not None:
return pos + 7*8
return None
| 13,384 | Python | .py | 303 | 37.023102 | 106 | 0.637096 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,551 | mar.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/mar.py | """
Microsoft Archive parser
Author: Victor Stinner
Creation date: 2007-03-04
"""
MAX_NB_FILE = 100000
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import FieldSet, String, UInt32, SubFile
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
class FileIndex(FieldSet):
static_size = 68*8
def createFields(self):
yield String(self, "filename", 56, truncate="\0", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize"))
yield textHandler(UInt32(self, "crc32"), hexadecimal)
yield UInt32(self, "offset")
def createDescription(self):
return "File %s (%s) at %s" % (
self["filename"].value, self["filesize"].display, self["offset"].value)
class MarFile(Parser):
MAGIC = "MARC"
PARSER_TAGS = {
"id": "mar",
"category": "archive",
"file_ext": ("mar",),
"min_size": 80*8, # At least one file index
"magic": ((MAGIC, 0),),
"description": "Microsoft Archive",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
if self["version"].value != 3:
return "Invalid version"
if not(1 <= self["nb_file"].value <= MAX_NB_FILE):
return "Invalid number of file"
return True
def createFields(self):
yield String(self, "magic", 4, "File signature (MARC)", charset="ASCII")
yield UInt32(self, "version")
yield UInt32(self, "nb_file")
files = []
for index in xrange(self["nb_file"].value):
item = FileIndex(self, "file[]")
yield item
if item["filesize"].value:
files.append(item)
files.sort(key=lambda item: item["offset"].value)
        for entry in files:
            padding = self.seekByte(entry["offset"].value)
            if padding:
                yield padding
            size = entry["filesize"].value
            desc = "File %s" % entry["filename"].value
            yield SubFile(self, "data[]", size, desc, filename=entry["filename"].value)
| 2,220 | Python | .py | 57 | 30.964912 | 87 | 0.609847 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,552 | zip.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/zip.py | """
Zip splitter.
Status: can read most important headers
Authors: Christophe Gisquet and Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, Enum,
TimeDateMSDOS32, SubFile,
UInt8, UInt16, UInt32, UInt64,
String, PascalString16,
RawBytes)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.error import HACHOIR_ERRORS
from lib.hachoir_core.tools import makeUnicode
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.deflate import Deflate
MAX_FILESIZE = 1000 * 1024 * 1024
COMPRESSION_DEFLATE = 8
COMPRESSION_METHOD = {
0: u"no compression",
1: u"Shrunk",
2: u"Reduced (factor 1)",
3: u"Reduced (factor 2)",
4: u"Reduced (factor 3)",
5: u"Reduced (factor 4)",
6: u"Imploded",
7: u"Tokenizing",
8: u"Deflate",
9: u"Deflate64",
10: u"PKWARE Imploding",
11: u"Reserved by PKWARE",
12: u"File is compressed using BZIP2 algorithm",
13: u"Reserved by PKWARE",
14: u"LZMA (EFS)",
15: u"Reserved by PKWARE",
16: u"Reserved by PKWARE",
17: u"Reserved by PKWARE",
18: u"File is compressed using IBM TERSE (new)",
19: u"IBM LZ77 z Architecture (PFS)",
98: u"PPMd version I, Rev 1",
}
def ZipRevision(field):
return "%u.%u" % divmod(field.value, 10)
class ZipVersion(FieldSet):
static_size = 16
HOST_OS = {
0: u"FAT file system (DOS, OS/2, NT)",
1: u"Amiga",
2: u"VMS (VAX or Alpha AXP)",
3: u"Unix",
4: u"VM/CMS",
5: u"Atari",
6: u"HPFS file system (OS/2, NT 3.x)",
7: u"Macintosh",
8: u"Z-System",
9: u"CP/M",
10: u"TOPS-20",
11: u"NTFS file system (NT)",
12: u"SMS/QDOS",
13: u"Acorn RISC OS",
14: u"VFAT file system (Win95, NT)",
15: u"MVS",
16: u"BeOS (BeBox or PowerMac)",
17: u"Tandem",
}
def createFields(self):
yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision)
yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS)
class ZipGeneralFlags(FieldSet):
static_size = 16
def createFields(self):
        # Need the compression method from the parent: it is the 16-bit field
        # that immediately follows these flags
method = self.stream.readBits(self.absolute_address+16, 16, LITTLE_ENDIAN)
yield Bits(self, "unused[]", 2, "Unused")
yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked")
yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.")
yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8")
yield Bits(self, "unused[]", 4, "Unused")
yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)")
yield Bit(self, "is_patched", "File is compressed with patched data?")
yield Bit(self, "enhanced_deflate", "Reserved for use with method 8")
yield Bit(self, "has_descriptor",
"Compressed data followed by descriptor?")
if method == 6:
yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)")
yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)")
elif method in (8, 9):
NAME = {
0: "Normal compression",
1: "Maximum compression",
2: "Fast compression",
3: "Super Fast compression"
}
yield Enum(Bits(self, "method", 2), NAME)
elif method == 14: #LZMA
yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker")
yield Bit(self, "unused[]")
else:
yield Bits(self, "compression_info", 2)
yield Bit(self, "is_encrypted", "File is encrypted?")
class ExtraField(FieldSet):
EXTRA_FIELD_ID = {
0x0007: "AV Info",
0x0009: "OS/2 extended attributes (also Info-ZIP)",
0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented!
0x000c: "PKWARE VAX/VMS (also Info-ZIP)",
0x000d: "PKWARE Unix",
0x000f: "Patch Descriptor",
0x07c8: "Info-ZIP Macintosh (old, J. Lee)",
0x2605: "ZipIt Macintosh (first version)",
0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)",
0x334d: "Info-ZIP Macintosh (new, D. Haase Mac3 field)",
0x4341: "Acorn/SparkFS (David Pilling)",
0x4453: "Windows NT security descriptor (binary ACL)",
0x4704: "VM/CMS",
0x470f: "MVS",
0x4b46: "FWKCS MD5 (third party, see below)",
0x4c41: "OS/2 access control list (text ACL)",
0x4d49: "Info-ZIP VMS (VAX or Alpha)",
0x5356: "AOS/VS (binary ACL)",
0x5455: "extended timestamp",
0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)",
0x6542: "BeOS (BeBox, PowerMac, etc.)",
0x756e: "ASi Unix",
0x7855: "Info-ZIP Unix (new)",
0xfb4a: "SMS/QDOS",
}
def createFields(self):
yield Enum(UInt16(self, "field_id", "Extra field ID"),
self.EXTRA_FIELD_ID)
size = UInt16(self, "field_data_size", "Extra field data size")
yield size
if size.value > 0:
yield RawBytes(self, "field_data", size, "Unknown field data")
def ZipStartCommonFields(self):
yield ZipVersion(self, "version_needed", "Version needed")
yield ZipGeneralFlags(self, "flags", "General purpose flag")
yield Enum(UInt16(self, "compression", "Compression method"),
COMPRESSION_METHOD)
yield TimeDateMSDOS32(self, "last_mod", "Last modification file time")
yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal)
yield UInt32(self, "compressed_size", "Compressed size")
yield UInt32(self, "uncompressed_size", "Uncompressed size")
yield UInt16(self, "filename_length", "Filename length")
yield UInt16(self, "extra_length", "Extra fields length")
def zipGetCharset(self):
if self["flags/uses_unicode"].value:
return "UTF-8"
else:
return "ISO-8859-15"
class ZipCentralDirectory(FieldSet):
HEADER = 0x02014b50
def createFields(self):
yield ZipVersion(self, "version_made_by", "Version made by")
for field in ZipStartCommonFields(self):
yield field
# Check unicode status
charset = zipGetCharset(self)
yield UInt16(self, "comment_length", "Comment length")
yield UInt16(self, "disk_number_start", "Disk number start")
yield UInt16(self, "internal_attr", "Internal file attributes")
yield UInt32(self, "external_attr", "External file attributes")
yield UInt32(self, "offset_header", "Relative offset of local header")
yield String(self, "filename", self["filename_length"].value,
"Filename", charset=charset)
if 0 < self["extra_length"].value:
yield RawBytes(self, "extra", self["extra_length"].value,
"Extra fields")
if 0 < self["comment_length"].value:
yield String(self, "comment", self["comment_length"].value,
"Comment", charset=charset)
def createDescription(self):
return "Central directory: %s" % self["filename"].display
class Zip64EndCentralDirectory(FieldSet):
HEADER = 0x06064b50
def createFields(self):
yield UInt64(self, "zip64_end_size",
"Size of zip64 end of central directory record")
yield ZipVersion(self, "version_made_by", "Version made by")
yield ZipVersion(self, "version_needed", "Version needed to extract")
yield UInt32(self, "number_disk", "Number of this disk")
yield UInt32(self, "number_disk2",
"Number of the disk with the start of the central directory")
yield UInt64(self, "number_entries",
"Total number of entries in the central directory on this disk")
yield UInt64(self, "number_entries2",
"Total number of entries in the central directory")
yield UInt64(self, "size", "Size of the central directory")
yield UInt64(self, "offset", "Offset of start of central directory")
if 0 < self["zip64_end_size"].value:
yield RawBytes(self, "data_sector", self["zip64_end_size"].value,
"zip64 extensible data sector")
class ZipEndCentralDirectory(FieldSet):
HEADER = 0x06054b50
def createFields(self):
yield UInt16(self, "number_disk", "Number of this disk")
yield UInt16(self, "number_disk2", "Number in the central dir")
yield UInt16(self, "total_number_disk",
"Total number of entries in this disk")
yield UInt16(self, "total_number_disk2",
"Total number of entries in the central dir")
yield UInt32(self, "size", "Size of the central directory")
yield UInt32(self, "offset", "Offset of start of central directory")
yield PascalString16(self, "comment", "ZIP comment")
class ZipDataDescriptor(FieldSet):
HEADER_STRING = "\x50\x4B\x07\x08"
HEADER = 0x08074B50
static_size = 96
def createFields(self):
yield textHandler(UInt32(self, "file_crc32",
"Checksum (CRC32)"), hexadecimal)
yield filesizeHandler(UInt32(self, "file_compressed_size",
"Compressed size (bytes)"))
yield filesizeHandler(UInt32(self, "file_uncompressed_size",
"Uncompressed size (bytes)"))
class FileEntry(FieldSet):
HEADER = 0x04034B50
filename = None
def data(self, size):
compression = self["compression"].value
if compression == 0:
return SubFile(self, "data", size, filename=self.filename)
compressed = SubFile(self, "compressed_data", size, filename=self.filename)
if compression == COMPRESSION_DEFLATE:
return Deflate(compressed)
else:
return compressed
def resync(self):
# Non-seekable output, search the next data descriptor
size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False,
self.absolute_address+self.current_size)
if size <= 0:
raise ParserError("Couldn't resync to %s" %
ZipDataDescriptor.HEADER_STRING)
yield self.data(size)
yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor")
#self.info("Resynced!")
yield data_desc
# The above could be checked anytime, but we prefer trying parsing
# than aborting
if self["crc32"].value == 0 and \
data_desc["file_compressed_size"].value != size:
raise ParserError("Bad resync: position=>%i but data_desc=>%i" %
(size, data_desc["file_compressed_size"].value))
def createFields(self):
for field in ZipStartCommonFields(self):
yield field
length = self["filename_length"].value
if length:
filename = String(self, "filename", length, "Filename",
charset=zipGetCharset(self))
yield filename
self.filename = filename.value
if self["extra_length"].value:
yield RawBytes(self, "extra", self["extra_length"].value, "Extra")
size = self["compressed_size"].value
if size > 0:
yield self.data(size)
elif self["flags/incomplete"].value:
for field in self.resync():
yield field
if self["flags/has_descriptor"].value:
yield ZipDataDescriptor(self, "data_desc", "Data descriptor")
def createDescription(self):
return "File entry: %s (%s)" % \
(self["filename"].value, self["compressed_size"].display)
def validate(self):
if self["compression"].value not in COMPRESSION_METHOD:
return "Unknown compression method (%u)" % self["compression"].value
return ""
class ZipSignature(FieldSet):
HEADER = 0x05054B50
def createFields(self):
yield PascalString16(self, "signature", "Signature")
class Zip64EndCentralDirectoryLocator(FieldSet):
HEADER = 0x07064b50
def createFields(self):
yield UInt32(self, "disk_number", \
"Number of the disk with the start of the zip64 end of central directory")
yield UInt64(self, "relative_offset", \
"Relative offset of the zip64 end of central directory record")
yield UInt32(self, "disk_total_number", "Total number of disks")
class ZipFile(Parser):
endian = LITTLE_ENDIAN
MIME_TYPES = {
# Default ZIP archive
u"application/zip": "zip",
u"application/x-zip": "zip",
# Java archive (JAR)
u"application/x-jar": "jar",
u"application/java-archive": "jar",
# OpenOffice 1.0
u"application/vnd.sun.xml.calc": "sxc",
u"application/vnd.sun.xml.draw": "sxd",
u"application/vnd.sun.xml.impress": "sxi",
u"application/vnd.sun.xml.writer": "sxw",
u"application/vnd.sun.xml.math": "sxm",
# OpenOffice 1.0 (template)
u"application/vnd.sun.xml.calc.template": "stc",
u"application/vnd.sun.xml.draw.template": "std",
u"application/vnd.sun.xml.impress.template": "sti",
u"application/vnd.sun.xml.writer.template": "stw",
u"application/vnd.sun.xml.writer.global": "sxg",
# OpenDocument
u"application/vnd.oasis.opendocument.chart": "odc",
u"application/vnd.oasis.opendocument.image": "odi",
u"application/vnd.oasis.opendocument.database": "odb",
u"application/vnd.oasis.opendocument.formula": "odf",
u"application/vnd.oasis.opendocument.graphics": "odg",
u"application/vnd.oasis.opendocument.presentation": "odp",
u"application/vnd.oasis.opendocument.spreadsheet": "ods",
u"application/vnd.oasis.opendocument.text": "odt",
u"application/vnd.oasis.opendocument.text-master": "odm",
# OpenDocument (template)
u"application/vnd.oasis.opendocument.graphics-template": "otg",
u"application/vnd.oasis.opendocument.presentation-template": "otp",
u"application/vnd.oasis.opendocument.spreadsheet-template": "ots",
u"application/vnd.oasis.opendocument.text-template": "ott",
}
PARSER_TAGS = {
"id": "zip",
"category": "archive",
"file_ext": tuple(MIME_TYPES.itervalues()),
"mime": tuple(MIME_TYPES.iterkeys()),
"magic": (("PK\3\4", 0),),
"subfile": "skip",
"min_size": (4 + 26)*8, # header + file entry
"description": "ZIP archive"
}
def validate(self):
if self["header[0]"].value != FileEntry.HEADER:
return "Invalid magic"
try:
file0 = self["file[0]"]
        except HACHOIR_ERRORS, err:
            return "Unable to get file #0: %s" % err
err = file0.validate()
if err:
return "File #0: %s" % err
return True
def createFields(self):
# File data
self.signature = None
self.central_directory = []
while not self.eof:
header = textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
yield header
header = header.value
if header == FileEntry.HEADER:
yield FileEntry(self, "file[]")
elif header == ZipDataDescriptor.HEADER:
yield ZipDataDescriptor(self, "spanning[]")
            elif header == 0x30304b50:
                # "PK00" marker some tools write before spanned archive data
                yield ZipDataDescriptor(self, "temporary_spanning[]")
elif header == ZipCentralDirectory.HEADER:
yield ZipCentralDirectory(self, "central_directory[]")
elif header == ZipEndCentralDirectory.HEADER:
yield ZipEndCentralDirectory(self, "end_central_directory", "End of central directory")
elif header == Zip64EndCentralDirectory.HEADER:
yield Zip64EndCentralDirectory(self, "end64_central_directory", "ZIP64 end of central directory")
elif header == ZipSignature.HEADER:
yield ZipSignature(self, "signature", "Signature")
elif header == Zip64EndCentralDirectoryLocator.HEADER:
                yield Zip64EndCentralDirectoryLocator(self, "end_locator", "ZIP64 end of central directory locator")
else:
raise ParserError("Error, unknown ZIP header (0x%08X)." % header)
def createMimeType(self):
if self["file[0]/filename"].value == "mimetype":
return makeUnicode(self["file[0]/data"].value)
else:
return u"application/zip"
def createFilenameSuffix(self):
if self["file[0]/filename"].value == "mimetype":
mime = self["file[0]/compressed_data"].value
if mime in self.MIME_TYPES:
return "." + self.MIME_TYPES[mime]
return ".zip"
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
end = self.stream.searchBytes("PK\5\6", start, end)
if end is not None:
return end + 22*8
return None
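# Note: 22 is the minimal End of Central Directory record size in bytes:
# signature(4) + four UInt16 + two UInt32 + comment_length(2), hence pos + 22*8.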
| 17,522 | Python | .py | 386 | 36.031088 | 116 | 0.618836 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,553 | __init__.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/__init__.py | from lib.hachoir_parser.archive.ace import AceFile
from lib.hachoir_parser.archive.ar import ArchiveFile
from lib.hachoir_parser.archive.bzip2_parser import Bzip2Parser
from lib.hachoir_parser.archive.cab import CabFile
from lib.hachoir_parser.archive.gzip_parser import GzipParser
from lib.hachoir_parser.archive.tar import TarFile
from lib.hachoir_parser.archive.zip import ZipFile
from lib.hachoir_parser.archive.rar import RarFile
from lib.hachoir_parser.archive.rpm import RpmFile
from lib.hachoir_parser.archive.sevenzip import SevenZipParser
from lib.hachoir_parser.archive.mar import MarFile
| 601 | Python | .py | 11 | 53.545455 | 63 | 0.865874 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,554 | rpm.py | midgetspy_Sick-Beard/lib/hachoir_parser/archive/rpm.py | """
RPM archive parser.
Author: Victor Stinner, 1st December 2005.
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, UInt64, Enum,
NullBytes, Bytes, RawBytes, SubFile,
Character, CString, String)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_parser.archive.gzip_parser import GzipParser
from lib.hachoir_parser.archive.bzip2_parser import Bzip2Parser
class ItemContent(FieldSet):
format_type = {
0: UInt8,
1: Character,
2: UInt8,
3: UInt16,
4: UInt32,
5: UInt64,
6: CString,
7: RawBytes,
8: CString,
9: CString
}
def __init__(self, parent, name, item):
FieldSet.__init__(self, parent, name, item.description)
self.related_item = item
self._name = "content_%s" % item.name
def createFields(self):
item = self.related_item
type = item["type"].value
cls = self.format_type[type]
count = item["count"].value
if cls is RawBytes: # or type == 8:
if cls is RawBytes:
args = (self, "value", count)
else:
args = (self, "value") # cls is CString
count = 1
else:
if 1 < count:
args = (self, "value[]")
else:
args = (self, "value")
for index in xrange(count):
yield cls(*args)
class Item(FieldSet):
type_name = {
0: "NULL",
1: "CHAR",
2: "INT8",
3: "INT16",
4: "INT32",
5: "INT64",
6: "CSTRING",
7: "BIN",
8: "CSTRING_ARRAY",
9: "CSTRING?"
}
tag_name = {
1000: "File size",
1001: "(Broken) MD5 signature",
1002: "PGP 2.6.3 signature",
1003: "(Broken) MD5 signature",
1004: "MD5 signature",
1005: "GnuPG signature",
1006: "PGP5 signature",
1007: "Uncompressed payload size (bytes)",
256+8: "Broken SHA1 header digest",
256+9: "Broken SHA1 header digest",
256+13: "Broken SHA1 header digest",
256+11: "DSA header signature",
256+12: "RSA header signature"
}
def __init__(self, parent, name, description=None, tag_name_dict=None):
FieldSet.__init__(self, parent, name, description)
if tag_name_dict is None:
tag_name_dict = Item.tag_name
self.tag_name_dict = tag_name_dict
def createFields(self):
yield Enum(UInt32(self, "tag", "Tag"), self.tag_name_dict)
yield Enum(UInt32(self, "type", "Type"), Item.type_name)
yield UInt32(self, "offset", "Offset")
yield UInt32(self, "count", "Count")
def createDescription(self):
return "Item: %s (%s)" % (self["tag"].display, self["type"].display)
class ItemHeader(Item):
tag_name = {
61: "Current image",
62: "Signatures",
63: "Immutable",
64: "Regions",
100: "I18N string locales",
1000: "Name",
1001: "Version",
1002: "Release",
1003: "Epoch",
1004: "Summary",
1005: "Description",
1006: "Build time",
1007: "Build host",
1008: "Install time",
1009: "Size",
1010: "Distribution",
1011: "Vendor",
1012: "Gif",
1013: "Xpm",
1014: "Licence",
1015: "Packager",
1016: "Group",
1017: "Changelog",
1018: "Source",
1019: "Patch",
1020: "Url",
1021: "OS",
1022: "Arch",
1023: "Prein",
1024: "Postin",
1025: "Preun",
1026: "Postun",
1027: "Old filenames",
1028: "File sizes",
1029: "File states",
1030: "File modes",
1031: "File uids",
1032: "File gids",
1033: "File rdevs",
1034: "File mtimes",
1035: "File MD5s",
1036: "File link to's",
1037: "File flags",
1038: "Root",
1039: "File username",
1040: "File groupname",
1043: "Icon",
1044: "Source rpm",
1045: "File verify flags",
1046: "Archive size",
1047: "Provide name",
1048: "Require flags",
1049: "Require name",
1050: "Require version",
1051: "No source",
1052: "No patch",
1053: "Conflict flags",
1054: "Conflict name",
1055: "Conflict version",
1056: "Default prefix",
1057: "Build root",
1058: "Install prefix",
1059: "Exclude arch",
1060: "Exclude OS",
1061: "Exclusive arch",
1062: "Exclusive OS",
1064: "RPM version",
1065: "Trigger scripts",
1066: "Trigger name",
1067: "Trigger version",
1068: "Trigger flags",
1069: "Trigger index",
1079: "Verify script",
#TODO: Finish the list (id 1070..1162 using rpm library source code)
}
def __init__(self, parent, name, description=None):
Item.__init__(self, parent, name, description, self.tag_name)
def sortRpmItem(a,b):
return int( a["offset"].value - b["offset"].value )
class PropertySet(FieldSet):
def __init__(self, parent, name, *args):
FieldSet.__init__(self, parent, name, *args)
self._size = self["content_item[1]"].address + self["size"].value * 8
def createFields(self):
# Read chunk header
yield Bytes(self, "signature", 3, r"Property signature (\x8E\xAD\xE8)")
if self["signature"].value != "\x8E\xAD\xE8":
raise ParserError("Invalid property signature")
yield UInt8(self, "version", "Signature version")
yield NullBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "count", "Count")
yield UInt32(self, "size", "Size")
# Read item header
items = []
for i in range(0, self["count"].value):
item = ItemHeader(self, "item[]")
yield item
items.append(item)
# Sort items by their offset
items.sort( sortRpmItem )
# Read item content
start = self.current_size/8
for item in items:
offset = item["offset"].value
diff = offset - (self.current_size/8 - start)
if 0 < diff:
yield NullBytes(self, "padding[]", diff)
yield ItemContent(self, "content[]", item)
size = start + self["size"].value - self.current_size/8
if 0 < size:
yield NullBytes(self, "padding[]", size)
class RpmFile(Parser):
PARSER_TAGS = {
"id": "rpm",
"category": "archive",
"file_ext": ("rpm",),
"mime": (u"application/x-rpm",),
"min_size": (96 + 16 + 16)*8, # file header + checksum + content header
"magic": (('\xED\xAB\xEE\xDB', 0),),
"description": "RPM package"
}
TYPE_NAME = {
0: "Binary",
1: "Source"
}
endian = BIG_ENDIAN
def validate(self):
if self["signature"].value != '\xED\xAB\xEE\xDB':
return "Invalid signature"
if self["major_ver"].value != 3:
return "Unknown major version (%u)" % self["major_ver"].value
if self["type"].value not in self.TYPE_NAME:
return "Invalid RPM type"
return True
def createFields(self):
yield Bytes(self, "signature", 4, r"RPM file signature (\xED\xAB\xEE\xDB)")
yield UInt8(self, "major_ver", "Major version")
yield UInt8(self, "minor_ver", "Minor version")
yield Enum(UInt16(self, "type", "RPM type"), RpmFile.TYPE_NAME)
yield UInt16(self, "architecture", "Architecture")
yield String(self, "name", 66, "Archive name", strip="\0", charset="ASCII")
yield UInt16(self, "os", "OS")
yield UInt16(self, "signature_type", "Type of signature")
yield NullBytes(self, "reserved", 16, "Reserved")
yield PropertySet(self, "checksum", "Checksum (signature)")
yield PropertySet(self, "header", "Header")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
size = (self._size - self.current_size) // 8
if size:
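# The payload that follows the headers is normally a compressed cpio
# archive; sniff the bzip2 magic ("BZh") and assume gzip otherwise.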
if 3 <= size and self.stream.readBytes(self.current_size, 3) == "BZh":
yield SubFile(self, "content", size, "bzip2 content", parser=Bzip2Parser)
else:
yield SubFile(self, "content", size, "gzip content", parser=GzipParser)
| 8,588 | Python | .py | 243 | 26.588477 | 89 | 0.555582 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,555 | __init__.py | midgetspy_Sick-Beard/lib/httplib2/__init__.py | from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
import errno
# Avoid the deprecation warning on Python 2.6 by preferring hashlib
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
# Try using local version, followed by system, and none if neither are found
try:
import lib.socks as socks
except ImportError:
try:
import socks as socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
_ssl_wrap_socket = ssl.wrap_socket
except ImportError:
def _ssl_wrap_socket(sock, key_file, cert_file):
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200, maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
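# Example (illustrative):
#   parse_uri("http://example.com/a?b=1#frag")
#   -> ('http', 'example.com', '/a', 'b=1', 'frag')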
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
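# Example (illustrative): scheme and authority are lowercased, an empty
# path becomes "/", and the fragment is dropped:
#   urlnorm("HTTP://Example.COM?q=1")
#   -> ('http', 'example.com', '/?q=1', 'http://example.com/?q=1')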
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
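# Example (illustrative): header names are lowercased and runs of linear
# whitespace collapse to a single space:
#   _normalize_headers({'Content-Type': 'text/html', 'X-Foo': 'a \t b'})
#   -> {'content-type': 'text/html', 'x-foo': 'a b'}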
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
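# Example (illustrative):
#   _parse_cache_control({'cache-control': 'max-age=3600, no-cache'})
#   -> {'max-age': '3600', 'no-cache': 1}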
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn it on; useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
return retval
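# Example (illustrative):
#   _parse_www_authenticate({'www-authenticate': 'Digest realm="me", nonce="abc"'})
#   -> {'digest': {'realm': 'me', 'nonce': 'abc'}}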
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
As a design decision we will never return a stale
document as fresh, hence the non-implementation
of 'max-stale'. This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
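# Example (illustrative): a response whose headers carry a current Date and
# 'cache-control: max-age=3600' is "FRESH" for the next hour, while a request
# sent with 'cache-control: no-cache' always yields "TRANSPARENT".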
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
# Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
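# Example (illustrative): given response == {'content-encoding': 'gzip'} and
# new_content holding gzipped bytes, _decompressContent() returns the inflated
# body, updates 'content-length', and renames the header to
# '-content-encoding' so the cache records what happened on the wire.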
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things. First,
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Override this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
Override this in sub-classes if necessary.
Return True if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['Authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
# Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError, e:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
self.proxy_user, self.proxy_pass)
def isgood(self):
return socks and (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""HTTPConnection subclass that supports timeouts"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if self.proxy_info and self.proxy_info.isgood():
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(*self.proxy_info.astuple())
else:
self.sock = socket.socket(af, socktype, proto)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
self.sock.connect(sa)
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"This class allows communication via SSL."
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"Connect to a host on a given (SSL) port."
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock = _ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
"""The value of proxy_info is a ProxyInfo instance.
If 'cache' is a string then it is used as a directory name
for a disk cache. Otherwise it must be an object that supports
the same interface as FileCache."""
self.proxy_info = proxy_info
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, str):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT"]
# If 'follow_redirects' is True, and this is set to True then
# all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(2):
try:
if conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if conn.sock is None:
if i == 0:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i == 0:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
raise RedirectLimit( _("Redirected more times than redirection_limit allows."), response, content)
elif response.status in [200, 203] and method == "GET":
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
The maximum number of redirects to follow before raising an
exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
certs = list(self.certificates.iter(authority))
if scheme == 'https' and certs:
conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
else:
conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
conn.set_debuglevel(debuglevel)
if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, '') != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout) or (isinstance(e, socket.error) and 'timed out' in str(e)):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key] = value
self.status = int(self.get('status', self.status))
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
| 52,224 | Python | .py | 1,052 | 38.634981 | 235 | 0.598332 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,556 | iri2uri.py | midgetspy_Sick-Beard/lib/httplib2/iri2uri.py | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
escape_range = [
(0xA0, 0xD7FF ),
(0xE000, 0xF8FF ),
(0xF900, 0xFDCF ),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD ),
(0x20000, 0x2FFFD ),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD ),
(0x50000, 0x5FFFD ),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD ),
(0x80000, 0x8FFFD ),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD ),
(0xB0000, 0xBFFFD ),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD ),
(0xE1000, 0xEFFFD),
(0xF0000, 0xFFFFD ),
(0x100000, 0x10FFFD)
]
def encode(c):
retval = c
i = ord(c)
for low, high in escape_range:
if i < low:
break
if i >= low and i <= high:
retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
break
return retval
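# Example (illustrative): encode(u'\u2604') -> '%E2%98%84'; U+2604 falls in
# the 0xA0-0xD7FF ucschar range, so each UTF-8 octet is percent-escaped.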
def iri2uri(uri):
"""Convert an IRI to a URI. Note that IRIs must be
passed in a unicode strings. That is, do not utf-8 encode
the IRI before passing it into the function."""
if isinstance(uri ,unicode):
(scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
authority = authority.encode('idna')
# For each character in 'ucschar' or 'iprivate'
# 1. encode as utf-8
# 2. then %-encode each octet of that utf-8
uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
uri = "".join([encode(c) for c in uri])
return uri
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test_uris(self):
"""Test that URIs are invariant under the transformation."""
invariant = [
u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
u"http://www.ietf.org/rfc/rfc2396.txt",
u"ldap://[2001:db8::7]/c=GB?objectClass?one",
u"mailto:[email protected]",
u"news:comp.infosystems.www.servers.unix",
u"tel:+1-816-555-1212",
u"telnet://192.0.2.16:80/",
u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
for uri in invariant:
self.assertEqual(uri, iri2uri(uri))
def test_iri(self):
""" Test that the right type of escaping is done for each part of the URI."""
self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
unittest.main()
| 3,850 | Python | .py | 94 | 34.06383 | 143 | 0.598385 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,557 | __init__.py | midgetspy_Sick-Beard/lib/socks/__init__.py | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import re
import socket
import struct
import sys
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_REGEX = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*):([^/?#]*))?")
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def parseproxyuri(proxyurl):
"""Parses a http proxy uri in the format x://a.b.c.d:port
(protocol, addr, port) = parseproxyuri(uri)
"""
groups = PROXY_REGEX.match(proxyurl).groups()
return (groups[1], groups[3], groups[4])
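# Example (illustrative):
#   parseproxyuri("http://127.0.0.1:8080") -> ('http', '127.0.0.1', '8080')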
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
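# Typical usage (illustrative):
#   setdefaultproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)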
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
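# (Added comment) On the wire, the two greetings above follow RFC 1928:
#   05 02 00 02  -> version 5, 2 methods offered: 0x00 no-auth, 0x02 user/pass
#   05 01 00     -> version 5, 1 method offered: 0x00 no-auth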
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1:2] == chr(0xFF):
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
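# (Added comment) The assembled request follows the RFC 1928 layout
# VER CMD RSV ATYP DST.ADDR DST.PORT; e.g. a CONNECT to 93.184.216.34:80
# is the byte sequence 05 01 00 01 5D B8 D8 22 00 50.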
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
| 16,505 | Python | .py | 359 | 36.941504 | 136 | 0.623068 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,558 | gntp.py | midgetspy_Sick-Beard/lib/growl/gntp.py | import re
import hashlib
import time
import platform
__version__ = '0.1'
class BaseError(Exception):
pass
class ParseError(BaseError):
def gntp_error(self):
error = GNTPError(errorcode=500,errordesc='Error parsing the message')
return error.encode()
class AuthError(BaseError):
def gntp_error(self):
error = GNTPError(errorcode=400,errordesc='Error with authorization')
return error.encode()
class UnsupportedError(BaseError):
def gntp_error(self):
error = GNTPError(errorcode=500,errordesc='Currently unsupported by gntp.py')
return error.encode()
class _GNTPBase(object):
def __init__(self,messagetype):
self.info = {
'version':'1.0',
'messagetype':messagetype,
'encryptionAlgorithmID':None
}
self.requiredHeaders = []
self.headers = {}
def add_origin_info(self):
self.add_header('Origin-Machine-Name',platform.node())
self.add_header('Origin-Software-Name','gntp.py')
self.add_header('Origin-Software-Version',__version__)
self.add_header('Origin-Platform-Name',platform.system())
self.add_header('Origin-Platform-Version',platform.platform())
def send(self):
print self.encode()
def __str__(self):
return self.encode()
def parse_info(self,data):
'''
Parse the first line of a GNTP message to get security and other info values
@param data: GNTP Message
@return: GNTP Message information in a dictionary
'''
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
match = re.match('GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)'+
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?'+
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n', data,re.IGNORECASE)
if not match:
raise ParseError('ERROR_PARSING_INFO_LINE')
info = match.groupdict()
if info['encryptionAlgorithmID'] == 'NONE':
info['encryptionAlgorithmID'] = None
return info
def set_password(self,password,encryptAlgo='MD5'):
'''
Set a password for a GNTP Message
@param password: Null to clear password
@param encryptAlgo: Currently only supports MD5
@todo: Support other hash functions
'''
self.password = password
if not password:
self.info['encryptionAlgorithmID'] = None
self.info['keyHashAlgorithmID'] = None
return
password = password.encode('utf8')
seed = time.ctime()
salt = hashlib.md5(seed).hexdigest()
saltHash = hashlib.md5(seed).digest()
keyBasis = password+saltHash
key = hashlib.md5(keyBasis).digest()
keyHash = hashlib.md5(key).hexdigest()
self.info['keyHashAlgorithmID'] = encryptAlgo.upper()
self.info['keyHash'] = keyHash.upper()
self.info['salt'] = salt.upper()
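# (Added comment) Worked sketch of the derivation above; only the salt and
# keyHash are ever transmitted, never the key itself:
#   salt_raw = MD5(seed).digest()
#   key      = MD5(password_utf8 + salt_raw).digest()
#   keyHash  = MD5(key).hexdigest()  -> sent upper-cased, with the salt in hex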
def _decode_hex(self,value):
'''
Helper function to decode a hex string into its raw byte string
@param value: Hex string to decode
@return: Raw byte string
'''
result = ''
for i in range(0,len(value),2):
tmp = int(value[i:i+2],16)
result += chr(tmp)
return result
def _decode_binary(self,rawIdentifier,identifier):
rawIdentifier += '\r\n\r\n'
dataLength = int(identifier['Length'])
pointerStart = self.raw.find(rawIdentifier)+len(rawIdentifier)
pointerEnd = pointerStart + dataLength
data = self.raw[pointerStart:pointerEnd]
if not len(data) == dataLength:
raise ParseError('INVALID_DATA_LENGTH Expected: %s Received: %s'%(dataLength,len(data)))
return data
def validate_password(self,password):
'''
Validate GNTP Message against stored password
'''
self.password = password
keyHash = self.info.get('keyHash',None)
if keyHash is None and self.password is None:
return True
if keyHash is None:
raise AuthError('Invalid keyHash')
if self.password is None:
raise AuthError('Missing password')
password = self.password.encode('utf8')
saltHash = self._decode_hex(self.info['salt'])
keyBasis = password+saltHash
key = hashlib.md5(keyBasis).digest()
keyHash = hashlib.md5(key).hexdigest()
if not keyHash.upper() == self.info['keyHash'].upper():
raise AuthError('Invalid Hash')
return True
def validate(self):
'''
Verify required headers
'''
for header in self.requiredHeaders:
if not self.headers.get(header,False):
raise ParseError('Missing Notification Header: '+header)
def format_info(self):
'''
Generate info line for GNTP Message
@return: Info line string
'''
info = u'GNTP/%s %s'%(
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID',None):
info += ' %s:%s'%(
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info+=' NONE'
if self.info.get('keyHashAlgorithmID',None):
info += ' %s:%s.%s'%(
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info
def parse_dict(self,data):
'''
Helper function to parse blocks of GNTP headers into a dictionary
@param data:
@return: Dictionary of headers
'''
values = {}
for line in data.split('\r\n'):
match = re.match('([\w-]+):(.+)', line)
if not match: continue
key = match.group(1).strip()
val = match.group(2).strip()
values[key] = val
#print key,'\t\t\t',val
return values
def add_header(self,key,value):
self.headers[key] = value
def decode(self,data,password=None):
'''
Decode GNTP Message
@param data:
'''
self.password = password
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self.parse_info(data)
self.headers = self.parse_dict(parts[0])
def encode(self):
'''
Encode a GNTP Message
@return: GNTP Message ready to be sent
'''
self.validate()
SEP = u': '
EOL = u'\r\n'
message = self.format_info() + EOL
#Headers
for k,v in self.headers.iteritems():
message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
message += EOL
return message
class GNTPRegister(_GNTPBase):
'''
GNTP Registration Message
'''
def __init__(self,data=None,password=None):
'''
@param data: (Optional) See decode()
@param password: (Optional) Password to use while encoding/decoding messages
'''
_GNTPBase.__init__(self,'REGISTER')
self.notifications = []
self.resources = {}
self.requiredHeaders = [
'Application-Name',
'Notifications-Count'
]
self.requiredNotification = [
'Notification-Name',
]
if data:
self.decode(data,password)
else:
self.set_password(password)
self.headers['Application-Name'] = 'pygntp'
self.headers['Notifications-Count'] = 0
self.add_origin_info()
def validate(self):
'''
Validate required headers and validate notification headers
'''
for header in self.requiredHeaders:
if not self.headers.get(header,False):
raise ParseError('Missing Registration Header: '+header)
for notice in self.notifications:
for header in self.requiredNotification:
if not notice.get(header,False):
raise ParseError('Missing Notification Header: '+header)
def decode(self,data,password):
'''
Decode existing GNTP Registration message
@param data: Message to decode.
'''
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self.parse_info(data)
self.validate_password(password)
self.headers = self.parse_dict(parts[0])
for i,part in enumerate(parts):
if i==0: continue #Skip Header
if part.strip()=='': continue
notice = self.parse_dict(part)
if notice.get('Notification-Name',False):
self.notifications.append(notice)
elif notice.get('Identifier',False):
notice['Data'] = self._decode_binary(part,notice)
#open('register.png','wblol').write(notice['Data'])
self.resources[ notice.get('Identifier') ] = notice
def add_notification(self,name,enabled=True):
'''
Add new Notification to Registration message
@param name: Notification Name
@param enabled: Default Notification to Enabled
'''
notice = {}
notice['Notification-Name'] = name
notice['Notification-Enabled'] = str(enabled)
self.notifications.append(notice)
self.headers['Notifications-Count'] = len(self.notifications)
def encode(self):
'''
Encode a GNTP Registration Message
@return: GNTP Registration Message ready to be sent
'''
self.validate()
SEP = u': '
EOL = u'\r\n'
message = self.format_info() + EOL
#Headers
for k,v in self.headers.iteritems():
message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
#Notifications
if len(self.notifications)>0:
for notice in self.notifications:
message += EOL
for k,v in notice.iteritems():
message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
message += EOL
return message
class GNTPNotice(_GNTPBase):
'''
GNTP Notification Message
'''
def __init__(self,data=None,app=None,name=None,title=None,password=None):
'''
@param data: (Optional) See decode()
@param app: (Optional) Set Application-Name
@param name: (Optional) Set Notification-Name
@param title: (Optional) Set Notification Title
@param password: (Optional) Password to use while encoding/decoding messages
'''
_GNTPBase.__init__(self,'NOTIFY')
self.resources = {}
self.requiredHeaders = [
'Application-Name',
'Notification-Name',
'Notification-Title'
]
if data:
self.decode(data,password)
else:
self.set_password(password)
if app:
self.headers['Application-Name'] = app
if name:
self.headers['Notification-Name'] = name
if title:
self.headers['Notification-Title'] = title
self.add_origin_info()
def decode(self,data,password):
'''
Decode existing GNTP Notification message
@param data: Message to decode.
'''
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self.parse_info(data)
self.validate_password(password)
self.headers = self.parse_dict(parts[0])
for i,part in enumerate(parts):
if i==0: continue #Skip Header
if part.strip()=='': continue
notice = self.parse_dict(part)
if notice.get('Identifier',False):
notice['Data'] = self._decode_binary(part,notice)
#open('notice.png','wblol').write(notice['Data'])
self.resources[ notice.get('Identifier') ] = notice
def encode(self):
'''
Encode a GNTP Notification Message
@return: GNTP Notification Message ready to be sent
'''
self.validate()
SEP = u': '
EOL = u'\r\n'
message = self.format_info() + EOL
#Headers
for k,v in self.headers.iteritems():
message += k + SEP + unicode(v) + EOL
message += EOL
return message.encode('utf-8')
class GNTPSubscribe(_GNTPBase):
def __init__(self,data=None,password=None):
_GNTPBase.__init__(self, 'SUBSCRIBE')
self.requiredHeaders = [
'Subscriber-ID',
'Subscriber-Name',
]
if data:
self.decode(data,password)
else:
self.set_password(password)
self.add_origin_info()
class GNTPOK(_GNTPBase):
def __init__(self,data=None,action=None):
'''
@param data: (Optional) See _GNTPResponse.decode()
@param action: (Optional) Set type of action the OK Response is for
'''
_GNTPBase.__init__(self,'-OK')
self.requiredHeaders = ['Response-Action']
if data:
self.decode(data)
if action:
self.headers['Response-Action'] = action
self.add_origin_info()
class GNTPError(_GNTPBase):
def __init__(self,data=None,errorcode=None,errordesc=None):
'''
@param data: (Optional) See _GNTPResponse.decode()
@param errorcode: (Optional) Error code
@param errordesc: (Optional) Error Description
'''
_GNTPBase.__init__(self,'-ERROR')
self.requiredHeaders = ['Error-Code','Error-Description']
if data:
self.decode(data)
if errorcode:
self.headers['Error-Code'] = errorcode
self.headers['Error-Description'] = errordesc
self.add_origin_info()
def parse_gntp(data,password=None,debug=False):
'''
Attempt to parse a message as a GNTP message
@param data: Message to be parsed
@param password: Optional password to be used to verify the message
@param debug: Print out extra debugging information
'''
match = re.match('GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',data,re.IGNORECASE)
if not match:
if debug:
print '----'
print data
print '----'
raise ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data,password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data,password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data,password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
if debug: print info
raise ParseError('INVALID_GNTP_MESSAGE')
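# Illustrative usage sketch (added comment; application and notification
# names below are hypothetical):
#
#   register = GNTPRegister(password='secret')
#   register.add_header('Application-Name', 'MyApp')
#   register.add_notification('Download Complete')
#   payload = register.encode()   # wire-ready GNTP REGISTER message
#
#   notice = GNTPNotice(app='MyApp', name='Download Complete',
#                       title='Done', password='secret')
#   payload = notice.encode()     # wire-ready GNTP NOTIFY message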
| 12,750 | Python | .py | 405 | 28.130864 | 122 | 0.704774 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,559 | gntp_bridge.py | midgetspy_Sick-Beard/lib/growl/gntp_bridge.py | from gntp import *
import urllib
import sys
if sys.version_info >= (2, 7, 9):
import ssl
import Growl
def register_send(self):
'''
Resend a GNTP Register message to Growl running on a local OSX Machine
'''
print 'Sending Local Registration'
#Local growls only need a list of strings
notifications=[]
defaultNotifications = []
for notice in self.notifications:
notifications.append(notice['Notification-Name'])
if notice.get('Notification-Enabled',True):
defaultNotifications.append(notice['Notification-Name'])
appIcon = get_resource(self,'Application-Icon')
growl = Growl.GrowlNotifier(
applicationName = self.headers['Application-Name'],
notifications = notifications,
defaultNotifications = defaultNotifications,
applicationIcon = appIcon,
)
growl.register()
return self.encode()
def notice_send(self):
'''
Resend a GNTP Notify message to Growl running on a local OSX Machine
'''
print 'Sending Local Notification'
growl = Growl.GrowlNotifier(
applicationName = self.headers['Application-Name'],
notifications = [self.headers['Notification-Name']]
)
noticeIcon = get_resource(self,'Notification-Icon')
growl.notify(
noteType = self.headers['Notification-Name'],
title = self.headers['Notification-Title'],
description=self.headers.get('Notification-Text',''),
icon=noticeIcon
)
return self.encode()
def get_resource(self,key):
try:
resource = self.headers.get(key,'')
if resource.startswith('x-growl-resource://'):
resource = resource.split('://')
return self.resources.get(resource[1])['Data']
elif resource.startswith('http'):
resource = resource.replace(' ', '%20')
if sys.version_info >= (2, 7, 9):
icon = urllib.urlopen(resource, context=ssl._create_unverified_context())
else:
icon = urllib.urlopen(resource)
return icon.read()
else:
return None
except Exception,e:
print e
return None
GNTPRegister.send = register_send
GNTPNotice.send = notice_send
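# (Added comment) The two assignments above monkey-patch gntp's message
# classes, so calling .send() on a GNTPRegister/GNTPNotice delivers through
# the local Growl framework bindings instead of just printing the encoded
# message as the base class does.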
| 1,967 | Python | .py | 63 | 28.492063 | 77 | 0.746956 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,560 | program.py | midgetspy_Sick-Beard/lib/hachoir_metadata/program.py | from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor
from lib.hachoir_parser.program import ExeFile
from lib.hachoir_metadata.safe import fault_tolerant, getValue
class ExeMetadata(RootMetadata):
KEY_TO_ATTR = {
u"ProductName": "title",
u"LegalCopyright": "copyright",
u"LegalTrademarks": "copyright",
u"LegalTrademarks1": "copyright",
u"LegalTrademarks2": "copyright",
u"CompanyName": "author",
u"BuildDate": "creation_date",
u"FileDescription": "title",
u"ProductVersion": "version",
}
SKIP_KEY = set((u"InternalName", u"OriginalFilename", u"FileVersion", u"BuildVersion"))
def extract(self, exe):
if exe.isPE():
self.extractPE(exe)
elif exe.isNE():
self.extractNE(exe)
def extractNE(self, exe):
if "ne_header" in exe:
self.useNE_Header(exe["ne_header"])
if "info" in exe:
self.useNEInfo(exe["info"])
@fault_tolerant
def useNEInfo(self, info):
for node in info.array("node"):
if node["name"].value == "StringFileInfo":
self.readVersionInfo(node["node[0]"])
def extractPE(self, exe):
# Read information from headers
if "pe_header" in exe:
self.usePE_Header(exe["pe_header"])
if "pe_opt_header" in exe:
self.usePE_OptHeader(exe["pe_opt_header"])
# Use PE resource
resource = exe.getResource()
if resource and "version_info/node[0]" in resource:
for node in resource.array("version_info/node[0]/node"):
if getValue(node, "name") == "StringFileInfo" \
and "node[0]" in node:
self.readVersionInfo(node["node[0]"])
@fault_tolerant
def useNE_Header(self, hdr):
if hdr["is_dll"].value:
self.format_version = u"New-style executable: Dynamic-link library (DLL)"
elif hdr["is_win_app"].value:
self.format_version = u"New-style executable: Windows 3.x application"
else:
self.format_version = u"New-style executable for Windows 3.x"
@fault_tolerant
def usePE_Header(self, hdr):
self.creation_date = hdr["creation_date"].value
self.comment = "CPU: %s" % hdr["cpu"].display
if hdr["is_dll"].value:
self.format_version = u"Portable Executable: Dynamic-link library (DLL)"
else:
self.format_version = u"Portable Executable: Windows application"
@fault_tolerant
def usePE_OptHeader(self, hdr):
self.comment = "Subsystem: %s" % hdr["subsystem"].display
def readVersionInfo(self, info):
values = {}
for node in info.array("node"):
if "value" not in node or "name" not in node:
continue
value = node["value"].value.strip(" \0")
if not value:
continue
key = node["name"].value
values[key] = value
if "ProductName" in values and "FileDescription" in values:
# Make sure that FileDescription is set before ProductName
# as title value
self.title = values["FileDescription"]
self.title = values["ProductName"]
del values["FileDescription"]
del values["ProductName"]
for key, value in values.iteritems():
if key in self.KEY_TO_ATTR:
setattr(self, self.KEY_TO_ATTR[key], value)
elif key not in self.SKIP_KEY:
self.comment = "%s=%s" % (key, value)
registerExtractor(ExeFile, ExeMetadata)
| 3,658 | Python | .py | 86 | 32.534884 | 91 | 0.598932 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,561 | audio.py | midgetspy_Sick-Beard/lib/hachoir_metadata/audio.py | from lib.hachoir_metadata.metadata import (registerExtractor,
Metadata, RootMetadata, MultipleMetadata)
from lib.hachoir_parser.audio import AuFile, MpegAudioFile, RealAudioFile, AiffFile, FlacParser
from lib.hachoir_parser.container import OggFile, RealMediaFile
from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import makePrintable, timedelta2seconds, humanBitRate
from datetime import timedelta
from lib.hachoir_metadata.metadata_item import QUALITY_FAST, QUALITY_NORMAL, QUALITY_BEST
from lib.hachoir_metadata.safe import fault_tolerant, getValue
def computeComprRate(meta, size):
if not meta.has("duration") \
or not meta.has("sample_rate") \
or not meta.has("bits_per_sample") \
or not meta.has("nb_channel") \
or not size:
return
orig_size = timedelta2seconds(meta.get("duration")) * meta.get('sample_rate') * meta.get('bits_per_sample') * meta.get('nb_channel')
meta.compr_rate = float(orig_size) / size
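# (Added comment) Worked example with hypothetical numbers: a 60 s stereo
# 16-bit 44.1 kHz stream is 60 * 44100 * 16 * 2 = 84,672,000 bits
# uncompressed; a compressed size of 8,467,200 bits gives compr_rate = 10.0.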
def computeBitRate(meta):
if not meta.has("bits_per_sample") \
or not meta.has("nb_channel") \
or not meta.has("sample_rate"):
return
meta.bit_rate = meta.get('bits_per_sample') * meta.get('nb_channel') * meta.get('sample_rate')
VORBIS_KEY_TO_ATTR = {
"ARTIST": "artist",
"ALBUM": "album",
"TRACKNUMBER": "track_number",
"TRACKTOTAL": "track_total",
"ENCODER": "producer",
"TITLE": "title",
"LOCATION": "location",
"DATE": "creation_date",
"ORGANIZATION": "organization",
"GENRE": "music_genre",
"": "comment",
"COMPOSER": "music_composer",
"DESCRIPTION": "comment",
"COMMENT": "comment",
"WWW": "url",
"WOAF": "url",
"LICENSE": "copyright",
}
@fault_tolerant
def readVorbisComment(metadata, comment):
metadata.producer = getValue(comment, "vendor")
for item in comment.array("metadata"):
if "=" in item.value:
key, value = item.value.split("=", 1)
key = key.upper()
if key in VORBIS_KEY_TO_ATTR:
key = VORBIS_KEY_TO_ATTR[key]
setattr(metadata, key, value)
elif value:
metadata.warning("Skip Vorbis comment %s: %s" % (key, value))
class OggMetadata(MultipleMetadata):
def extract(self, ogg):
granule_quotient = None
for index, page in enumerate(ogg.array("page")):
if "segments" not in page:
continue
page = page["segments"]
if "vorbis_hdr" in page:
meta = Metadata(self)
self.vorbisHeader(page["vorbis_hdr"], meta)
self.addGroup("audio[]", meta, "Audio")
if not granule_quotient and meta.has("sample_rate"):
granule_quotient = meta.get('sample_rate')
if "theora_hdr" in page:
meta = Metadata(self)
self.theoraHeader(page["theora_hdr"], meta)
self.addGroup("video[]", meta, "Video")
if "video_hdr" in page:
meta = Metadata(self)
self.videoHeader(page["video_hdr"], meta)
self.addGroup("video[]", meta, "Video")
if not granule_quotient and meta.has("frame_rate"):
granule_quotient = meta.get('frame_rate')
if "comment" in page:
readVorbisComment(self, page["comment"])
if 3 <= index:
# Only process pages 0..3
break
# Compute duration
if granule_quotient and QUALITY_NORMAL <= self.quality:
page = ogg.createLastPage()
if page and "abs_granule_pos" in page:
try:
self.duration = timedelta(seconds=float(page["abs_granule_pos"].value) / granule_quotient)
except OverflowError:
pass
def videoHeader(self, header, meta):
meta.compression = header["fourcc"].display
meta.width = header["width"].value
meta.height = header["height"].value
meta.bits_per_pixel = header["bits_per_sample"].value
if header["time_unit"].value:
meta.frame_rate = 10000000.0 / header["time_unit"].value
def theoraHeader(self, header, meta):
meta.compression = "Theora"
meta.format_version = "Theora version %u.%u (revision %u)" % (\
header["version_major"].value,
header["version_minor"].value,
header["version_revision"].value)
meta.width = header["frame_width"].value
meta.height = header["frame_height"].value
if header["fps_den"].value:
meta.frame_rate = float(header["fps_num"].value) / header["fps_den"].value
if header["aspect_ratio_den"].value:
meta.aspect_ratio = float(header["aspect_ratio_num"].value) / header["aspect_ratio_den"].value
meta.pixel_format = header["pixel_format"].display
meta.comment = "Quality: %s" % header["quality"].value
def vorbisHeader(self, header, meta):
meta.compression = u"Vorbis"
meta.sample_rate = header["audio_sample_rate"].value
meta.nb_channel = header["audio_channels"].value
meta.format_version = u"Vorbis version %s" % header["vorbis_version"].value
meta.bit_rate = header["bitrate_nominal"].value
class AuMetadata(RootMetadata):
def extract(self, audio):
self.sample_rate = audio["sample_rate"].value
self.nb_channel = audio["channels"].value
self.compression = audio["codec"].display
if "info" in audio:
self.comment = audio["info"].value
self.bits_per_sample = audio.getBitsPerSample()
computeBitRate(self)
if "audio_data" in audio:
if self.has("bit_rate"):
self.duration = timedelta(seconds=float(audio["audio_data"].size) / self.get('bit_rate'))
computeComprRate(self, audio["audio_data"].size)
class RealAudioMetadata(RootMetadata):
FOURCC_TO_BITRATE = {
u"28_8": 15200, # 28.8 kbit/sec (audio bit rate: 15.2 kbit/s)
u"14_4": 8000, # 14.4 kbit/sec
u"lpcJ": 8000, # 14.4 kbit/sec
}
def extract(self, real):
version = real["version"].value
if "metadata" in real:
self.useMetadata(real["metadata"])
self.useRoot(real)
self.format_version = "Real audio version %s" % version
if version == 3:
size = getValue(real, "data_size")
elif "filesize" in real and "headersize" in real:
size = (real["filesize"].value + 40) - (real["headersize"].value + 16)
else:
size = None
if size:
size *= 8
if self.has("bit_rate"):
sec = float(size) / self.get('bit_rate')
self.duration = timedelta(seconds=sec)
computeComprRate(self, size)
@fault_tolerant
def useMetadata(self, info):
self.title = info["title"].value
self.author = info["author"].value
self.copyright = info["copyright"].value
self.comment = info["comment"].value
@fault_tolerant
def useRoot(self, real):
self.bits_per_sample = 16 # FIXME: Is that correct?
if real["version"].value != 3:
self.sample_rate = real["sample_rate"].value
self.nb_channel = real["channels"].value
else:
self.sample_rate = 8000
self.nb_channel = 1
fourcc = getValue(real, "FourCC")
if fourcc:
self.compression = fourcc
try:
self.bit_rate = self.FOURCC_TO_BITRATE[fourcc]
except LookupError:
pass
class RealMediaMetadata(MultipleMetadata):
KEY_TO_ATTR = {
"generated by": "producer",
"creation date": "creation_date",
"modification date": "last_modification",
"description": "comment",
}
def extract(self, media):
if "file_prop" in media:
self.useFileProp(media["file_prop"])
if "content_desc" in media:
self.useContentDesc(media["content_desc"])
for index, stream in enumerate(media.array("stream_prop")):
self.useStreamProp(stream, index)
@fault_tolerant
def useFileInfoProp(self, prop):
key = prop["name"].value.lower()
value = prop["value"].value
if key in self.KEY_TO_ATTR:
setattr(self, self.KEY_TO_ATTR[key], value)
elif value:
self.warning("Skip %s: %s" % (prop["name"].value, value))
@fault_tolerant
def useFileProp(self, prop):
self.bit_rate = prop["avg_bit_rate"].value
self.duration = timedelta(milliseconds=prop["duration"].value)
@fault_tolerant
def useContentDesc(self, content):
self.title = content["title"].value
self.author = content["author"].value
self.copyright = content["copyright"].value
self.comment = content["comment"].value
@fault_tolerant
def useStreamProp(self, stream, index):
meta = Metadata(self)
meta.comment = "Start: %s" % stream["stream_start"].value
if getValue(stream, "mime_type") == "logical-fileinfo":
for prop in stream.array("file_info/prop"):
self.useFileInfoProp(prop)
else:
meta.bit_rate = stream["avg_bit_rate"].value
meta.duration = timedelta(milliseconds=stream["duration"].value)
meta.mime_type = getValue(stream, "mime_type")
meta.title = getValue(stream, "desc")
self.addGroup("stream[%u]" % index, meta, "Stream #%u" % (1+index))
class MpegAudioMetadata(RootMetadata):
TAG_TO_KEY = {
# ID3 version 2.2
"TP1": "author",
"COM": "comment",
"TEN": "producer",
"TRK": "track_number",
"TAL": "album",
"TT2": "title",
"TYE": "creation_date",
"TCO": "music_genre",
# ID3 version 2.3+
"TPE1": "author",
"COMM": "comment",
"TENC": "producer",
"TRCK": "track_number",
"TALB": "album",
"TIT2": "title",
"TYER": "creation_date",
"WXXX": "url",
"TCON": "music_genre",
"TLAN": "language",
"TCOP": "copyright",
"TDAT": "creation_date",
"TRDA": "creation_date",
"TORY": "creation_date",
"TIT1": "title",
}
def processID3v2(self, field):
# Read value
if "content" not in field:
return
content = field["content"]
if "text" not in content:
return
if "title" in content and content["title"].value:
value = "%s: %s" % (content["title"].value, content["text"].value)
else:
value = content["text"].value
# Known tag?
tag = field["tag"].value
if tag not in self.TAG_TO_KEY:
if tag:
if isinstance(tag, str):
tag = makePrintable(tag, "ISO-8859-1", to_unicode=True)
self.warning("Skip ID3v2 tag %s: %s" % (tag, value))
return
key = self.TAG_TO_KEY[tag]
setattr(self, key, value)
def readID3v2(self, id3):
for field in id3:
if field.is_field_set and "tag" in field:
self.processID3v2(field)
def extract(self, mp3):
if "/frames/frame[0]" in mp3:
frame = mp3["/frames/frame[0]"]
self.nb_channel = (frame.getNbChannel(), frame["channel_mode"].display)
self.format_version = u"MPEG version %s layer %s" % \
(frame["version"].display, frame["layer"].display)
self.sample_rate = frame.getSampleRate()
self.bits_per_sample = 16
if mp3["frames"].looksConstantBitRate():
self.computeBitrate(frame)
else:
self.computeVariableBitrate(mp3)
if "id3v1" in mp3:
id3 = mp3["id3v1"]
self.comment = id3["comment"].value
self.author = id3["author"].value
self.title = id3["song"].value
self.album = id3["album"].value
if id3["year"].value != "0":
self.creation_date = id3["year"].value
if "track_nb" in id3:
self.track_number = id3["track_nb"].value
if "id3v2" in mp3:
self.readID3v2(mp3["id3v2"])
if "frames" in mp3:
computeComprRate(self, mp3["frames"].size)
def computeBitrate(self, frame):
bit_rate = frame.getBitRate() # may returns None on error
if not bit_rate:
return
self.bit_rate = (bit_rate, _("%s (constant)") % humanBitRate(bit_rate))
self.duration = timedelta(seconds=float(frame["/frames"].size) / bit_rate)
def computeVariableBitrate(self, mp3):
if self.quality <= QUALITY_FAST:
return
count = 0
if QUALITY_BEST <= self.quality:
self.warning("Process all MPEG audio frames to compute exact duration")
max_count = None
else:
max_count = 500 * self.quality
total_bit_rate = 0.0
for index, frame in enumerate(mp3.array("frames/frame")):
if index < 3:
continue
bit_rate = frame.getBitRate()
if bit_rate:
total_bit_rate += float(bit_rate)
count += 1
if max_count and max_count <= count:
break
if not count:
return
bit_rate = total_bit_rate / count
self.bit_rate = (bit_rate,
_("%s (Variable bit rate)") % humanBitRate(bit_rate))
duration = timedelta(seconds=float(mp3["frames"].size) / bit_rate)
self.duration = duration
class AiffMetadata(RootMetadata):
def extract(self, aiff):
if "common" in aiff:
self.useCommon(aiff["common"])
computeBitRate(self)
@fault_tolerant
def useCommon(self, info):
self.nb_channel = info["nb_channel"].value
self.bits_per_sample = info["sample_size"].value
self.sample_rate = getValue(info, "sample_rate")
if self.has("sample_rate"):
rate = self.get("sample_rate")
if rate:
sec = float(info["nb_sample"].value) / rate
self.duration = timedelta(seconds=sec)
if "codec" in info:
self.compression = info["codec"].display
class FlacMetadata(RootMetadata):
def extract(self, flac):
if "metadata/stream_info/content" in flac:
self.useStreamInfo(flac["metadata/stream_info/content"])
if "metadata/comment/content" in flac:
readVorbisComment(self, flac["metadata/comment/content"])
@fault_tolerant
def useStreamInfo(self, info):
self.nb_channel = info["nb_channel"].value + 1
self.bits_per_sample = info["bits_per_sample"].value + 1
self.sample_rate = info["sample_hertz"].value
sec = info["total_samples"].value
if sec:
sec = float(sec) / info["sample_hertz"].value
self.duration = timedelta(seconds=sec)
registerExtractor(AuFile, AuMetadata)
registerExtractor(MpegAudioFile, MpegAudioMetadata)
registerExtractor(OggFile, OggMetadata)
registerExtractor(RealMediaFile, RealMediaMetadata)
registerExtractor(RealAudioFile, RealAudioMetadata)
registerExtractor(AiffFile, AiffMetadata)
registerExtractor(FlacParser, FlacMetadata)
| 15,495 | Python | .py | 372 | 31.905914 | 136 | 0.588574 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,562 | jpeg.py | midgetspy_Sick-Beard/lib/hachoir_metadata/jpeg.py | from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor
from lib.hachoir_metadata.image import computeComprRate
from lib.hachoir_parser.image.exif import ExifEntry
from lib.hachoir_parser.image.jpeg import (
JpegFile, JpegChunk,
QUALITY_HASH_COLOR, QUALITY_SUM_COLOR,
QUALITY_HASH_GRAY, QUALITY_SUM_GRAY)
from lib.hachoir_core.field import MissingField
from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import makeUnicode
from lib.hachoir_metadata.safe import fault_tolerant
from datetime import datetime
def deg2float(degree, minute, second):
return degree + (float(minute) + float(second) / 60.0) / 60.0
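# (Added comment) Example: deg2float(48, 51, 24) = 48 + (51 + 24/60.0)/60.0
# = 48.8566..., roughly the latitude of Paris in decimal degrees.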
class JpegMetadata(RootMetadata):
EXIF_KEY = {
# Exif metadatas
ExifEntry.TAG_CAMERA_MANUFACTURER: "camera_manufacturer",
ExifEntry.TAG_CAMERA_MODEL: "camera_model",
ExifEntry.TAG_ORIENTATION: "image_orientation",
ExifEntry.TAG_EXPOSURE: "camera_exposure",
ExifEntry.TAG_FOCAL: "camera_focal",
ExifEntry.TAG_BRIGHTNESS: "camera_brightness",
ExifEntry.TAG_APERTURE: "camera_aperture",
# Generic metadatas
ExifEntry.TAG_IMG_TITLE: "title",
ExifEntry.TAG_SOFTWARE: "producer",
ExifEntry.TAG_FILE_TIMESTAMP: "creation_date",
ExifEntry.TAG_WIDTH: "width",
ExifEntry.TAG_HEIGHT: "height",
ExifEntry.TAG_USER_COMMENT: "comment",
}
IPTC_KEY = {
80: "author",
90: "city",
101: "country",
116: "copyright",
120: "title",
231: "comment",
}
orientation_name = {
1: _('Horizontal (normal)'),
2: _('Mirrored horizontal'),
3: _('Rotated 180'),
4: _('Mirrored vertical'),
5: _('Mirrored horizontal then rotated 90 counter-clock-wise'),
6: _('Rotated 90 clock-wise'),
7: _('Mirrored horizontal then rotated 90 clock-wise'),
8: _('Rotated 90 counter clock-wise'),
}
def extract(self, jpeg):
if "start_frame/content" in jpeg:
self.startOfFrame(jpeg["start_frame/content"])
elif "start_scan/content/nr_components" in jpeg:
self.bits_per_pixel = 8 * jpeg["start_scan/content/nr_components"].value
if "app0/content" in jpeg:
self.extractAPP0(jpeg["app0/content"])
if "exif/content" in jpeg:
for ifd in jpeg.array("exif/content/ifd"):
for entry in ifd.array("entry"):
self.processIfdEntry(ifd, entry)
self.readGPS(ifd)
if "photoshop/content" in jpeg:
psd = jpeg["photoshop/content"]
if "version/content/reader_name" in psd:
self.producer = psd["version/content/reader_name"].value
if "iptc/content" in psd:
self.parseIPTC(psd["iptc/content"])
for field in jpeg.array("comment"):
if "content/comment" in field:
self.comment = field["content/comment"].value
self.computeQuality(jpeg)
if "data" in jpeg:
computeComprRate(self, jpeg["data"].size)
if not self.has("producer") and "photoshop" in jpeg:
self.producer = u"Adobe Photoshop"
if self.has("compression"):
self.compression = "JPEG"
@fault_tolerant
def startOfFrame(self, sof):
# Set compression method
key = sof["../type"].value
self.compression = "JPEG (%s)" % JpegChunk.START_OF_FRAME[key]
# Read image size and bits/pixel
self.width = sof["width"].value
self.height = sof["height"].value
nb_components = sof["nr_components"].value
self.bits_per_pixel = 8 * nb_components
if nb_components == 3:
self.pixel_format = _("YCbCr")
elif nb_components == 1:
self.pixel_format = _("Grayscale")
self.nb_colors = 256
@fault_tolerant
def computeQuality(self, jpeg):
# This function is an adaptation to Python of ImageMagick code
# to compute JPEG quality using quantization tables
# Read quantization tables
qtlist = []
for dqt in jpeg.array("quantization"):
for qt in dqt.array("content/qt"):
# TODO: Take care of qt["index"].value?
qtlist.append(qt)
if not qtlist:
return
# Compute sum of all coefficients
sumcoeff = 0
for qt in qtlist:
coeff = qt.array("coeff")
for index in xrange(64):
sumcoeff += coeff[index].value
# Choose the right quality table and compute hash value
try:
hashval = qtlist[0]["coeff[2]"].value + qtlist[0]["coeff[53]"].value
if 2 <= len(qtlist):
hashval += qtlist[1]["coeff[0]"].value + qtlist[1]["coeff[63]"].value
hashtable = QUALITY_HASH_COLOR
sumtable = QUALITY_SUM_COLOR
else:
hashtable = QUALITY_HASH_GRAY
sumtable = QUALITY_SUM_GRAY
except (MissingField, IndexError):
# A coefficient is missing, so don't compute JPEG quality
return
# Find the JPEG quality
for index in xrange(100):
if (hashval >= hashtable[index]) or (sumcoeff >= sumtable[index]):
quality = "%s%%" % (index + 1)
if (hashval > hashtable[index]) or (sumcoeff > sumtable[index]):
quality += " " + _("(approximate)")
self.comment = "JPEG quality: %s" % quality
return
@fault_tolerant
def extractAPP0(self, app0):
self.format_version = u"JFIF %u.%02u" \
% (app0["ver_maj"].value, app0["ver_min"].value)
if "y_density" in app0:
self.width_dpi = app0["x_density"].value
self.height_dpi = app0["y_density"].value
@fault_tolerant
def processIfdEntry(self, ifd, entry):
# Skip unknown tags
tag = entry["tag"].value
if tag not in self.EXIF_KEY:
return
key = self.EXIF_KEY[tag]
if key in ("width", "height") and self.has(key):
# EXIF "valid size" are sometimes not updated when the image is scaled
# so we just ignore it
return
# Read value
if "value" in entry:
value = entry["value"].value
else:
value = ifd["value_%s" % entry.name].value
# Convert value to string
if tag == ExifEntry.TAG_ORIENTATION:
value = self.orientation_name.get(value, value)
elif tag == ExifEntry.TAG_EXPOSURE:
if not value:
return
if isinstance(value, float):
value = (value, u"1/%g" % (1/value))
elif entry["type"].value in (ExifEntry.TYPE_RATIONAL, ExifEntry.TYPE_SIGNED_RATIONAL):
value = (value, u"%.3g" % value)
# Store information
setattr(self, key, value)
@fault_tolerant
def readGPS(self, ifd):
# Read latitude and longitude
latitude_ref = None
longitude_ref = None
latitude = None
longitude = None
altitude_ref = 1
altitude = None
timestamp = None
datestamp = None
for entry in ifd.array("entry"):
tag = entry["tag"].value
if tag == ExifEntry.TAG_GPS_LATITUDE_REF:
if entry["value"].value == "N":
latitude_ref = 1
else:
latitude_ref = -1
elif tag == ExifEntry.TAG_GPS_LONGITUDE_REF:
if entry["value"].value == "E":
longitude_ref = 1
else:
longitude_ref = -1
elif tag == ExifEntry.TAG_GPS_ALTITUDE_REF:
if entry["value"].value == 1:
altitude_ref = -1
else:
altitude_ref = 1
elif tag == ExifEntry.TAG_GPS_LATITUDE:
latitude = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)]
elif tag == ExifEntry.TAG_GPS_LONGITUDE:
longitude = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)]
elif tag == ExifEntry.TAG_GPS_ALTITUDE:
altitude = ifd["value_%s" % entry.name].value
elif tag == ExifEntry.TAG_GPS_DATESTAMP:
datestamp = ifd["value_%s" % entry.name].value
elif tag == ExifEntry.TAG_GPS_TIMESTAMP:
items = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)]
items = map(int, items)
items = map(str, items)
timestamp = ":".join(items)
if latitude_ref and latitude:
value = deg2float(*latitude)
if latitude_ref < 0:
value = -value
self.latitude = value
if longitude and longitude_ref:
value = deg2float(*longitude)
if longitude_ref < 0:
value = -value
self.longitude = value
if altitude:
value = altitude
if altitude_ref < 0:
value = -value
self.altitude = value
if datestamp:
if timestamp:
datestamp += " " + timestamp
self.creation_date = datestamp
def parseIPTC(self, iptc):
datestr = hourstr = None
for field in iptc:
# Skip incomplete field
if "tag" not in field or "content" not in field:
continue
# Get value
value = field["content"].value
if isinstance(value, (str, unicode)):
value = value.replace("\r", " ")
value = value.replace("\n", " ")
# Skip unknown tag
tag = field["tag"].value
if tag == 55:
datestr = value
continue
if tag == 60:
hourstr = value
continue
if tag not in self.IPTC_KEY:
if tag != 0:
self.warning("Skip IPTC key %s: %s" % (
field["tag"].display, makeUnicode(value)))
continue
setattr(self, self.IPTC_KEY[tag], value)
if datestr and hourstr:
try:
year = int(datestr[0:4])
month = int(datestr[4:6])
day = int(datestr[6:8])
hour = int(hourstr[0:2])
minute = int(hourstr[2:4])
second = int(hourstr[4:6])
self.creation_date = datetime(year, month, day, hour, minute, second)
except ValueError:
pass
registerExtractor(JpegFile, JpegMetadata)
| 10,836 | Python | .py | 264 | 29.481061 | 100 | 0.552479 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,563 | filter.py | midgetspy_Sick-Beard/lib/hachoir_metadata/filter.py | from lib.hachoir_metadata.timezone import UTC
from datetime import date, datetime
# Year in 1850..2030
MIN_YEAR = 1850
MAX_YEAR = 2030
class Filter:
def __init__(self, valid_types, min=None, max=None):
self.types = valid_types
self.min = min
self.max = max
def __call__(self, value):
if not isinstance(value, self.types):
return True
if self.min is not None and value < self.min:
return False
if self.max is not None and self.max < value:
return False
return True
class NumberFilter(Filter):
def __init__(self, min=None, max=None):
Filter.__init__(self, (int, long, float), min, max)
class DatetimeFilter(Filter):
def __init__(self, min=None, max=None):
Filter.__init__(self, (date, datetime),
datetime(MIN_YEAR, 1, 1),
datetime(MAX_YEAR, 12, 31))
self.min_date = date(MIN_YEAR, 1, 1)
self.max_date = date(MAX_YEAR, 12, 31)
self.min_tz = datetime(MIN_YEAR, 1, 1, tzinfo=UTC)
self.max_tz = datetime(MAX_YEAR, 12, 31, tzinfo=UTC)
def __call__(self, value):
"""
Use different min/max values depending on value type
(datetime with timezone, datetime or date).
"""
if not isinstance(value, self.types):
return True
if hasattr(value, "tzinfo") and value.tzinfo:
return (self.min_tz <= value <= self.max_tz)
elif isinstance(value, datetime):
return (self.min <= value <= self.max)
else:
return (self.min_date <= value <= self.max_date)
DATETIME_FILTER = DatetimeFilter()
| 1,668 | Python | .py | 44 | 30 | 60 | 0.600248 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,564 | safe.py | midgetspy_Sick-Beard/lib/hachoir_metadata/safe.py | from lib.hachoir_core.error import HACHOIR_ERRORS, warning
def fault_tolerant(func, *args):
def safe_func(*args, **kw):
try:
func(*args, **kw)
except HACHOIR_ERRORS, err:
warning("Error when calling function %s(): %s" % (
func.__name__, err))
return safe_func
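# (Added comment) Typical usage, mirroring the extractors in this package
# (the method name is hypothetical): a failure inside the wrapped call is
# logged as a warning instead of aborting the whole extraction.
#
#   @fault_tolerant
#   def useHeader(self, header):
#       self.title = header["title"].value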
def getFieldAttribute(fieldset, key, attrname):
try:
field = fieldset[key]
if field.hasValue():
return getattr(field, attrname)
except HACHOIR_ERRORS, err:
warning("Unable to get %s of field %s/%s: %s" % (
attrname, fieldset.path, key, err))
return None
def getValue(fieldset, key):
return getFieldAttribute(fieldset, key, "value")
def getDisplay(fieldset, key):
return getFieldAttribute(fieldset, key, "display")
| 811 | Python | .py | 22 | 29.636364 | 62 | 0.631378 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,565 | archive.py | midgetspy_Sick-Beard/lib/hachoir_metadata/archive.py | from lib.hachoir_metadata.metadata_item import QUALITY_BEST, QUALITY_FASTEST
from lib.hachoir_metadata.safe import fault_tolerant, getValue
from lib.hachoir_metadata.metadata import (
RootMetadata, Metadata, MultipleMetadata, registerExtractor)
from lib.hachoir_parser.archive import (Bzip2Parser, CabFile, GzipParser,
TarFile, ZipFile, MarFile)
from lib.hachoir_core.tools import humanUnixAttributes
from lib.hachoir_core.i18n import _
def maxNbFile(meta):
if meta.quality <= QUALITY_FASTEST:
return 0
if QUALITY_BEST <= meta.quality:
return None
return 1 + int(10 * meta.quality)
def computeCompressionRate(meta):
"""
Compute compression rate; sizes have to be in bytes.
"""
if not meta.has("file_size") \
or not meta.get("compr_size", 0):
return
file_size = meta.get("file_size")
if not file_size:
return
meta.compr_rate = float(file_size) / meta.get("compr_size")
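# (Added comment) Example with hypothetical numbers: file_size=1000000 and
# compr_size=250000 (both in bytes) yield compr_rate = 4.0.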
class Bzip2Metadata(RootMetadata):
def extract(self, zip):
if "file" in zip:
self.compr_size = zip["file"].size/8
class GzipMetadata(RootMetadata):
def extract(self, gzip):
self.useHeader(gzip)
computeCompressionRate(self)
@fault_tolerant
def useHeader(self, gzip):
self.compression = gzip["compression"].display
if gzip["mtime"]:
self.last_modification = gzip["mtime"].value
self.os = gzip["os"].display
if gzip["has_filename"].value:
self.filename = getValue(gzip, "filename")
if gzip["has_comment"].value:
self.comment = getValue(gzip, "comment")
self.compr_size = gzip["file"].size/8
self.file_size = gzip["size"].value
class ZipMetadata(MultipleMetadata):
def extract(self, zip):
max_nb = maxNbFile(self)
for index, field in enumerate(zip.array("file")):
if max_nb is not None and max_nb <= index:
self.warning("ZIP archive contains many files, but only first %s files are processed" % max_nb)
break
self.processFile(field)
@fault_tolerant
def processFile(self, field):
meta = Metadata(self)
meta.filename = field["filename"].value
meta.creation_date = field["last_mod"].value
meta.compression = field["compression"].display
if "data_desc" in field:
meta.file_size = field["data_desc/file_uncompressed_size"].value
if field["data_desc/file_compressed_size"].value:
meta.compr_size = field["data_desc/file_compressed_size"].value
else:
meta.file_size = field["uncompressed_size"].value
if field["compressed_size"].value:
meta.compr_size = field["compressed_size"].value
computeCompressionRate(meta)
self.addGroup(field.name, meta, "File \"%s\"" % meta.get('filename'))
class TarMetadata(MultipleMetadata):
def extract(self, tar):
max_nb = maxNbFile(self)
for index, field in enumerate(tar.array("file")):
if max_nb is not None and max_nb <= index:
self.warning("TAR archive contains many files, but only first %s files are processed" % max_nb)
break
meta = Metadata(self)
self.extractFile(field, meta)
if meta.has("filename"):
title = _('File "%s"') % meta.getText('filename')
else:
title = _("File")
self.addGroup(field.name, meta, title)
@fault_tolerant
def extractFile(self, field, meta):
meta.filename = field["name"].value
meta.file_attr = humanUnixAttributes(field.getOctal("mode"))
meta.file_size = field.getOctal("size")
try:
if field.getOctal("mtime"):
meta.last_modification = field.getDatetime()
except ValueError:
pass
meta.file_type = field["type"].display
meta.author = "%s (uid=%s), group %s (gid=%s)" %\
(field["uname"].value, field.getOctal("uid"),
field["gname"].value, field.getOctal("gid"))
class CabMetadata(MultipleMetadata):
def extract(self, cab):
if "folder[0]" in cab:
self.useFolder(cab["folder[0]"])
self.format_version = "Microsoft Cabinet version %s" % cab["cab_version"].display
self.comment = "%s folders, %s files" % (
cab["nb_folder"].value, cab["nb_files"].value)
max_nb = maxNbFile(self)
for index, field in enumerate(cab.array("file")):
if max_nb is not None and max_nb <= index:
self.warning("CAB archive contains many files, but only first %s files are processed" % max_nb)
break
self.useFile(field)
@fault_tolerant
def useFolder(self, folder):
compr = folder["compr_method"].display
if folder["compr_method"].value != 0:
compr += " (level %u)" % folder["compr_level"].value
self.compression = compr
@fault_tolerant
def useFile(self, field):
meta = Metadata(self)
meta.filename = field["filename"].value
meta.file_size = field["filesize"].value
meta.creation_date = field["timestamp"].value
attr = field["attributes"].value
if attr != "(none)":
meta.file_attr = attr
if meta.has("filename"):
title = _("File \"%s\"") % meta.getText('filename')
else:
title = _("File")
self.addGroup(field.name, meta, title)
class MarMetadata(MultipleMetadata):
def extract(self, mar):
self.comment = "Contains %s files" % mar["nb_file"].value
self.format_version = "Microsoft Archive version %s" % mar["version"].value
max_nb = maxNbFile(self)
for index, field in enumerate(mar.array("file")):
if max_nb is not None and max_nb <= index:
self.warning("MAR archive contains many files, but only first %s files are processed" % max_nb)
break
meta = Metadata(self)
meta.filename = field["filename"].value
meta.compression = "None"
meta.file_size = field["filesize"].value
self.addGroup(field.name, meta, "File \"%s\"" % meta.getText('filename'))
registerExtractor(CabFile, CabMetadata)
registerExtractor(GzipParser, GzipMetadata)
registerExtractor(Bzip2Parser, Bzip2Metadata)
registerExtractor(TarFile, TarMetadata)
registerExtractor(ZipFile, ZipMetadata)
registerExtractor(MarFile, MarMetadata)
| 6,566 | Python | .py | 150 | 34.9 | 111 | 0.624688 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,566 | metadata_item.py | midgetspy_Sick-Beard/lib/hachoir_metadata/metadata_item.py | from lib.hachoir_core.tools import makeUnicode, normalizeNewline
from lib.hachoir_core.error import HACHOIR_ERRORS
from lib.hachoir_metadata import config
from lib.hachoir_metadata.setter import normalizeString
MIN_PRIORITY = 100
MAX_PRIORITY = 999
QUALITY_FASTEST = 0.0
QUALITY_FAST = 0.25
QUALITY_NORMAL = 0.5
QUALITY_GOOD = 0.75
QUALITY_BEST = 1.0
class DataValue:
def __init__(self, value, text):
self.value = value
self.text = text
class Data:
def __init__(self, key, priority, description,
text_handler=None, type=None, filter=None, conversion=None):
"""
text_handler is only used if value is neither str nor unicode; prototype:
def text_handler(value) -> str/unicode
"""
assert MIN_PRIORITY <= priority <= MAX_PRIORITY
assert isinstance(description, unicode)
self.metadata = None
self.key = key
self.description = description
self.values = []
if type and not isinstance(type, (tuple, list)):
type = (type,)
self.type = type
self.text_handler = text_handler
self.filter = filter
self.priority = priority
self.conversion = conversion
def _createItem(self, value, text=None):
if text is None:
if isinstance(value, unicode):
text = value
elif self.text_handler:
text = self.text_handler(value)
assert isinstance(text, unicode)
else:
text = makeUnicode(value)
return DataValue(value, text)
def add(self, value):
if isinstance(value, tuple):
if len(value) != 2:
raise ValueError("Data.add() only accept tuple of 2 elements: (value,text)")
value, text = value
else:
text = None
# Skip value 'None'
if value is None:
return
if isinstance(value, (str, unicode)):
value = normalizeString(value)
if not value:
return
        # Convert the value: use the conversion callback if set; otherwise
        # plain byte strings are decoded below using charset ISO-8859-1
if self.conversion:
try:
new_value = self.conversion(self.metadata, self.key, value)
except HACHOIR_ERRORS, err:
self.metadata.warning("Error during conversion of %r value: %s" % (
self.key, err))
return
if new_value is None:
dest_types = " or ".join(str(item.__name__) for item in self.type)
self.metadata.warning("Unable to convert %s=%r (%s) to %s" % (
self.key, value, type(value).__name__, dest_types))
return
if isinstance(new_value, tuple):
if text:
value = new_value[0]
else:
value, text = new_value
else:
value = new_value
elif isinstance(value, str):
value = unicode(value, "ISO-8859-1")
if self.type and not isinstance(value, self.type):
dest_types = " or ".join(str(item.__name__) for item in self.type)
self.metadata.warning("Key %r: value %r type (%s) is not %s" % (
self.key, value, type(value).__name__, dest_types))
return
# Skip empty strings
if isinstance(value, unicode):
value = normalizeNewline(value)
if config.MAX_STR_LENGTH \
and config.MAX_STR_LENGTH < len(value):
value = value[:config.MAX_STR_LENGTH] + "(...)"
# Skip duplicates
if value in self:
return
# Use filter
if self.filter and not self.filter(value):
self.metadata.warning("Skip value %s=%r (filter)" % (self.key, value))
return
        # For strings: if we have "verylongtext" and "verylong",
        # keep the longer value
if isinstance(value, unicode):
for index, item in enumerate(self.values):
item = item.value
if not isinstance(item, unicode):
continue
if value.startswith(item):
                    # Found a longer value: replace the old one
self.values[index] = self._createItem(value, text)
return
if item.startswith(value):
                    # Found a truncated value: skip it
return
# Add new value
self.values.append(self._createItem(value, text))
def __len__(self):
return len(self.values)
def __getitem__(self, index):
return self.values[index]
def __contains__(self, value):
for item in self.values:
if value == item.value:
return True
return False
def __cmp__(self, other):
return cmp(self.priority, other.priority)
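# Behavior sketch (illustrative; assumes a Metadata instance "meta" with the
# usual registered items). Data.add() drops duplicates and keeps the longest
# of two overlapping strings:
#
#   >>> meta.title = u"verylong"        # stored
#   >>> meta.title = u"verylongtext"    # replaces the truncated value
#   >>> meta.title = u"very"            # skipped: prefix of an existing value
#   >>> meta.getValues('title')
#   [u'verylongtext']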
| 4,916 | Python | .py | 126 | 27.738095 | 92 | 0.557652 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,567 | formatter.py | midgetspy_Sick-Beard/lib/hachoir_metadata/formatter.py | from lib.hachoir_core.i18n import _, ngettext
NB_CHANNEL_NAME = {1: _("mono"), 2: _("stereo")}
def humanAudioChannel(value):
return NB_CHANNEL_NAME.get(value, unicode(value))
def humanFrameRate(value):
if isinstance(value, (int, long, float)):
return _("%.1f fps") % value
else:
return value
def humanComprRate(rate):
return u"%.1fx" % rate
def humanAltitude(value):
return ngettext("%.1f meter", "%.1f meters", value) % value
def humanPixelSize(value):
return ngettext("%s pixel", "%s pixels", value) % value
def humanDPI(value):
return u"%s DPI" % value
| 608 | Python | .py | 17 | 31.705882 | 63 | 0.674099 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,568 | timezone.py | midgetspy_Sick-Beard/lib/hachoir_metadata/timezone.py | from datetime import tzinfo, timedelta
class TimezoneUTC(tzinfo):
"""UTC timezone"""
ZERO = timedelta(0)
def utcoffset(self, dt):
return TimezoneUTC.ZERO
def tzname(self, dt):
return u"UTC"
def dst(self, dt):
return TimezoneUTC.ZERO
def __repr__(self):
return "<TimezoneUTC delta=0, name=u'UTC'>"
class Timezone(TimezoneUTC):
"""Fixed offset in hour from UTC."""
def __init__(self, offset):
self._offset = timedelta(minutes=offset*60)
self._name = u"%+03u00" % offset
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self._name
def __repr__(self):
return "<Timezone delta=%s, name='%s'>" % (
self._offset, self._name)
UTC = TimezoneUTC()
def createTimezone(offset):
if offset:
return Timezone(offset)
else:
return UTC
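# Illustrative usage (the reprs match the __repr__ methods defined above):
#   >>> from datetime import datetime
#   >>> datetime(2007, 7, 19, 9, 3, 57, tzinfo=createTimezone(2))
#   datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<Timezone delta=2:00:00, name='+0200'>)
#   >>> createTimezone(0) is UTC
#   True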
| 907 | Python | .py | 30 | 23.766667 | 51 | 0.616185 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,569 | metadata.py | midgetspy_Sick-Beard/lib/hachoir_metadata/metadata.py | # -*- coding: utf-8 -*-
from lib.hachoir_core.compatibility import any, sorted
from lib.hachoir_core.endian import endian_name
from lib.hachoir_core.tools import makePrintable, makeUnicode
from lib.hachoir_core.dict import Dict
from lib.hachoir_core.error import error, HACHOIR_ERRORS
from lib.hachoir_core.i18n import _
from lib.hachoir_core.log import Logger
from lib.hachoir_metadata.metadata_item import (
MIN_PRIORITY, MAX_PRIORITY, QUALITY_NORMAL)
from lib.hachoir_metadata.register import registerAllItems
extractors = {}
class Metadata(Logger):
header = u"Metadata"
def __init__(self, parent, quality=QUALITY_NORMAL):
assert isinstance(self.header, unicode)
        # Inherit quality from the parent, else limit it to 0.0 .. 1.0
if parent:
quality = parent.quality
else:
quality = min(max(0.0, quality), 1.0)
object.__init__(self)
object.__setattr__(self, "_Metadata__data", {})
object.__setattr__(self, "quality", quality)
header = self.__class__.header
object.__setattr__(self, "_Metadata__header", header)
registerAllItems(self)
def _logger(self):
pass
def __setattr__(self, key, value):
"""
Add a new value to data with name 'key'. Skip duplicates.
"""
# Invalid key?
if key not in self.__data:
raise KeyError(_("%s has no metadata '%s'") % (self.__class__.__name__, key))
# Skip duplicates
self.__data[key].add(value)
def setHeader(self, text):
object.__setattr__(self, "header", text)
def getItems(self, key):
try:
return self.__data[key]
except LookupError:
raise ValueError("Metadata has no value '%s'" % key)
def getItem(self, key, index):
try:
return self.getItems(key)[index]
except (LookupError, ValueError):
return None
def has(self, key):
return 1 <= len(self.getItems(key))
def get(self, key, default=None, index=0):
"""
Read first value of tag with name 'key'.
>>> from datetime import timedelta
>>> a = RootMetadata()
>>> a.duration = timedelta(seconds=2300)
>>> a.get('duration')
datetime.timedelta(0, 2300)
>>> a.get('author', u'Anonymous')
u'Anonymous'
"""
item = self.getItem(key, index)
if item is None:
if default is None:
raise ValueError("Metadata has no value '%s' (index %s)" % (key, index))
else:
return default
return item.value
def getValues(self, key):
try:
data = self.__data[key]
except LookupError:
raise ValueError("Metadata has no value '%s'" % key)
return [ item.value for item in data ]
def getText(self, key, default=None, index=0):
"""
Read first value, as unicode string, of tag with name 'key'.
>>> from datetime import timedelta
>>> a = RootMetadata()
>>> a.duration = timedelta(seconds=2300)
>>> a.getText('duration')
u'38 min 20 sec'
>>> a.getText('titre', u'Unknown')
u'Unknown'
"""
item = self.getItem(key, index)
if item is not None:
return item.text
else:
return default
def register(self, data):
assert data.key not in self.__data
data.metadata = self
self.__data[data.key] = data
def __iter__(self):
return self.__data.itervalues()
def __str__(self):
r"""
Create a multi-line ASCII string (end of line is "\n") which
        represents all data items.
>>> a = RootMetadata()
>>> a.author = "haypo"
>>> a.copyright = unicode("© Hachoir", "UTF-8")
>>> print a
Metadata:
- Author: haypo
- Copyright: \xa9 Hachoir
@see __unicode__() and exportPlaintext()
"""
text = self.exportPlaintext()
return "\n".join( makePrintable(line, "ASCII") for line in text )
def __unicode__(self):
r"""
Create a multi-line Unicode string (end of line is "\n") which
        represents all data items.
>>> a = RootMetadata()
>>> a.copyright = unicode("© Hachoir", "UTF-8")
>>> print repr(unicode(a))
u'Metadata:\n- Copyright: \xa9 Hachoir'
@see __str__() and exportPlaintext()
"""
return "\n".join(self.exportPlaintext())
def exportPlaintext(self, priority=None, human=True, line_prefix=u"- ", title=None):
r"""
        Convert metadata to a multi-line Unicode string, skipping items
        whose priority value is greater than the specified priority.
        Default priority is MAX_PRIORITY. If the human flag is True, data
        keys are translated to friendlier human names (eg. "bit_rate" becomes
        "Bit rate"), which may also be translated using gettext.
        If the priority is too small, the metadata is empty and None is returned.
>>> print RootMetadata().exportPlaintext()
None
>>> meta = RootMetadata()
>>> meta.copyright = unicode("© Hachoir", "UTF-8")
>>> print repr(meta.exportPlaintext())
[u'Metadata:', u'- Copyright: \xa9 Hachoir']
@see __str__() and __unicode__()
"""
if priority is not None:
priority = max(priority, MIN_PRIORITY)
priority = min(priority, MAX_PRIORITY)
else:
priority = MAX_PRIORITY
if not title:
title = self.header
text = ["%s:" % title]
for data in sorted(self):
if priority < data.priority:
break
if not data.values:
continue
if human:
title = data.description
else:
title = data.key
for item in data.values:
if human:
value = item.text
else:
value = makeUnicode(item.value)
text.append("%s%s: %s" % (line_prefix, title, value))
if 1 < len(text):
return text
else:
return None
def __nonzero__(self):
return any(item for item in self.__data.itervalues())
class RootMetadata(Metadata):
def __init__(self, quality=QUALITY_NORMAL):
Metadata.__init__(self, None, quality)
class MultipleMetadata(RootMetadata):
header = _("Common")
def __init__(self, quality=QUALITY_NORMAL):
RootMetadata.__init__(self, quality)
object.__setattr__(self, "_MultipleMetadata__groups", Dict())
object.__setattr__(self, "_MultipleMetadata__key_counter", {})
def __contains__(self, key):
return key in self.__groups
def __getitem__(self, key):
return self.__groups[key]
def iterGroups(self):
return self.__groups.itervalues()
def __nonzero__(self):
if RootMetadata.__nonzero__(self):
return True
return any(bool(group) for group in self.__groups)
def addGroup(self, key, metadata, header=None):
"""
Add a new group (metadata of a sub-document).
Returns False if the group is skipped, True if it has been added.
"""
if not metadata:
self.warning("Skip empty group %s" % key)
return False
if key.endswith("[]"):
key = key[:-2]
if key in self.__key_counter:
self.__key_counter[key] += 1
else:
self.__key_counter[key] = 1
key += "[%u]" % self.__key_counter[key]
if header:
metadata.setHeader(header)
self.__groups.append(key, metadata)
return True
def exportPlaintext(self, priority=None, human=True, line_prefix=u"- "):
common = Metadata.exportPlaintext(self, priority, human, line_prefix)
if common:
text = common
else:
text = []
for key, metadata in self.__groups.iteritems():
if not human:
title = key
else:
title = None
value = metadata.exportPlaintext(priority, human, line_prefix, title=title)
if value:
text.extend(value)
if len(text):
return text
else:
return None
def registerExtractor(parser, extractor):
assert parser not in extractors
assert issubclass(extractor, RootMetadata)
extractors[parser] = extractor
def extractMetadata(parser, quality=QUALITY_NORMAL):
"""
    Create a Metadata instance from a parser. Returns None if no metadata
    extractor exists for the parser class.
"""
try:
extractor = extractors[parser.__class__]
except KeyError:
return None
metadata = extractor(quality)
try:
metadata.extract(parser)
except HACHOIR_ERRORS, err:
error("Error during metadata extraction: %s" % unicode(err))
if metadata:
metadata.mime_type = parser.mime_type
metadata.endian = endian_name[parser.endian]
return metadata
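# Minimal usage sketch (illustrative; the file name is an assumption, and
# createParser comes from the companion hachoir_parser package):
#
#   from lib.hachoir_parser import createParser
#   parser = createParser(u"movie.avi")
#   metadata = extractMetadata(parser)
#   if metadata:
#       for line in metadata.exportPlaintext():
#           print line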
| 9,158 | Python | .py | 247 | 27.870445 | 89 | 0.577956 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,570 | __init__.py | midgetspy_Sick-Beard/lib/hachoir_metadata/__init__.py | from lib.hachoir_metadata.version import VERSION as __version__
from lib.hachoir_metadata.metadata import extractMetadata
# Just import the modules:
# each module registers its extractors with the registerExtractor() method
import lib.hachoir_metadata.archive
import lib.hachoir_metadata.audio
import lib.hachoir_metadata.file_system
import lib.hachoir_metadata.image
import lib.hachoir_metadata.jpeg
import lib.hachoir_metadata.misc
import lib.hachoir_metadata.program
import lib.hachoir_metadata.riff
import lib.hachoir_metadata.video
| 508 | Python | .py | 13 | 37.923077 | 63 | 0.860041 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,571 | setter.py | midgetspy_Sick-Beard/lib/hachoir_metadata/setter.py | from datetime import date, datetime
import re
from lib.hachoir_core.language import Language
from locale import setlocale, LC_ALL
from time import strptime
from lib.hachoir_metadata.timezone import createTimezone
from lib.hachoir_metadata import config
NORMALIZE_REGEX = re.compile("[-/.: ]+")
YEAR_REGEX1 = re.compile("^([0-9]{4})$")
# Date regex: YYYY-MM-DD (ISO format)
DATE_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})$")
# Datetime regex: YYYY-MM-DD HH:MM:SS (ISO format)
DATETIME_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")
# Datetime regex: "MM-DD-YYYY HH:MM:SS" (US format)
DATETIME_REGEX2 = re.compile("^([01]?[0-9])~([0-9]{2})~([0-9]{4})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")
# Timezone regex: "(...) +0200"
TIMEZONE_REGEX = re.compile("^(.*)~([+-][0-9]{2})00$")
# Timestamp: 'February 2007'
MONTH_YEAR = "%B~%Y"
# Timestamp: 'Sun Feb 24 15:51:09 2008'
RIFF_TIMESTAMP = "%a~%b~%d~%H~%M~%S~%Y"
# Timestamp: 'Thu, 19 Jul 2007 09:03:57'
ISO_TIMESTAMP = "%a,~%d~%b~%Y~%H~%M~%S"
def parseDatetime(value):
"""
Year and date:
>>> parseDatetime("2000")
(datetime.date(2000, 1, 1), u'2000')
>>> parseDatetime("2004-01-02")
datetime.date(2004, 1, 2)
Timestamp:
>>> parseDatetime("2004-01-02 18:10:45")
datetime.datetime(2004, 1, 2, 18, 10, 45)
>>> parseDatetime("2004-01-02 18:10:45")
datetime.datetime(2004, 1, 2, 18, 10, 45)
Timestamp with timezone:
>>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0000')
datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<TimezoneUTC delta=0, name=u'UTC'>)
>>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0200')
datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<Timezone delta=2:00:00, name='+0200'>)
"""
value = NORMALIZE_REGEX.sub("~", value.strip())
regs = YEAR_REGEX1.match(value)
if regs:
try:
year = int(regs.group(1))
return (date(year, 1, 1), unicode(year))
except ValueError:
pass
regs = DATE_REGEX1.match(value)
if regs:
try:
year = int(regs.group(1))
month = int(regs.group(2))
day = int(regs.group(3))
return date(year, month, day)
except ValueError:
pass
regs = DATETIME_REGEX1.match(value)
if regs:
try:
year = int(regs.group(1))
month = int(regs.group(2))
day = int(regs.group(3))
hour = int(regs.group(4))
min = int(regs.group(5))
sec = int(regs.group(6))
return datetime(year, month, day, hour, min, sec)
except ValueError:
pass
regs = DATETIME_REGEX2.match(value)
if regs:
try:
month = int(regs.group(1))
day = int(regs.group(2))
year = int(regs.group(3))
hour = int(regs.group(4))
min = int(regs.group(5))
sec = int(regs.group(6))
return datetime(year, month, day, hour, min, sec)
except ValueError:
pass
current_locale = setlocale(LC_ALL, "C")
try:
match = TIMEZONE_REGEX.match(value)
if match:
without_timezone = match.group(1)
delta = int(match.group(2))
delta = createTimezone(delta)
else:
without_timezone = value
delta = None
try:
timestamp = strptime(without_timezone, ISO_TIMESTAMP)
arguments = list(timestamp[0:6]) + [0, delta]
return datetime(*arguments)
except ValueError:
pass
try:
timestamp = strptime(without_timezone, RIFF_TIMESTAMP)
arguments = list(timestamp[0:6]) + [0, delta]
return datetime(*arguments)
except ValueError:
pass
try:
timestamp = strptime(value, MONTH_YEAR)
arguments = list(timestamp[0:3])
return date(*arguments)
except ValueError:
pass
finally:
setlocale(LC_ALL, current_locale)
return None
def setDatetime(meta, key, value):
if isinstance(value, (str, unicode)):
return parseDatetime(value)
elif isinstance(value, (date, datetime)):
return value
return None
def setLanguage(meta, key, value):
"""
>>> setLanguage(None, None, "fre")
<Language 'French', code='fre'>
>>> setLanguage(None, None, u"ger")
<Language 'German', code='ger'>
"""
return Language(value)
def setTrackTotal(meta, key, total):
"""
>>> setTrackTotal(None, None, "10")
10
"""
try:
return int(total)
except ValueError:
meta.warning("Invalid track total: %r" % total)
return None
def setTrackNumber(meta, key, number):
if isinstance(number, (int, long)):
return number
if "/" in number:
number, total = number.split("/", 1)
meta.track_total = total
try:
return int(number)
except ValueError:
meta.warning("Invalid track number: %r" % number)
return None
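# Illustrative behavior (meta is any Metadata instance; the values are
# examples): setTrackNumber(meta, 'track_number', u"3/12") returns 3 and, as
# a side effect, assigns meta.track_total, which goes through setTrackTotal
# and stores the integer 12.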
def normalizeString(text):
if config.RAW_OUTPUT:
return text
return text.strip(" \t\v\n\r\0")
| 5,215 | Python | .py | 152 | 27 | 103 | 0.579699 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,572 | riff.py | midgetspy_Sick-Beard/lib/hachoir_metadata/riff.py | """
Extract metadata from RIFF file format: AVI video and WAV sound.
"""
from lib.hachoir_metadata.metadata import Metadata, MultipleMetadata, registerExtractor
from lib.hachoir_metadata.safe import fault_tolerant, getValue
from lib.hachoir_parser.container.riff import RiffFile
from lib.hachoir_parser.video.fourcc import UNCOMPRESSED_AUDIO
from lib.hachoir_core.tools import humanFilesize, makeUnicode, timedelta2seconds
from lib.hachoir_core.i18n import _
from lib.hachoir_metadata.audio import computeComprRate as computeAudioComprRate
from datetime import timedelta
class RiffMetadata(MultipleMetadata):
TAG_TO_KEY = {
"INAM": "title",
"IART": "artist",
"ICMT": "comment",
"ICOP": "copyright",
"IENG": "author", # (engineer)
"ISFT": "producer",
"ICRD": "creation_date",
"IDIT": "creation_date",
}
def extract(self, riff):
type = riff["type"].value
if type == "WAVE":
self.extractWAVE(riff)
size = getValue(riff, "audio_data/size")
if size:
computeAudioComprRate(self, size*8)
elif type == "AVI ":
if "headers" in riff:
self.extractAVI(riff["headers"])
self.extractInfo(riff["headers"])
elif type == "ACON":
self.extractAnim(riff)
if "info" in riff:
self.extractInfo(riff["info"])
def processChunk(self, chunk):
if "text" not in chunk:
return
value = chunk["text"].value
tag = chunk["tag"].value
if tag not in self.TAG_TO_KEY:
self.warning("Skip RIFF metadata %s: %s" % (tag, value))
return
key = self.TAG_TO_KEY[tag]
setattr(self, key, value)
@fault_tolerant
def extractWAVE(self, wav):
format = wav["format"]
        # Number of channels, bits/sample, sample rate
self.nb_channel = format["nb_channel"].value
self.bits_per_sample = format["bit_per_sample"].value
self.sample_rate = format["sample_per_sec"].value
self.compression = format["codec"].display
if "nb_sample/nb_sample" in wav \
and 0 < format["sample_per_sec"].value:
self.duration = timedelta(seconds=float(wav["nb_sample/nb_sample"].value) / format["sample_per_sec"].value)
if format["codec"].value in UNCOMPRESSED_AUDIO:
# Codec with fixed bit rate
self.bit_rate = format["nb_channel"].value * format["bit_per_sample"].value * format["sample_per_sec"].value
if not self.has("duration") \
and "audio_data/size" in wav \
and self.has("bit_rate"):
duration = float(wav["audio_data/size"].value)*8 / self.get('bit_rate')
self.duration = timedelta(seconds=duration)
def extractInfo(self, fieldset):
for field in fieldset:
if not field.is_field_set:
continue
if "tag" in field:
if field["tag"].value == "LIST":
self.extractInfo(field)
else:
self.processChunk(field)
@fault_tolerant
def extractAVIVideo(self, header, meta):
meta.compression = "%s (fourcc:\"%s\")" \
% (header["fourcc"].display, makeUnicode(header["fourcc"].value))
if header["rate"].value and header["scale"].value:
fps = float(header["rate"].value) / header["scale"].value
meta.frame_rate = fps
if 0 < fps:
self.duration = meta.duration = timedelta(seconds=float(header["length"].value) / fps)
if "../stream_fmt/width" in header:
format = header["../stream_fmt"]
meta.width = format["width"].value
meta.height = format["height"].value
meta.bits_per_pixel = format["depth"].value
else:
meta.width = header["right"].value - header["left"].value
meta.height = header["bottom"].value - header["top"].value
@fault_tolerant
def extractAVIAudio(self, format, meta):
meta.nb_channel = format["channel"].value
meta.sample_rate = format["sample_rate"].value
meta.bit_rate = format["bit_rate"].value * 8
if format["bits_per_sample"].value:
meta.bits_per_sample = format["bits_per_sample"].value
if "../stream_hdr" in format:
header = format["../stream_hdr"]
if header["rate"].value and header["scale"].value:
frame_rate = float(header["rate"].value) / header["scale"].value
meta.duration = timedelta(seconds=float(header["length"].value) / frame_rate)
if header["fourcc"].value != "":
meta.compression = "%s (fourcc:\"%s\")" \
% (format["codec"].display, header["fourcc"].value)
if not meta.has("compression"):
meta.compression = format["codec"].display
self.computeAudioComprRate(meta)
@fault_tolerant
def computeAudioComprRate(self, meta):
uncompr = meta.get('bit_rate', 0)
if not uncompr:
return
compr = meta.get('nb_channel') * meta.get('sample_rate') * meta.get('bits_per_sample', default=16)
if not compr:
return
meta.compr_rate = float(compr) / uncompr
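    # Worked example (illustrative numbers): a stereo stream at 44100 Hz with
    # 16 bits/sample needs 2 * 44100 * 16 = 1411200 bit/s uncompressed; with a
    # stored bit rate of 128000 bit/s, compr_rate = 1411200.0 / 128000 ~= 11.0x.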
@fault_tolerant
def useAviHeader(self, header):
microsec = header["microsec_per_frame"].value
if microsec:
self.frame_rate = 1000000.0 / microsec
total_frame = getValue(header, "total_frame")
if total_frame and not self.has("duration"):
self.duration = timedelta(microseconds=total_frame * microsec)
self.width = header["width"].value
self.height = header["height"].value
def extractAVI(self, headers):
audio_index = 1
for stream in headers.array("stream"):
if "stream_hdr/stream_type" not in stream:
continue
stream_type = stream["stream_hdr/stream_type"].value
if stream_type == "vids":
if "stream_hdr" in stream:
meta = Metadata(self)
self.extractAVIVideo(stream["stream_hdr"], meta)
self.addGroup("video", meta, "Video stream")
elif stream_type == "auds":
if "stream_fmt" in stream:
meta = Metadata(self)
self.extractAVIAudio(stream["stream_fmt"], meta)
self.addGroup("audio[%u]" % audio_index, meta, "Audio stream")
audio_index += 1
if "avi_hdr" in headers:
self.useAviHeader(headers["avi_hdr"])
# Compute global bit rate
if self.has("duration") and "/movie/size" in headers:
self.bit_rate = float(headers["/movie/size"].value) * 8 / timedelta2seconds(self.get('duration'))
# Video has index?
if "/index" in headers:
self.comment = _("Has audio/video index (%s)") \
% humanFilesize(headers["/index"].size/8)
@fault_tolerant
def extractAnim(self, riff):
if "anim_rate/rate[0]" in riff:
count = 0
total = 0
for rate in riff.array("anim_rate/rate"):
count += 1
if 100 < count:
break
total += rate.value / 60.0
if count and total:
self.frame_rate = count / total
if not self.has("frame_rate") and "anim_hdr/jiffie_rate" in riff:
self.frame_rate = 60.0 / riff["anim_hdr/jiffie_rate"].value
registerExtractor(RiffFile, RiffMetadata)
| 7,707 | Python | .py | 170 | 34.335294 | 120 | 0.579753 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,573 | video.py | midgetspy_Sick-Beard/lib/hachoir_metadata/video.py | from lib.hachoir_core.field import MissingField
from lib.hachoir_metadata.metadata import (registerExtractor,
Metadata, RootMetadata, MultipleMetadata)
from lib.hachoir_metadata.metadata_item import QUALITY_GOOD
from lib.hachoir_metadata.safe import fault_tolerant
from lib.hachoir_parser.video import MovFile, AsfFile, FlvFile
from lib.hachoir_parser.video.asf import Descriptor as ASF_Descriptor
from lib.hachoir_parser.container import MkvFile
from lib.hachoir_parser.container.mkv import dateToDatetime
from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import makeUnicode, makePrintable, timedelta2seconds
from datetime import timedelta
class MkvMetadata(MultipleMetadata):
tag_key = {
"TITLE": "title",
"URL": "url",
"COPYRIGHT": "copyright",
        # TODO: maybe use another name?
        # Its value may be different from (...)/Info/DateUTC/date
"DATE_RECORDED": "creation_date",
# TODO: Extract subtitle metadata
"SUBTITLE": "subtitle_author",
}
def extract(self, mkv):
for segment in mkv.array("Segment"):
self.processSegment(segment)
def processSegment(self, segment):
for field in segment:
if field.name.startswith("Info["):
self.processInfo(field)
elif field.name.startswith("Tags["):
for tag in field.array("Tag"):
self.processTag(tag)
elif field.name.startswith("Tracks["):
self.processTracks(field)
elif field.name.startswith("Cluster["):
if self.quality < QUALITY_GOOD:
return
def processTracks(self, tracks):
for entry in tracks.array("TrackEntry"):
self.processTrack(entry)
def processTrack(self, track):
if "TrackType/enum" not in track:
return
if track["TrackType/enum"].display == "video":
self.processVideo(track)
elif track["TrackType/enum"].display == "audio":
self.processAudio(track)
elif track["TrackType/enum"].display == "subtitle":
self.processSubtitle(track)
def trackCommon(self, track, meta):
if "Name/unicode" in track:
meta.title = track["Name/unicode"].value
if "Language/string" in track \
and track["Language/string"].value not in ("mis", "und"):
meta.language = track["Language/string"].value
def processVideo(self, track):
video = Metadata(self)
self.trackCommon(track, video)
try:
video.compression = track["CodecID/string"].value
if "Video" in track:
video.width = track["Video/PixelWidth/unsigned"].value
video.height = track["Video/PixelHeight/unsigned"].value
except MissingField:
pass
self.addGroup("video[]", video, "Video stream")
def getDouble(self, field, parent):
float_key = '%s/float' % parent
if float_key in field:
return field[float_key].value
double_key = '%s/double' % parent
if double_key in field:
return field[double_key].value
return None
def processAudio(self, track):
audio = Metadata(self)
self.trackCommon(track, audio)
if "Audio" in track:
frequency = self.getDouble(track, "Audio/SamplingFrequency")
if frequency is not None:
audio.sample_rate = frequency
if "Audio/Channels/unsigned" in track:
audio.nb_channel = track["Audio/Channels/unsigned"].value
if "Audio/BitDepth/unsigned" in track:
audio.bits_per_sample = track["Audio/BitDepth/unsigned"].value
if "CodecID/string" in track:
audio.compression = track["CodecID/string"].value
self.addGroup("audio[]", audio, "Audio stream")
def processSubtitle(self, track):
sub = Metadata(self)
self.trackCommon(track, sub)
try:
sub.compression = track["CodecID/string"].value
except MissingField:
pass
self.addGroup("subtitle[]", sub, "Subtitle")
def processTag(self, tag):
for field in tag.array("SimpleTag"):
self.processSimpleTag(field)
def processSimpleTag(self, tag):
if "TagName/unicode" not in tag \
or "TagString/unicode" not in tag:
return
name = tag["TagName/unicode"].value
if name not in self.tag_key:
return
key = self.tag_key[name]
value = tag["TagString/unicode"].value
setattr(self, key, value)
def processInfo(self, info):
if "TimecodeScale/unsigned" in info:
duration = self.getDouble(info, "Duration")
if duration is not None:
try:
seconds = duration * info["TimecodeScale/unsigned"].value * 1e-9
self.duration = timedelta(seconds=seconds)
except OverflowError:
# Catch OverflowError for timedelta (long int too large
# to be converted to an int)
pass
if "DateUTC/date" in info:
try:
self.creation_date = dateToDatetime(info["DateUTC/date"].value)
except OverflowError:
pass
if "WritingApp/unicode" in info:
self.producer = info["WritingApp/unicode"].value
if "MuxingApp/unicode" in info:
self.producer = info["MuxingApp/unicode"].value
if "Title/unicode" in info:
self.title = info["Title/unicode"].value
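    # Worked example (illustrative values): with the default TimecodeScale of
    # 1000000 ns and Duration = 215360.0, the duration is
    # 215360.0 * 1000000 * 1e-9 = 215.36 seconds, i.e. timedelta(seconds=215.36).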
class FlvMetadata(MultipleMetadata):
def extract(self, flv):
if "video[0]" in flv:
meta = Metadata(self)
self.extractVideo(flv["video[0]"], meta)
self.addGroup("video", meta, "Video stream")
if "audio[0]" in flv:
meta = Metadata(self)
self.extractAudio(flv["audio[0]"], meta)
self.addGroup("audio", meta, "Audio stream")
        # TODO: Compute duration
        # One technique: use the timestamp of the last video/audio chunk,
        # but this is very slow
self.format_version = flv.description
if "metadata/entry[1]" in flv:
self.extractAMF(flv["metadata/entry[1]"])
if self.has('duration'):
self.bit_rate = flv.size / timedelta2seconds(self.get('duration'))
@fault_tolerant
def extractAudio(self, audio, meta):
if audio["codec"].display == "MP3" and "music_data" in audio:
meta.compression = audio["music_data"].description
else:
meta.compression = audio["codec"].display
meta.sample_rate = audio.getSampleRate()
if audio["is_16bit"].value:
meta.bits_per_sample = 16
else:
meta.bits_per_sample = 8
if audio["is_stereo"].value:
meta.nb_channel = 2
else:
meta.nb_channel = 1
@fault_tolerant
def extractVideo(self, video, meta):
meta.compression = video["codec"].display
def extractAMF(self, amf):
for entry in amf.array("item"):
self.useAmfEntry(entry)
@fault_tolerant
def useAmfEntry(self, entry):
key = entry["key"].value
if key == "duration":
self.duration = timedelta(seconds=entry["value"].value)
elif key == "creator":
self.producer = entry["value"].value
elif key == "audiosamplerate":
self.sample_rate = entry["value"].value
elif key == "framerate":
self.frame_rate = entry["value"].value
elif key == "metadatacreator":
self.producer = entry["value"].value
elif key == "metadatadate":
self.creation_date = entry.value
elif key == "width":
self.width = int(entry["value"].value)
elif key == "height":
self.height = int(entry["value"].value)
class MovMetadata(RootMetadata):
def extract(self, mov):
for atom in mov:
if "movie" in atom:
self.processMovie(atom["movie"])
@fault_tolerant
def processMovieHeader(self, hdr):
self.creation_date = hdr["creation_date"].value
self.last_modification = hdr["lastmod_date"].value
self.duration = timedelta(seconds=float(hdr["duration"].value) / hdr["time_scale"].value)
self.comment = _("Play speed: %.1f%%") % (hdr["play_speed"].value*100)
self.comment = _("User volume: %.1f%%") % (float(hdr["volume"].value)*100//255)
@fault_tolerant
def processTrackHeader(self, hdr):
width = int(hdr["frame_size_width"].value)
height = int(hdr["frame_size_height"].value)
if width and height:
self.width = width
self.height = height
def processTrack(self, atom):
for field in atom:
if "track_hdr" in field:
self.processTrackHeader(field["track_hdr"])
def processMovie(self, atom):
for field in atom:
if "track" in field:
self.processTrack(field["track"])
if "movie_hdr" in field:
self.processMovieHeader(field["movie_hdr"])
class AsfMetadata(MultipleMetadata):
EXT_DESC_TO_ATTR = {
"Encoder": "producer",
"ToolName": "producer",
"AlbumTitle": "album",
"Track": "track_number",
"TrackNumber": "track_total",
"Year": "creation_date",
"AlbumArtist": "author",
}
SKIP_EXT_DESC = set((
        # Useless information
"WMFSDKNeeded", "WMFSDKVersion",
"Buffer Average", "VBR Peak", "EncodingTime",
"MediaPrimaryClassID", "UniqueFileIdentifier",
))
def extract(self, asf):
if "header/content" in asf:
self.processHeader(asf["header/content"])
def processHeader(self, header):
compression = []
is_vbr = None
if "ext_desc/content" in header:
# Extract all data from ext_desc
data = {}
for desc in header.array("ext_desc/content/descriptor"):
self.useExtDescItem(desc, data)
            # Have ToolName and ToolVersion? If yes, group them into the producer key
if "ToolName" in data and "ToolVersion" in data:
self.producer = "%s (version %s)" % (data["ToolName"], data["ToolVersion"])
del data["ToolName"]
del data["ToolVersion"]
# "IsVBR" key
if "IsVBR" in data:
is_vbr = (data["IsVBR"] == 1)
del data["IsVBR"]
# Store data
for key, value in data.iteritems():
if key in self.EXT_DESC_TO_ATTR:
key = self.EXT_DESC_TO_ATTR[key]
else:
if isinstance(key, str):
key = makePrintable(key, "ISO-8859-1", to_unicode=True)
value = "%s=%s" % (key, value)
key = "comment"
setattr(self, key, value)
if "file_prop/content" in header:
self.useFileProp(header["file_prop/content"], is_vbr)
if "codec_list/content" in header:
for codec in header.array("codec_list/content/codec"):
if "name" in codec:
text = codec["name"].value
if "desc" in codec and codec["desc"].value:
text = "%s (%s)" % (text, codec["desc"].value)
compression.append(text)
audio_index = 1
video_index = 1
for index, stream_prop in enumerate(header.array("stream_prop")):
if "content/audio_header" in stream_prop:
meta = Metadata(self)
self.streamProperty(header, index, meta)
self.streamAudioHeader(stream_prop["content/audio_header"], meta)
if self.addGroup("audio[%u]" % audio_index, meta, "Audio stream #%u" % audio_index):
audio_index += 1
elif "content/video_header" in stream_prop:
meta = Metadata(self)
self.streamProperty(header, index, meta)
self.streamVideoHeader(stream_prop["content/video_header"], meta)
if self.addGroup("video[%u]" % video_index, meta, "Video stream #%u" % video_index):
video_index += 1
if "metadata/content" in header:
info = header["metadata/content"]
try:
self.title = info["title"].value
self.author = info["author"].value
self.copyright = info["copyright"].value
except MissingField:
pass
@fault_tolerant
def streamAudioHeader(self, audio, meta):
if not meta.has("compression"):
meta.compression = audio["twocc"].display
meta.nb_channel = audio["channels"].value
meta.sample_rate = audio["sample_rate"].value
meta.bits_per_sample = audio["bits_per_sample"].value
@fault_tolerant
def streamVideoHeader(self, video, meta):
meta.width = video["width"].value
meta.height = video["height"].value
if "bmp_info" in video:
bmp_info = video["bmp_info"]
if not meta.has("compression"):
meta.compression = bmp_info["codec"].display
meta.bits_per_pixel = bmp_info["bpp"].value
@fault_tolerant
def useExtDescItem(self, desc, data):
if desc["type"].value == ASF_Descriptor.TYPE_BYTE_ARRAY:
# Skip binary data
return
key = desc["name"].value
if "/" in key:
# Replace "WM/ToolName" with "ToolName"
key = key.split("/", 1)[1]
if key in self.SKIP_EXT_DESC:
# Skip some keys
return
value = desc["value"].value
if not value:
return
value = makeUnicode(value)
data[key] = value
@fault_tolerant
def useFileProp(self, prop, is_vbr):
self.creation_date = prop["creation_date"].value
self.duration = prop["play_duration"].value
if prop["seekable"].value:
self.comment = u"Is seekable"
value = prop["max_bitrate"].value
text = prop["max_bitrate"].display
if is_vbr is True:
text = "VBR (%s max)" % text
elif is_vbr is False:
text = "%s (CBR)" % text
else:
text = "%s (max)" % text
self.bit_rate = (value, text)
def streamProperty(self, header, index, meta):
key = "bit_rates/content/bit_rate[%u]/avg_bitrate" % index
if key in header:
meta.bit_rate = header[key].value
# TODO: Use codec list
# It doesn't work when the video uses /header/content/bitrate_mutex
    # since the codec list is shared between streams but... how is it
# shared?
# key = "codec_list/content/codec[%u]" % index
# if key in header:
# codec = header[key]
# if "name" in codec:
# text = codec["name"].value
# if "desc" in codec and codec["desc"].value:
# meta.compression = "%s (%s)" % (text, codec["desc"].value)
# else:
# meta.compression = text
registerExtractor(MovFile, MovMetadata)
registerExtractor(AsfFile, AsfMetadata)
registerExtractor(FlvFile, FlvMetadata)
registerExtractor(MkvFile, MkvMetadata)
| 15,568 | Python | .py | 366 | 31.978142 | 100 | 0.58109 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,574 | misc.py | midgetspy_Sick-Beard/lib/hachoir_metadata/misc.py | from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor
from lib.hachoir_metadata.safe import fault_tolerant
from lib.hachoir_parser.container import SwfFile
from lib.hachoir_parser.misc import TorrentFile, TrueTypeFontFile, OLE2_File, PcfFile
from lib.hachoir_core.field import isString
from lib.hachoir_core.error import warning
from lib.hachoir_parser import guessParser
from lib.hachoir_metadata.setter import normalizeString
class TorrentMetadata(RootMetadata):
KEY_TO_ATTR = {
u"announce": "url",
u"comment": "comment",
u"creation_date": "creation_date",
}
INFO_TO_ATTR = {
u"length": "file_size",
u"name": "filename",
}
def extract(self, torrent):
for field in torrent[0]:
self.processRoot(field)
@fault_tolerant
def processRoot(self, field):
if field.name in self.KEY_TO_ATTR:
key = self.KEY_TO_ATTR[field.name]
value = field.value
setattr(self, key, value)
elif field.name == "info" and "value" in field:
for field in field["value"]:
self.processInfo(field)
@fault_tolerant
def processInfo(self, field):
if field.name in self.INFO_TO_ATTR:
key = self.INFO_TO_ATTR[field.name]
value = field.value
setattr(self, key, value)
elif field.name == "piece_length":
self.comment = "Piece length: %s" % field.display
class TTF_Metadata(RootMetadata):
NAMEID_TO_ATTR = {
0: "copyright", # Copyright notice
3: "title", # Unique font identifier
5: "version", # Version string
8: "author", # Manufacturer name
11: "url", # URL Vendor
14: "copyright", # License info URL
}
def extract(self, ttf):
if "header" in ttf:
self.extractHeader(ttf["header"])
if "names" in ttf:
self.extractNames(ttf["names"])
@fault_tolerant
def extractHeader(self, header):
self.creation_date = header["created"].value
self.last_modification = header["modified"].value
self.comment = u"Smallest readable size in pixels: %s pixels" % header["lowest"].value
self.comment = u"Font direction: %s" % header["font_dir"].display
@fault_tolerant
def extractNames(self, names):
offset = names["offset"].value
for header in names.array("header"):
key = header["nameID"].value
foffset = offset + header["offset"].value
field = names.getFieldByAddress(foffset*8)
if not field or not isString(field):
continue
value = field.value
if key not in self.NAMEID_TO_ATTR:
continue
key = self.NAMEID_TO_ATTR[key]
if key == "version" and value.startswith(u"Version "):
# "Version 1.2" => "1.2"
value = value[8:]
setattr(self, key, value)
class OLE2_Metadata(RootMetadata):
SUMMARY_ID_TO_ATTR = {
2: "title", # Title
3: "title", # Subject
4: "author",
6: "comment",
8: "author", # Last saved by
12: "creation_date",
13: "last_modification",
14: "nb_page",
18: "producer",
}
IGNORE_SUMMARY = set((
1, # Code page
))
DOC_SUMMARY_ID_TO_ATTR = {
3: "title", # Subject
14: "author", # Manager
}
IGNORE_DOC_SUMMARY = set((
1, # Code page
))
def extract(self, ole2):
self._extract(ole2)
def _extract(self, fieldset, main_document=True):
if main_document:
# _feedAll() is needed to make sure that we get all root[*] fragments
fieldset._feedAll()
if "root[0]" in fieldset:
self.useRoot(fieldset["root[0]"])
doc_summary = self.getField(fieldset, main_document, "doc_summary[0]")
if doc_summary:
self.useSummary(doc_summary, True)
word_doc = self.getField(fieldset, main_document, "word_doc[0]")
if word_doc:
self.useWordDocument(word_doc)
summary = self.getField(fieldset, main_document, "summary[0]")
if summary:
self.useSummary(summary, False)
@fault_tolerant
def useRoot(self, root):
stream = root.getSubIStream()
ministream = guessParser(stream)
if not ministream:
warning("Unable to create the OLE2 mini stream parser!")
return
self._extract(ministream, main_document=False)
def getField(self, fieldset, main_document, name):
if name not in fieldset:
return None
# _feedAll() is needed to make sure that we get all fragments
# eg. summary[0], summary[1], ..., summary[n]
fieldset._feedAll()
field = fieldset[name]
if main_document:
stream = field.getSubIStream()
field = guessParser(stream)
if not field:
warning("Unable to create the OLE2 parser for %s!" % name)
return None
return field
@fault_tolerant
def useSummary(self, summary, is_doc_summary):
if "os" in summary:
self.os = summary["os"].display
if "section[0]" not in summary:
return
summary = summary["section[0]"]
for property in summary.array("property_index"):
self.useProperty(summary, property, is_doc_summary)
@fault_tolerant
def useWordDocument(self, doc):
self.comment = "Encrypted: %s" % doc["fEncrypted"].value
@fault_tolerant
def useProperty(self, summary, property, is_doc_summary):
field = summary.getFieldByAddress(property["offset"].value*8)
if not field \
or "value" not in field:
return
field = field["value"]
if not field.hasValue():
return
# Get value
value = field.value
if isinstance(value, (str, unicode)):
value = normalizeString(value)
if not value:
return
# Get property identifier
prop_id = property["id"].value
if is_doc_summary:
id_to_attr = self.DOC_SUMMARY_ID_TO_ATTR
ignore = self.IGNORE_DOC_SUMMARY
else:
id_to_attr = self.SUMMARY_ID_TO_ATTR
ignore = self.IGNORE_SUMMARY
if prop_id in ignore:
return
# Get Hachoir metadata key
try:
key = id_to_attr[prop_id]
use_prefix = False
except LookupError:
key = "comment"
use_prefix = True
if use_prefix:
prefix = property["id"].display
if (prefix in ("TotalEditingTime", "LastPrinted")) \
and (not field):
# Ignore null time delta
return
value = "%s: %s" % (prefix, value)
else:
if (key == "last_modification") and (not field):
# Ignore null timestamp
return
setattr(self, key, value)
class PcfMetadata(RootMetadata):
PROP_TO_KEY = {
'CHARSET_REGISTRY': 'charset',
'COPYRIGHT': 'copyright',
'WEIGHT_NAME': 'font_weight',
'FOUNDRY': 'author',
'FONT': 'title',
'_XMBDFED_INFO': 'producer',
}
def extract(self, pcf):
if "properties" in pcf:
self.useProperties(pcf["properties"])
def useProperties(self, properties):
last = properties["total_str_length"]
offset0 = last.address + last.size
for index in properties.array("property"):
# Search name and value
value = properties.getFieldByAddress(offset0+index["value_offset"].value*8)
if not value:
continue
value = value.value
if not value:
continue
name = properties.getFieldByAddress(offset0+index["name_offset"].value*8)
if not name:
continue
name = name.value
if name not in self.PROP_TO_KEY:
warning("Skip %s=%r" % (name, value))
continue
key = self.PROP_TO_KEY[name]
setattr(self, key, value)
class SwfMetadata(RootMetadata):
def extract(self, swf):
self.height = swf["rect/ymax"].value # twips
self.width = swf["rect/xmax"].value # twips
self.format_version = "flash version %s" % swf["version"].value
self.frame_rate = swf["frame_rate"].value
self.comment = "Frame count: %s" % swf["frame_count"].value
registerExtractor(TorrentFile, TorrentMetadata)
registerExtractor(TrueTypeFontFile, TTF_Metadata)
registerExtractor(OLE2_File, OLE2_Metadata)
registerExtractor(PcfFile, PcfMetadata)
registerExtractor(SwfFile, SwfMetadata)
| 8,936 | Python | .py | 236 | 28.305085 | 94 | 0.585082 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,575 | register.py | midgetspy_Sick-Beard/lib/hachoir_metadata/register.py | from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import (
humanDuration, humanBitRate,
humanFrequency, humanBitSize, humanFilesize,
humanDatetime)
from lib.hachoir_core.language import Language
from lib.hachoir_metadata.filter import Filter, NumberFilter, DATETIME_FILTER
from datetime import date, datetime, timedelta
from lib.hachoir_metadata.formatter import (
humanAudioChannel, humanFrameRate, humanComprRate, humanAltitude,
humanPixelSize, humanDPI)
from lib.hachoir_metadata.setter import (
setDatetime, setTrackNumber, setTrackTotal, setLanguage)
from lib.hachoir_metadata.metadata_item import Data
MIN_SAMPLE_RATE = 1000 # 1 kHz
MAX_SAMPLE_RATE = 192000 # 192 kHz
MAX_NB_CHANNEL = 8 # 8 channels
MAX_WIDTH = 20000 # 20 000 pixels
MAX_BIT_RATE = 500 * 1024 * 1024 # 500 Mbit/s
MAX_HEIGHT = MAX_WIDTH
MAX_DPI_WIDTH = 10000
MAX_DPI_HEIGHT = MAX_DPI_WIDTH
MAX_NB_COLOR = 2 ** 24 # 16 million of color
MAX_BITS_PER_PIXEL = 256 # 256 bits/pixel
MAX_FRAME_RATE = 150 # 150 frame/sec
MAX_NB_PAGE = 20000
MAX_COMPR_RATE = 1000.0
MIN_COMPR_RATE = 0.001
MAX_TRACK = 999
DURATION_FILTER = Filter(timedelta,
timedelta(milliseconds=1),
timedelta(days=365))
def registerAllItems(meta):
meta.register(Data("title", 100, _("Title"), type=unicode))
meta.register(Data("artist", 101, _("Artist"), type=unicode))
meta.register(Data("author", 102, _("Author"), type=unicode))
meta.register(Data("music_composer", 103, _("Music composer"), type=unicode))
meta.register(Data("album", 200, _("Album"), type=unicode))
meta.register(Data("duration", 201, _("Duration"), # integer in milliseconde
type=timedelta, text_handler=humanDuration, filter=DURATION_FILTER))
meta.register(Data("nb_page", 202, _("Nb page"), filter=NumberFilter(1, MAX_NB_PAGE)))
meta.register(Data("music_genre", 203, _("Music genre"), type=unicode))
meta.register(Data("language", 204, _("Language"), conversion=setLanguage, type=Language))
meta.register(Data("track_number", 205, _("Track number"), conversion=setTrackNumber,
filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
meta.register(Data("track_total", 206, _("Track total"), conversion=setTrackTotal,
filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
meta.register(Data("organization", 210, _("Organization"), type=unicode))
meta.register(Data("version", 220, _("Version")))
meta.register(Data("width", 301, _("Image width"), filter=NumberFilter(1, MAX_WIDTH), type=(int, long), text_handler=humanPixelSize))
meta.register(Data("height", 302, _("Image height"), filter=NumberFilter(1, MAX_HEIGHT), type=(int, long), text_handler=humanPixelSize))
meta.register(Data("nb_channel", 303, _("Channel"), text_handler=humanAudioChannel, filter=NumberFilter(1, MAX_NB_CHANNEL), type=(int, long)))
meta.register(Data("sample_rate", 304, _("Sample rate"), text_handler=humanFrequency, filter=NumberFilter(MIN_SAMPLE_RATE, MAX_SAMPLE_RATE), type=(int, long, float)))
meta.register(Data("bits_per_sample", 305, _("Bits/sample"), text_handler=humanBitSize, filter=NumberFilter(1, 64), type=(int, long)))
meta.register(Data("image_orientation", 306, _("Image orientation")))
meta.register(Data("nb_colors", 307, _("Number of colors"), filter=NumberFilter(1, MAX_NB_COLOR), type=(int, long)))
meta.register(Data("bits_per_pixel", 308, _("Bits/pixel"), filter=NumberFilter(1, MAX_BITS_PER_PIXEL), type=(int, long)))
meta.register(Data("filename", 309, _("File name"), type=unicode))
meta.register(Data("file_size", 310, _("File size"), text_handler=humanFilesize, type=(int, long)))
meta.register(Data("pixel_format", 311, _("Pixel format")))
meta.register(Data("compr_size", 312, _("Compressed file size"), text_handler=humanFilesize, type=(int, long)))
meta.register(Data("compr_rate", 313, _("Compression rate"), text_handler=humanComprRate, filter=NumberFilter(MIN_COMPR_RATE, MAX_COMPR_RATE), type=(int, long, float)))
meta.register(Data("width_dpi", 320, _("Image DPI width"), filter=NumberFilter(1, MAX_DPI_WIDTH), type=(int, long), text_handler=humanDPI))
meta.register(Data("height_dpi", 321, _("Image DPI height"), filter=NumberFilter(1, MAX_DPI_HEIGHT), type=(int, long), text_handler=humanDPI))
meta.register(Data("file_attr", 400, _("File attributes")))
meta.register(Data("file_type", 401, _("File type")))
meta.register(Data("subtitle_author", 402, _("Subtitle author"), type=unicode))
meta.register(Data("creation_date", 500, _("Creation date"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("last_modification", 501, _("Last modification"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("latitude", 510, _("Latitude"), type=float))
meta.register(Data("longitude", 511, _("Longitude"), type=float))
meta.register(Data("altitude", 511, _("Altitude"), type=float, text_handler=humanAltitude))
meta.register(Data("location", 530, _("Location"), type=unicode))
meta.register(Data("city", 531, _("City"), type=unicode))
meta.register(Data("country", 532, _("Country"), type=unicode))
meta.register(Data("charset", 540, _("Charset"), type=unicode))
meta.register(Data("font_weight", 550, _("Font weight")))
meta.register(Data("camera_aperture", 520, _("Camera aperture")))
meta.register(Data("camera_focal", 521, _("Camera focal")))
meta.register(Data("camera_exposure", 522, _("Camera exposure")))
meta.register(Data("camera_brightness", 530, _("Camera brightness")))
meta.register(Data("camera_model", 531, _("Camera model"), type=unicode))
meta.register(Data("camera_manufacturer", 532, _("Camera manufacturer"), type=unicode))
meta.register(Data("compression", 600, _("Compression")))
meta.register(Data("copyright", 601, _("Copyright"), type=unicode))
meta.register(Data("url", 602, _("URL"), type=unicode))
meta.register(Data("frame_rate", 603, _("Frame rate"), text_handler=humanFrameRate,
filter=NumberFilter(1, MAX_FRAME_RATE), type=(int, long, float)))
meta.register(Data("bit_rate", 604, _("Bit rate"), text_handler=humanBitRate,
filter=NumberFilter(1, MAX_BIT_RATE), type=(int, long, float)))
meta.register(Data("aspect_ratio", 604, _("Aspect ratio"), type=(int, long, float)))
meta.register(Data("os", 900, _("OS"), type=unicode))
meta.register(Data("producer", 901, _("Producer"), type=unicode))
meta.register(Data("comment", 902, _("Comment"), type=unicode))
meta.register(Data("format_version", 950, _("Format version"), type=unicode))
meta.register(Data("mime_type", 951, _("MIME type"), type=unicode))
meta.register(Data("endian", 952, _("Endianness"), type=unicode))
| 7,003 | Python | .py | 99 | 66.333333 | 172 | 0.691772 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,576 | version.py | midgetspy_Sick-Beard/lib/hachoir_metadata/version.py | PACKAGE = "hachoir-metadata"
VERSION = "1.3.3"
WEBSITE = "http://bitbucket.org/haypo/hachoir/wiki/hachoir-metadata"
LICENSE = "GNU GPL v2"
| 140 | Python | .py | 4 | 33.75 | 68 | 0.748148 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,577 | file_system.py | midgetspy_Sick-Beard/lib/hachoir_metadata/file_system.py | from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor
from lib.hachoir_metadata.safe import fault_tolerant
from lib.hachoir_parser.file_system import ISO9660
from datetime import datetime
class ISO9660_Metadata(RootMetadata):
def extract(self, iso):
desc = iso['volume[0]/content']
self.title = desc['volume_id'].value
self.title = desc['vol_set_id'].value
self.author = desc['publisher'].value
self.author = desc['data_preparer'].value
self.producer = desc['application'].value
self.copyright = desc['copyright'].value
self.readTimestamp('creation_date', desc['creation_ts'].value)
self.readTimestamp('last_modification', desc['modification_ts'].value)
@fault_tolerant
def readTimestamp(self, key, value):
if value.startswith("0000"):
return
value = datetime(
int(value[0:4]), int(value[4:6]), int(value[6:8]),
int(value[8:10]), int(value[10:12]), int(value[12:14]))
setattr(self, key, value)
registerExtractor(ISO9660, ISO9660_Metadata)
| 1,107 | Python | .py | 24 | 38.958333 | 78 | 0.677479 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,578 | image.py | midgetspy_Sick-Beard/lib/hachoir_metadata/image.py | from lib.hachoir_metadata.metadata import (registerExtractor,
Metadata, RootMetadata, MultipleMetadata)
from lib.hachoir_parser.image import (
BmpFile, IcoFile, PcxFile, GifFile, PngFile, TiffFile,
XcfFile, TargaFile, WMF_File, PsdFile)
from lib.hachoir_parser.image.png import getBitsPerPixel as pngBitsPerPixel
from lib.hachoir_parser.image.xcf import XcfProperty
from lib.hachoir_core.i18n import _
from lib.hachoir_metadata.safe import fault_tolerant
def computeComprRate(meta, compr_size):
"""
Compute image compression rate. Skip size of color palette, focus on
image pixels. Original size is width x height x bpp. Compressed size
is an argument (in bits).
Set "compr_data" with a string like "1.52x".
"""
if not meta.has("width") \
or not meta.has("height") \
or not meta.has("bits_per_pixel"):
return
if not compr_size:
return
orig_size = meta.get('width') * meta.get('height') * meta.get('bits_per_pixel')
meta.compr_rate = float(orig_size) / compr_size
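# Worked example (illustrative numbers): a 640x480 image at 24 bits/pixel
# holds 640 * 480 * 24 = 7372800 bits of pixel data; if the compressed
# stream is 819200 bits, compr_rate = 7372800.0 / 819200 = 9.0 ("9.0x").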
class BmpMetadata(RootMetadata):
def extract(self, image):
if "header" not in image:
return
hdr = image["header"]
self.width = hdr["width"].value
self.height = hdr["height"].value
bpp = hdr["bpp"].value
if bpp:
if bpp <= 8 and "used_colors" in hdr:
self.nb_colors = hdr["used_colors"].value
self.bits_per_pixel = bpp
self.compression = hdr["compression"].display
self.format_version = u"Microsoft Bitmap version %s" % hdr.getFormatVersion()
self.width_dpi = hdr["horizontal_dpi"].value
self.height_dpi = hdr["vertical_dpi"].value
if "pixels" in image:
computeComprRate(self, image["pixels"].size)
class TiffMetadata(RootMetadata):
key_to_attr = {
"img_width": "width",
"img_height": "width",
# TODO: Enable that (need link to value)
# "description": "comment",
# "doc_name": "title",
# "orientation": "image_orientation",
}
def extract(self, tiff):
if "ifd" in tiff:
self.useIFD(tiff["ifd"])
def useIFD(self, ifd):
for field in ifd:
try:
attrname = self.key_to_attr[field.name]
except KeyError:
continue
if "value" not in field:
continue
value = field["value"].value
setattr(self, attrname, value)
class IcoMetadata(MultipleMetadata):
color_to_bpp = {
2: 1,
16: 4,
256: 8
}
def extract(self, icon):
for index, header in enumerate(icon.array("icon_header")):
image = Metadata(self)
# Read size and colors from header
image.width = header["width"].value
image.height = header["height"].value
bpp = header["bpp"].value
nb_colors = header["nb_color"].value
if nb_colors != 0:
image.nb_colors = nb_colors
if bpp == 0 and nb_colors in self.color_to_bpp:
bpp = self.color_to_bpp[nb_colors]
elif bpp == 0:
bpp = 8
image.bits_per_pixel = bpp
image.setHeader(_("Icon #%u (%sx%s)")
% (1+index, image.get("width", "?"), image.get("height", "?")))
# Read compression from data (if available)
key = "icon_data[%u]/header/codec" % index
if key in icon:
image.compression = icon[key].display
key = "icon_data[%u]/pixels" % index
if key in icon:
computeComprRate(image, icon[key].size)
# Store new image
self.addGroup("image[%u]" % index, image)
class PcxMetadata(RootMetadata):
@fault_tolerant
def extract(self, pcx):
self.width = 1 + pcx["xmax"].value
self.height = 1 + pcx["ymax"].value
self.width_dpi = pcx["horiz_dpi"].value
self.height_dpi = pcx["vert_dpi"].value
self.bits_per_pixel = pcx["bpp"].value
if 1 <= pcx["bpp"].value <= 8:
self.nb_colors = 2 ** pcx["bpp"].value
self.compression = _("Run-length encoding (RLE)")
self.format_version = "PCX: %s" % pcx["version"].display
if "image_data" in pcx:
computeComprRate(self, pcx["image_data"].size)
class XcfMetadata(RootMetadata):
# Map image type to bits/pixel
TYPE_TO_BPP = {0: 24, 1: 8, 2: 8}
def extract(self, xcf):
self.width = xcf["width"].value
self.height = xcf["height"].value
try:
self.bits_per_pixel = self.TYPE_TO_BPP[ xcf["type"].value ]
except KeyError:
pass
self.format_version = xcf["type"].display
self.readProperties(xcf)
@fault_tolerant
def processProperty(self, prop):
type = prop["type"].value
if type == XcfProperty.PROP_PARASITES:
for field in prop["data"]:
if "name" not in field or "data" not in field:
continue
if field["name"].value == "gimp-comment":
self.comment = field["data"].value
elif type == XcfProperty.PROP_COMPRESSION:
self.compression = prop["data/compression"].display
elif type == XcfProperty.PROP_RESOLUTION:
self.width_dpi = int(prop["data/xres"].value)
self.height_dpi = int(prop["data/yres"].value)
def readProperties(self, xcf):
for prop in xcf.array("property"):
self.processProperty(prop)
class PngMetadata(RootMetadata):
TEXT_TO_ATTR = {
"software": "producer",
}
def extract(self, png):
if "header" in png:
self.useHeader(png["header"])
if "time" in png:
self.useTime(png["time"])
if "physical" in png:
self.usePhysical(png["physical"])
for comment in png.array("text"):
if "text" not in comment:
continue
keyword = comment["keyword"].value
text = comment["text"].value
try:
key = self.TEXT_TO_ATTR[keyword.lower()]
setattr(self, key, text)
except KeyError:
if keyword.lower() != "comment":
self.comment = "%s=%s" % (keyword, text)
else:
self.comment = text
compr_size = sum( data.size for data in png.array("data") )
computeComprRate(self, compr_size)
@fault_tolerant
def useTime(self, field):
self.creation_date = field.value
@fault_tolerant
def usePhysical(self, field):
self.width_dpi = field["pixel_per_unit_x"].value
self.height_dpi = field["pixel_per_unit_y"].value
@fault_tolerant
def useHeader(self, header):
self.width = header["width"].value
self.height = header["height"].value
# Read number of colors and pixel format
if "/palette/size" in header:
nb_colors = header["/palette/size"].value // 3
else:
nb_colors = None
if not header["has_palette"].value:
if header["has_alpha"].value:
self.pixel_format = _("RGBA")
else:
self.pixel_format = _("RGB")
elif "/transparency" in header:
self.pixel_format = _("Color index with transparency")
if nb_colors:
nb_colors -= 1
else:
self.pixel_format = _("Color index")
self.bits_per_pixel = pngBitsPerPixel(header)
if nb_colors:
self.nb_colors = nb_colors
        # Read compression
self.compression = header["compression"].display
class GifMetadata(RootMetadata):
def extract(self, gif):
self.useScreen(gif["/screen"])
if self.has("bits_per_pixel"):
self.nb_colors = (1 << self.get('bits_per_pixel'))
self.compression = _("LZW")
self.format_version = "GIF version %s" % gif["version"].value
for comments in gif.array("comments"):
for comment in gif.array(comments.name + "/comment"):
self.comment = comment.value
if "graphic_ctl/has_transp" in gif and gif["graphic_ctl/has_transp"].value:
self.pixel_format = _("Color index with transparency")
else:
self.pixel_format = _("Color index")
@fault_tolerant
def useScreen(self, screen):
self.width = screen["width"].value
self.height = screen["height"].value
self.bits_per_pixel = (1 + screen["bpp"].value)
class TargaMetadata(RootMetadata):
def extract(self, tga):
self.width = tga["width"].value
self.height = tga["height"].value
self.bits_per_pixel = tga["bpp"].value
if tga["nb_color"].value:
self.nb_colors = tga["nb_color"].value
self.compression = tga["codec"].display
if "pixels" in tga:
computeComprRate(self, tga["pixels"].size)
class WmfMetadata(RootMetadata):
def extract(self, wmf):
if wmf.isAPM():
if "amf_header/rect" in wmf:
rect = wmf["amf_header/rect"]
self.width = (rect["right"].value - rect["left"].value)
self.height = (rect["bottom"].value - rect["top"].value)
self.bits_per_pixel = 24
elif wmf.isEMF():
emf = wmf["emf_header"]
if "description" in emf:
desc = emf["description"].value
if "\0" in desc:
self.producer, self.title = desc.split("\0", 1)
else:
self.producer = desc
if emf["nb_colors"].value:
self.nb_colors = emf["nb_colors"].value
self.bits_per_pixel = 8
else:
self.bits_per_pixel = 24
self.width = emf["width_px"].value
self.height = emf["height_px"].value
class PsdMetadata(RootMetadata):
@fault_tolerant
def extract(self, psd):
self.width = psd["width"].value
self.height = psd["height"].value
self.bits_per_pixel = psd["depth"].value * psd["nb_channels"].value
self.pixel_format = psd["color_mode"].display
self.compression = psd["compression"].display
registerExtractor(IcoFile, IcoMetadata)
registerExtractor(GifFile, GifMetadata)
registerExtractor(XcfFile, XcfMetadata)
registerExtractor(TargaFile, TargaMetadata)
registerExtractor(PcxFile, PcxMetadata)
registerExtractor(BmpFile, BmpMetadata)
registerExtractor(PngFile, PngMetadata)
registerExtractor(TiffFile, TiffMetadata)
registerExtractor(WMF_File, WmfMetadata)
registerExtractor(PsdFile, PsdMetadata)
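# Adding another format follows the same pattern: subclass RootMetadata, read
# parser fields inside fault-tolerant helpers, then register the pair. A
# hypothetical sketch (XpmFile and its field names are assumptions, shown only
# to illustrate the registration contract):
#
#     class XpmMetadata(RootMetadata):
#         @fault_tolerant
#         def extract(self, xpm):
#             self.width = xpm["width"].value
#             self.height = xpm["height"].value
#
#     registerExtractor(XpmFile, XpmMetadata)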
# File: lib/hachoir_metadata/qt/dialog_ui.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'hachoir_metadata/qt/dialog.ui'
#
# Created: Mon Jul 26 03:10:06 2010
# by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(441, 412)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.open_button = QtGui.QPushButton(Form)
self.open_button.setObjectName("open_button")
self.horizontalLayout_2.addWidget(self.open_button)
self.files_combo = QtGui.QComboBox(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.files_combo.sizePolicy().hasHeightForWidth())
self.files_combo.setSizePolicy(sizePolicy)
self.files_combo.setObjectName("files_combo")
self.horizontalLayout_2.addWidget(self.files_combo)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.metadata_table = QtGui.QTableWidget(Form)
self.metadata_table.setAlternatingRowColors(True)
self.metadata_table.setShowGrid(False)
self.metadata_table.setRowCount(0)
self.metadata_table.setColumnCount(0)
self.metadata_table.setObjectName("metadata_table")
self.metadata_table.setColumnCount(0)
self.metadata_table.setRowCount(0)
self.verticalLayout.addWidget(self.metadata_table)
self.quit_button = QtGui.QPushButton(Form)
self.quit_button.setObjectName("quit_button")
self.verticalLayout.addWidget(self.quit_button)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "hachoir-metadata", None, QtGui.QApplication.UnicodeUTF8))
self.open_button.setText(QtGui.QApplication.translate("Form", "Open", None, QtGui.QApplication.UnicodeUTF8))
self.quit_button.setText(QtGui.QApplication.translate("Form", "Quit", None, QtGui.QApplication.UnicodeUTF8))
# File: lib/jsonrpclib/jsonrpc.py
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================
JSONRPC Library (jsonrpclib)
============================
This library is a JSON-RPC v.2 (proposed) implementation which
follows the xmlrpclib API for portability between clients. It
uses the same Server / ServerProxy, loads, dumps, etc. syntax,
while providing features not present in XML-RPC like:
* Keyword arguments
* Notifications
* Versioning
* Batches and batch notifications
Eventually, I'll add a SimpleXMLRPCServer compatible library,
and other things to tie the thing off nicely. :)
For a quick-start, just open a console and type the following,
replacing the server address, method, and parameters
appropriately.
>>> import jsonrpclib
>>> server = jsonrpclib.Server('http://localhost:8181')
>>> server.add(5, 6)
11
>>> server._notify.add(5, 6)
>>> batch = jsonrpclib.MultiCall(server)
>>> batch.add(3, 50)
>>> batch.add(2, 3)
>>> batch._notify.add(3, 5)
>>> batch()
[53, 5]
See http://code.google.com/p/jsonrpclib/ for more info.
"""
import types
import sys
from xmlrpclib import Transport as XMLTransport
from xmlrpclib import SafeTransport as XMLSafeTransport
from xmlrpclib import ServerProxy as XMLServerProxy
from xmlrpclib import _Method as XML_Method
import time
import string
import random
# Library includes
import lib.jsonrpclib
from lib.jsonrpclib import config
from lib.jsonrpclib import history
# JSON library importing
cjson = None
json = None
try:
import cjson
except ImportError:
try:
import json
except ImportError:
try:
import lib.simplejson as json
except ImportError:
raise ImportError(
'You must have the cjson, json, or simplejson ' +
'module(s) available.'
)
IDCHARS = string.ascii_lowercase+string.digits
class UnixSocketMissing(Exception):
"""
Just a properly named Exception if Unix Sockets usage is
attempted on a platform that doesn't support them (Windows)
"""
pass
#JSON Abstractions
def jdumps(obj, encoding='utf-8'):
# Do 'serialize' test at some point for other classes
global cjson
if cjson:
return cjson.encode(obj)
else:
return json.dumps(obj, encoding=encoding)
def jloads(json_string):
global cjson
if cjson:
return cjson.decode(json_string)
else:
return json.loads(json_string)
# XMLRPClib re-implementations
class ProtocolError(Exception):
pass
class TransportMixIn(object):
""" Just extends the XMLRPC transport where necessary. """
user_agent = config.user_agent
# for Python 2.7 support
_connection = None
def send_content(self, connection, request_body):
connection.putheader("Content-Type", "application/json-rpc")
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders()
if request_body:
connection.send(request_body)
def getparser(self):
target = JSONTarget()
return JSONParser(target), target
class JSONParser(object):
def __init__(self, target):
self.target = target
def feed(self, data):
self.target.feed(data)
def close(self):
pass
class JSONTarget(object):
def __init__(self):
self.data = []
def feed(self, data):
self.data.append(data)
def close(self):
return ''.join(self.data)
class Transport(TransportMixIn, XMLTransport):
pass
class SafeTransport(TransportMixIn, XMLSafeTransport):
pass
from httplib import HTTP, HTTPConnection
from socket import socket
USE_UNIX_SOCKETS = False
try:
from socket import AF_UNIX, SOCK_STREAM
USE_UNIX_SOCKETS = True
except ImportError:
pass
if (USE_UNIX_SOCKETS):
class UnixHTTPConnection(HTTPConnection):
def connect(self):
self.sock = socket(AF_UNIX, SOCK_STREAM)
self.sock.connect(self.host)
class UnixHTTP(HTTP):
_connection_class = UnixHTTPConnection
class UnixTransport(TransportMixIn, XMLTransport):
def make_connection(self, host):
import httplib
host, extra_headers, x509 = self.get_host_info(host)
return UnixHTTP(host)
class ServerProxy(XMLServerProxy):
"""
Unfortunately, much more of this class has to be copied since
so much of it does the serialization.
"""
def __init__(self, uri, transport=None, encoding=None,
verbose=0, version=None):
import urllib
if not version:
version = config.version
self.__version = version
schema, uri = urllib.splittype(uri)
if schema not in ('http', 'https', 'unix'):
raise IOError('Unsupported JSON-RPC protocol.')
if schema == 'unix':
if not USE_UNIX_SOCKETS:
# Don't like the "generic" Exception...
raise UnixSocketMissing("Unix sockets not available.")
self.__host = uri
self.__handler = '/'
else:
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
                # Not defined by the JSON-RPC spec, but an empty
                # handler should default to the root path.
                self.__handler = '/'
if transport is None:
if schema == 'unix':
transport = UnixTransport()
elif schema == 'https':
transport = SafeTransport()
else:
transport = Transport()
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
def _request(self, methodname, params, rpcid=None):
request = dumps(params, methodname, encoding=self.__encoding,
rpcid=rpcid, version=self.__version)
response = self._run_request(request)
check_for_errors(response)
return response['result']
def _request_notify(self, methodname, params, rpcid=None):
request = dumps(params, methodname, encoding=self.__encoding,
rpcid=rpcid, version=self.__version, notify=True)
response = self._run_request(request, notify=True)
check_for_errors(response)
return
def _run_request(self, request, notify=None):
history.add_request(request)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
# Here, the XMLRPC library translates a single list
# response to the single value -- should we do the
# same, and require a tuple / list to be passed to
# the response object, or expect the Server to be
# outputting the response appropriately?
history.add_response(response)
if not response:
return None
return_obj = loads(response)
return return_obj
def __getattr__(self, name):
# Same as original, just with new _Method reference
return _Method(self._request, name)
@property
def _notify(self):
# Just like __getattr__, but with notify namespace.
return _Notify(self._request_notify)
class _Method(XML_Method):
def __call__(self, *args, **kwargs):
if len(args) > 0 and len(kwargs) > 0:
raise ProtocolError('Cannot use both positional ' +
'and keyword arguments (according to JSON-RPC spec.)')
if len(args) > 0:
return self.__send(self.__name, args)
else:
return self.__send(self.__name, kwargs)
def __getattr__(self, name):
self.__name = '%s.%s' % (self.__name, name)
return self
# The old method returned a new instance, but this seemed wasteful.
# The only thing that changes is the name.
#return _Method(self.__send, "%s.%s" % (self.__name, name))
class _Notify(object):
def __init__(self, request):
self._request = request
def __getattr__(self, name):
return _Method(self._request, name)
# Batch implementation
class MultiCallMethod(object):
def __init__(self, method, notify=False):
self.method = method
self.params = []
self.notify = notify
def __call__(self, *args, **kwargs):
if len(kwargs) > 0 and len(args) > 0:
raise ProtocolError('JSON-RPC does not support both ' +
'positional and keyword arguments.')
if len(kwargs) > 0:
self.params = kwargs
else:
self.params = args
def request(self, encoding=None, rpcid=None):
return dumps(self.params, self.method, version=2.0,
encoding=encoding, rpcid=rpcid, notify=self.notify)
def __repr__(self):
return '%s' % self.request()
def __getattr__(self, method):
new_method = '%s.%s' % (self.method, method)
self.method = new_method
return self
class MultiCallNotify(object):
def __init__(self, multicall):
self.multicall = multicall
def __getattr__(self, name):
new_job = MultiCallMethod(name, notify=True)
self.multicall._job_list.append(new_job)
return new_job
class MultiCallIterator(object):
def __init__(self, results):
self.results = results
def __iter__(self):
for i in range(0, len(self.results)):
yield self[i]
def __getitem__(self, i):
item = self.results[i]
check_for_errors(item)
return item['result']
def __len__(self):
return len(self.results)
class MultiCall(object):
def __init__(self, server):
self._server = server
self._job_list = []
def _request(self):
if len(self._job_list) < 1:
# Should we alert? This /is/ pretty obvious.
return
request_body = '[ %s ]' % ','.join([job.request() for
job in self._job_list])
responses = self._server._run_request(request_body)
del self._job_list[:]
if not responses:
responses = []
return MultiCallIterator(responses)
@property
def _notify(self):
return MultiCallNotify(self)
def __getattr__(self, name):
new_job = MultiCallMethod(name)
self._job_list.append(new_job)
return new_job
__call__ = _request
# These lines conform to xmlrpclib's "compatibility" line.
# Not really sure if we should include these, but oh well.
Server = ServerProxy
class Fault(object):
# JSON-RPC error class
def __init__(self, code=-32000, message='Server error', rpcid=None):
self.faultCode = code
self.faultString = message
self.rpcid = rpcid
def error(self):
return {'code':self.faultCode, 'message':self.faultString}
def response(self, rpcid=None, version=None):
if not version:
version = config.version
if rpcid:
self.rpcid = rpcid
return dumps(
self, methodresponse=True, rpcid=self.rpcid, version=version
)
def __repr__(self):
return '<Fault %s: %s>' % (self.faultCode, self.faultString)
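# Sketch of how a Fault travels back to the caller (the rpcid is an
# assumption; servers normally echo the request's id):
#
#     >>> fault = Fault(-32601, 'Method not found', rpcid='1')
#     >>> err_json = fault.response(version=2.0)
#     >>> check_for_errors(loads(err_json))
#     Traceback (most recent call last):
#         ...
#     ProtocolError: (-32601, u'Method not found')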
def random_id(length=8):
return_id = ''
for i in range(length):
return_id += random.choice(IDCHARS)
return return_id
class Payload(dict):
def __init__(self, rpcid=None, version=None):
if not version:
version = config.version
self.id = rpcid
self.version = float(version)
def request(self, method, params=[]):
if type(method) not in types.StringTypes:
raise ValueError('Method name must be a string.')
if not self.id:
self.id = random_id()
request = { 'id':self.id, 'method':method }
if params:
request['params'] = params
if self.version >= 2:
request['jsonrpc'] = str(self.version)
return request
def notify(self, method, params=[]):
request = self.request(method, params)
if self.version >= 2:
del request['id']
else:
request['id'] = None
return request
def response(self, result=None):
response = {'result':result, 'id':self.id}
if self.version >= 2:
response['jsonrpc'] = str(self.version)
else:
response['error'] = None
return response
def error(self, code=-32000, message='Server error.'):
error = self.response()
if self.version >= 2:
del error['result']
else:
error['result'] = None
error['error'] = {'code':code, 'message':message}
return error
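# Illustrative payload shapes (dict key order is arbitrary; the rpcid is an
# assumption -- omitted ids are generated randomly):
#
#     >>> p = Payload(rpcid='abc', version=2.0)
#     >>> p.request('add', [5, 6])
#     {'jsonrpc': '2.0', 'id': 'abc', 'method': 'add', 'params': [5, 6]}
#     >>> p.notify('add', [5, 6])    # same shape, minus the 'id' key
#     >>> p.response(11)
#     {'jsonrpc': '2.0', 'id': 'abc', 'result': 11}
#     >>> p.error(-32601, 'Method not found')
#     {'jsonrpc': '2.0', 'id': 'abc', 'error': {'code': -32601, 'message': 'Method not found'}}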
def dumps(params=[], methodname=None, methodresponse=None,
encoding=None, rpcid=None, version=None, notify=None):
"""
This differs from the Python implementation in that it implements
the rpcid argument since the 2.0 spec requires it for responses.
"""
if not version:
version = config.version
valid_params = (types.TupleType, types.ListType, types.DictType)
    if type(methodname) in types.StringTypes and \
        type(params) not in valid_params and \
        not isinstance(params, Fault):
        # A named method whose params are neither list-ish nor a Fault
        # cannot be serialized, so error out.
        raise TypeError('Params must be a dict, list, tuple or Fault ' +
            'instance.')
# Begin parsing object
payload = Payload(rpcid=rpcid, version=version)
if not encoding:
encoding = 'utf-8'
if type(params) is Fault:
response = payload.error(params.faultCode, params.faultString)
return jdumps(response, encoding=encoding)
if type(methodname) not in types.StringTypes and methodresponse != True:
raise ValueError('Method name must be a string, or methodresponse '+
'must be set to True.')
if config.use_jsonclass == True:
from lib.jsonrpclib import jsonclass
params = jsonclass.dump(params)
if methodresponse is True:
if rpcid is None:
raise ValueError('A method response must have an rpcid.')
response = payload.response(params)
return jdumps(response, encoding=encoding)
request = None
if notify == True:
request = payload.notify(methodname, params)
else:
request = payload.request(methodname, params)
return jdumps(request, encoding=encoding)
def loads(data):
"""
This differs from the Python implementation, in that it returns
the request structure in Dict format instead of the method, params.
It will return a list in the case of a batch request / response.
"""
if data == '':
# notification
return None
result = jloads(data)
# if the above raises an error, the implementing server code
# should return something like the following:
# { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
if config.use_jsonclass == True:
from lib.jsonrpclib import jsonclass
result = jsonclass.load(result)
return result
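# Round-trip sketch: serialize a request and a matching response, then parse
# the response the way ServerProxy._request does (method name and values are
# made up for illustration):
#
#     >>> req = dumps([3, 4], methodname='subtract', rpcid='1', version=2.0)
#     >>> loads(req)['method']
#     u'subtract'
#     >>> resp = dumps(-1, methodresponse=True, rpcid='1', version=2.0)
#     >>> check_for_errors(loads(resp))['result']
#     -1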
def check_for_errors(result):
if not result:
# Notification
return result
if type(result) is not types.DictType:
raise TypeError('Response is not a dict.')
if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0:
raise NotImplementedError('JSON-RPC version not yet supported.')
if 'result' not in result.keys() and 'error' not in result.keys():
raise ValueError('Response does not have a result or error key.')
if 'error' in result.keys() and result['error'] != None:
code = result['error']['code']
message = result['error']['message']
raise ProtocolError((code, message))
return result
def isbatch(result):
if type(result) not in (types.ListType, types.TupleType):
return False
if len(result) < 1:
return False
if type(result[0]) is not types.DictType:
return False
if 'jsonrpc' not in result[0].keys():
return False
try:
version = float(result[0]['jsonrpc'])
except ValueError:
raise ProtocolError('"jsonrpc" key must be a float(able) value.')
if version < 2:
return False
return True
def isnotification(request):
if 'id' not in request.keys():
# 2.0 notification
return True
if request['id'] == None:
# 1.0 notification
return True
return False
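# The helpers above in a nutshell (hand-written payloads for illustration):
#
#     >>> isbatch(loads('[{"jsonrpc": "2.0", "id": "a", "method": "ping"}]'))
#     True
#     >>> isnotification({'jsonrpc': '2.0', 'method': 'ping'})   # 2.0: no id
#     True
#     >>> isnotification({'id': None, 'method': 'ping'})         # 1.0: null id
#     True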
# File: lib/jsonrpclib/config.py
import sys
class LocalClasses(dict):
def add(self, cls):
self[cls.__name__] = cls
class Config(object):
"""
This is pretty much used exclusively for the 'jsonclass'
functionality... set use_jsonclass to False to turn it off.
You can change serialize_method and ignore_attribute, or use
the local_classes.add(class) to include "local" classes.
"""
use_jsonclass = True
# Change to False to keep __jsonclass__ entries raw.
serialize_method = '_serialize'
# The serialize_method should be a string that references the
# method on a custom class object which is responsible for
# returning a tuple of the constructor arguments and a dict of
# attributes.
ignore_attribute = '_ignore'
# The ignore attribute should be a string that references the
# attribute on a custom class object which holds strings and / or
# references of the attributes the class translator should ignore.
classes = LocalClasses()
# The list of classes to use for jsonclass translation.
version = 2.0
# Version of the JSON-RPC spec to support
user_agent = 'jsonrpclib/0.1 (Python %s)' % \
'.'.join([str(ver) for ver in sys.version_info[0:3]])
# User agent to use for calls.
_instance = None
@classmethod
def instance(cls):
if not cls._instance:
cls._instance = cls()
return cls._instance
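# Sketch of registering a "local" class so the jsonclass translator can
# rebuild instances whose module can't be imported by name (Point is a
# hypothetical example class):
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x = x
#             self.y = y
#
#     Config.instance().classes.add(Point)
#     # jsonclass.load() can now resolve {'__jsonclass__': ['Point', [1, 2]]}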
# File: lib/jsonrpclib/history.py
class History(object):
"""
This holds all the response and request objects for a
session. A server using this should call "clear" after
each request cycle in order to keep it from clogging
memory.
"""
requests = []
responses = []
_instance = None
@classmethod
def instance(cls):
if not cls._instance:
cls._instance = cls()
return cls._instance
def add_response(self, response_obj):
self.responses.append(response_obj)
def add_request(self, request_obj):
self.requests.append(request_obj)
@property
def request(self):
if len(self.requests) == 0:
return None
else:
return self.requests[-1]
@property
def response(self):
if len(self.responses) == 0:
return None
else:
return self.responses[-1]
def clear(self):
del self.requests[:]
del self.responses[:]
# File: lib/jsonrpclib/jsonclass.py
import types
import inspect
import re
import traceback
from lib.jsonrpclib import config
iter_types = [
types.DictType,
types.ListType,
types.TupleType
]
string_types = [
types.StringType,
types.UnicodeType
]
numeric_types = [
types.IntType,
types.LongType,
types.FloatType
]
value_types = [
types.BooleanType,
types.NoneType
]
supported_types = iter_types+string_types+numeric_types+value_types
invalid_module_chars = r'[^a-zA-Z0-9\_\.]'
class TranslationError(Exception):
pass
def dump(obj, serialize_method=None, ignore_attribute=None, ignore=[]):
if not serialize_method:
serialize_method = config.serialize_method
if not ignore_attribute:
ignore_attribute = config.ignore_attribute
obj_type = type(obj)
# Parse / return default "types"...
if obj_type in numeric_types+string_types+value_types:
return obj
if obj_type in iter_types:
if obj_type in (types.ListType, types.TupleType):
new_obj = []
for item in obj:
new_obj.append(dump(item, serialize_method,
ignore_attribute, ignore))
if obj_type is types.TupleType:
new_obj = tuple(new_obj)
return new_obj
# It's a dict...
else:
new_obj = {}
for key, value in obj.iteritems():
new_obj[key] = dump(value, serialize_method,
ignore_attribute, ignore)
return new_obj
# It's not a standard type, so it needs __jsonclass__
module_name = inspect.getmodule(obj).__name__
class_name = obj.__class__.__name__
json_class = class_name
if module_name not in ['', '__main__']:
json_class = '%s.%s' % (module_name, json_class)
return_obj = {"__jsonclass__":[json_class,]}
# If a serialization method is defined..
if serialize_method in dir(obj):
# Params can be a dict (keyword) or list (positional)
# Attrs MUST be a dict.
serialize = getattr(obj, serialize_method)
params, attrs = serialize()
return_obj['__jsonclass__'].append(params)
return_obj.update(attrs)
return return_obj
# Otherwise, try to figure it out
# Obviously, we can't assume to know anything about the
# parameters passed to __init__
return_obj['__jsonclass__'].append([])
attrs = {}
ignore_list = getattr(obj, ignore_attribute, [])+ignore
for attr_name, attr_value in obj.__dict__.iteritems():
if type(attr_value) in supported_types and \
attr_name not in ignore_list and \
attr_value not in ignore_list:
attrs[attr_name] = dump(attr_value, serialize_method,
ignore_attribute, ignore)
return_obj.update(attrs)
return return_obj
def load(obj):
if type(obj) in string_types+numeric_types+value_types:
return obj
if type(obj) is types.ListType:
return_list = []
for entry in obj:
return_list.append(load(entry))
return return_list
    # Otherwise, it's a dict type
if '__jsonclass__' not in obj.keys():
return_dict = {}
for key, value in obj.iteritems():
new_value = load(value)
return_dict[key] = new_value
return return_dict
# It's a dict, and it's a __jsonclass__
orig_module_name = obj['__jsonclass__'][0]
params = obj['__jsonclass__'][1]
if orig_module_name == '':
raise TranslationError('Module name empty.')
json_module_clean = re.sub(invalid_module_chars, '', orig_module_name)
if json_module_clean != orig_module_name:
raise TranslationError('Module name %s has invalid characters.' %
orig_module_name)
json_module_parts = json_module_clean.split('.')
json_class = None
if len(json_module_parts) == 1:
# Local class name -- probably means it won't work
if json_module_parts[0] not in config.classes.keys():
raise TranslationError('Unknown class or module %s.' %
json_module_parts[0])
json_class = config.classes[json_module_parts[0]]
else:
json_class_name = json_module_parts.pop()
json_module_tree = '.'.join(json_module_parts)
try:
temp_module = __import__(json_module_tree)
except ImportError:
raise TranslationError('Could not import %s from module %s.' %
(json_class_name, json_module_tree))
json_class = getattr(temp_module, json_class_name)
# Creating the object...
new_obj = None
if type(params) is types.ListType:
new_obj = json_class(*params)
elif type(params) is types.DictType:
new_obj = json_class(**params)
else:
raise TranslationError('Constructor args must be a dict or list.')
for key, value in obj.iteritems():
if key == '__jsonclass__':
continue
setattr(new_obj, key, value)
return new_obj
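if __name__ == '__main__':
    # Minimal round-trip demo (not part of the library): Bag is a throwaway
    # class, and running this assumes the package imports as lib.jsonrpclib.
    class Bag(object):
        def __init__(self):
            self.items = []
    config.classes.add(Bag)
    bag = Bag()
    bag.items.append('x')
    flat = dump(bag)
    # flat looks like {'__jsonclass__': ['Bag', []], 'items': ['x']}
    assert isinstance(load(flat), Bag)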
# File: lib/jsonrpclib/__init__.py
from config import Config
config = Config.instance()
from history import History
history = History.instance()
from jsonrpc import Server, MultiCall, Fault
from jsonrpc import ProtocolError, loads, dumps
# File: lib/jsonrpclib/SimpleJSONRPCServer.py
import lib.jsonrpclib as jsonrpclib  # aliased so the bare jsonrpclib.* calls below resolve
from lib.jsonrpclib import Fault
from lib.jsonrpclib.jsonrpc import USE_UNIX_SOCKETS
import SimpleXMLRPCServer
import SocketServer
import socket
import logging
import os
import types
import traceback
import sys
try:
import fcntl
except ImportError:
# For Windows
fcntl = None
def get_version(request):
# must be a dict
if 'jsonrpc' in request.keys():
return 2.0
if 'id' in request.keys():
return 1.0
return None
def validate_request(request):
if type(request) is not types.DictType:
fault = Fault(
-32600, 'Request must be {}, not %s.' % type(request)
)
return fault
rpcid = request.get('id', None)
version = get_version(request)
if not version:
fault = Fault(-32600, 'Request %s invalid.' % request, rpcid=rpcid)
return fault
request.setdefault('params', [])
method = request.get('method', None)
params = request.get('params')
param_types = (types.ListType, types.DictType, types.TupleType)
if not method or type(method) not in types.StringTypes or \
type(params) not in param_types:
fault = Fault(
-32600, 'Invalid request parameters or method.', rpcid=rpcid
)
return fault
return True
class SimpleJSONRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
def __init__(self, encoding=None):
SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
allow_none=True,
encoding=encoding)
def _marshaled_dispatch(self, data, dispatch_method = None):
response = None
try:
request = jsonrpclib.loads(data)
except Exception, e:
fault = Fault(-32700, 'Request %s invalid. (%s)' % (data, e))
response = fault.response()
return response
if not request:
fault = Fault(-32600, 'Request invalid -- no request data.')
return fault.response()
if type(request) is types.ListType:
# This SHOULD be a batch, by spec
responses = []
for req_entry in request:
result = validate_request(req_entry)
if type(result) is Fault:
responses.append(result.response())
continue
resp_entry = self._marshaled_single_dispatch(req_entry)
if resp_entry is not None:
responses.append(resp_entry)
if len(responses) > 0:
response = '[%s]' % ','.join(responses)
else:
response = ''
else:
result = validate_request(request)
if type(result) is Fault:
return result.response()
response = self._marshaled_single_dispatch(request)
return response
def _marshaled_single_dispatch(self, request):
        # TODO - use multiprocessing here, and skip the response if
# it is a notification
# Put in support for custom dispatcher here
# (See SimpleXMLRPCServer._marshaled_dispatch)
method = request.get('method')
params = request.get('params')
try:
response = self._dispatch(method, params)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
return fault.response()
if 'id' not in request.keys() or request['id'] == None:
# It's a notification
return None
try:
response = jsonrpclib.dumps(response,
methodresponse=True,
rpcid=request['id']
)
return response
except:
exc_type, exc_value, exc_tb = sys.exc_info()
fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
return fault.response()
def _dispatch(self, method, params):
func = None
try:
func = self.funcs[method]
except KeyError:
if self.instance is not None:
if hasattr(self.instance, '_dispatch'):
return self.instance._dispatch(method, params)
else:
try:
func = SimpleXMLRPCServer.resolve_dotted_attribute(
self.instance,
method,
True
)
except AttributeError:
pass
if func is not None:
try:
if type(params) is types.ListType:
response = func(*params)
else:
response = func(**params)
return response
except TypeError:
return Fault(-32602, 'Invalid parameters.')
except:
err_lines = traceback.format_exc().splitlines()
trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
fault = jsonrpclib.Fault(-32603, 'Server error: %s' %
trace_string)
return fault
else:
return Fault(-32601, 'Method %s not supported.' % method)
class SimpleJSONRPCRequestHandler(
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
def do_POST(self):
if not self.is_rpc_path_valid():
self.report_404()
return
try:
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = ''.join(L)
response = self.server._marshaled_dispatch(data)
self.send_response(200)
except Exception, e:
self.send_response(500)
err_lines = traceback.format_exc().splitlines()
trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
fault = jsonrpclib.Fault(-32603, 'Server error: %s' % trace_string)
response = fault.response()
if response == None:
response = ''
self.send_header("Content-type", "application/json-rpc")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
self.wfile.flush()
self.connection.shutdown(1)
class SimpleJSONRPCServer(SocketServer.TCPServer, SimpleJSONRPCDispatcher):
allow_reuse_address = True
def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
logRequests=True, encoding=None, bind_and_activate=True,
address_family=socket.AF_INET):
self.logRequests = logRequests
SimpleJSONRPCDispatcher.__init__(self, encoding)
# TCPServer.__init__ has an extra parameter on 2.6+, so
# check Python version and decide on how to call it
vi = sys.version_info
self.address_family = address_family
if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
# Unix sockets can't be bound if they already exist in the
# filesystem. The convention of e.g. X11 is to unlink
# before binding again.
if os.path.exists(addr):
try:
os.unlink(addr)
except OSError:
logging.warning("Could not unlink socket %s", addr)
# if python 2.5 and lower
if vi[0] < 3 and vi[1] < 6:
SocketServer.TCPServer.__init__(self, addr, requestHandler)
else:
SocketServer.TCPServer.__init__(self, addr, requestHandler,
bind_and_activate)
if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
def __init__(self, encoding=None):
SimpleJSONRPCDispatcher.__init__(self, encoding)
def handle_jsonrpc(self, request_text):
response = self._marshaled_dispatch(request_text)
print 'Content-Type: application/json-rpc'
print 'Content-Length: %d' % len(response)
print
sys.stdout.write(response)
handle_xmlrpc = handle_jsonrpc
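# Typical wiring, as a sketch (host and port are placeholders, and
# serve_forever() blocks, so this stays commented out):
#
#     server = SimpleJSONRPCServer(('localhost', 8080))
#     server.register_function(pow)
#     server.register_function(lambda x, y: x + y, 'add')
#     server.serve_forever()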
# File: lib/simplejson/decoder.py
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from lib.simplejson.scanner import make_scanner
try:
from lib.simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
#fmt = '{0}: line {1} column {2} (char {3})'
#return fmt.format(msg, lineno, colno, pos)
fmt = '%s: line %d column %d (char %d)'
return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
#fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
#return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
#msg = "Invalid control character {0!r} at".format(terminator)
raise ValueError(errmsg(msg, s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\escape: " + repr(esc)
raise ValueError(errmsg(msg, s, end))
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise ValueError(errmsg(msg, s, end))
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError(errmsg(msg, s, end))
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError(errmsg(msg, s, end))
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = {}
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work;
        strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
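if __name__ == '__main__':
    # Quick sanity demo (assumes lib.simplejson is importable): decode()
    # rejects trailing data, while raw_decode() reports where it stopped.
    decoder = JSONDecoder()
    obj, end = decoder.raw_decode('{"a": 1} trailing garbage')
    assert obj == {u'a': 1} and end == 8
    try:
        decoder.decode('{"a": 1} trailing garbage')
    except ValueError:
        pass  # "Extra data" -- decode() insists on consuming the whole string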
# File: lib/simplejson/encoder.py
"""Implementation of JSONEncoder
"""
import re
try:
from lib.simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from lib.simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
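if __name__ == '__main__':
    # Minimal demo of the two encoding entry points (demo only, not part of
    # the library). encode() builds one string; iterencode() yields chunks,
    # which suits streaming to a socket or file.
    compact = JSONEncoder(separators=(',', ':'), sort_keys=True)
    assert compact.encode({'b': 2, 'a': 1}) == '{"a":1,"b":2}'
    chunks = list(JSONEncoder().iterencode([1, 'two']))
    assert ''.join(chunks) == '[1, "two"]'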
# File: lib/simplejson/__init__.py
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.9'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <[email protected]>'
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
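# A small sketch of ``skipkeys`` (an assumed example, not from the original
# documentation): non-string keys are silently dropped instead of raising.
#
#     >>> import simplejson as json
#     >>> json.dumps({(1, 2): "pair", "ok": 1}, skipkeys=True)
#     '{"ok": 1}'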
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
    If the contents of ``fp`` are encoded with an ASCII based encoding other
    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
    be specified. Encodings that are not ASCII based (such as UCS-2) are
    not allowed; such files should be wrapped with
    ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
    object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
    following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
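# A small sketch of ``parse_int`` (an assumed example): every JSON integer
# is routed through the given callable.
#
#     >>> import simplejson as json
#     >>> json.loads('[1, 2]', parse_int=float)
#     [1.0, 2.0]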
| 12,463 | Python | .py | 259 | 42.644788 | 80 | 0.656649 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,589 | scanner.py | midgetspy_Sick-Beard/lib/simplejson/scanner.py | """JSON token scanner
"""
import re
try:
from lib.simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
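# A minimal sketch of driving the scanner directly; the decoder import and
# the sample input are illustrative assumptions (normally JSONDecoder wires
# this up for you via its ``scan_once`` attribute):
#
#     >>> from lib.simplejson.decoder import JSONDecoder
#     >>> scan = py_make_scanner(JSONDecoder())
#     >>> scan('[1, true, "x"]', 0)
#     ([1, True, u'x'], 14)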
| 2,231 | Python | .py | 57 | 30.982456 | 93 | 0.577562 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,590 | pynma.py | midgetspy_Sick-Beard/lib/pynma/pynma.py | #!/usr/bin/python
from xml.dom.minidom import parseString
from httplib import HTTPSConnection
from urllib import urlencode
__version__ = "0.1"
API_SERVER = 'nma.usk.bz'
ADD_PATH = '/publicapi/notify'
USER_AGENT="PyNMA/v%s"%__version__
def uniq_preserve(seq): # Dave Kirby
# Order preserving
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def uniq(seq):
# Not order preserving
return {}.fromkeys(seq).keys()
class PyNMA(object):
"""PyNMA(apikey=[], developerkey=None)
takes 2 optional arguments:
        - (opt) apikey: may be a string containing 1 key or an array of keys
- (opt) developerkey: where you can store your developer key
"""
def __init__(self, apikey=[], developerkey=None):
self._developerkey = None
self.developerkey(developerkey)
if apikey:
if type(apikey) == str:
apikey = [apikey]
self._apikey = uniq(apikey)
def addkey(self, key):
"Add a key (register ?)"
if type(key) == str:
if not key in self._apikey:
self._apikey.append(key)
elif type(key) == list:
for k in key:
if not k in self._apikey:
self._apikey.append(k)
def delkey(self, key):
"Removes a key (unregister ?)"
if type(key) == str:
if key in self._apikey:
self._apikey.remove(key)
elif type(key) == list:
for k in key:
                if k in self._apikey:
self._apikey.remove(k)
def developerkey(self, developerkey):
"Sets the developer key (and check it has the good length)"
if type(developerkey) == str and len(developerkey) == 48:
self._developerkey = developerkey
def push(self, application="", event="", description="", url="", priority=0, batch_mode=False):
"""Pushes a message on the registered API keys.
        takes 6 arguments:
- (req) application: application name [256]
- (req) event: event name [1000]
- (req) description: description [10000]
- (opt) url: url [512]
- (opt) priority: from -2 (lowest) to 2 (highest) (def:0)
- (opt) batch_mode: call API 5 by 5 (def:False)
        Warning: in batch_mode an error is reported only if all API keys are bad
cf: http://nma.usk.bz/api.php
"""
datas = {
'application': application[:256].encode('utf8'),
            'event': event[:1000].encode('utf8'),  # API caps events at 1000 chars
'description': description[:10000].encode('utf8'),
'priority': priority
}
if url:
datas['url'] = url[:512]
if self._developerkey:
datas['developerkey'] = self._developerkey
results = {}
if not batch_mode:
for key in self._apikey:
datas['apikey'] = key
res = self.callapi('POST', ADD_PATH, datas)
results[key] = res
else:
for i in range(0, len(self._apikey), 5):
datas['apikey'] = ",".join(self._apikey[i:i+5])
res = self.callapi('POST', ADD_PATH, datas)
results[datas['apikey']] = res
return results
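    # A minimal usage sketch (the key and names below are placeholders,
    # not real credentials):
    #
    #     p = PyNMA("my-nma-apikey")
    #     results = p.push("SickBeard", "Download finished",
    #                      "Episode grabbed and processed", priority=1)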
def callapi(self, method, path, args):
headers = { 'User-Agent': USER_AGENT }
if method == "POST":
headers['Content-type'] = "application/x-www-form-urlencoded"
http_handler = HTTPSConnection(API_SERVER)
http_handler.request(method, path, urlencode(args), headers)
resp = http_handler.getresponse()
        try:
            res = self._parse_response(resp.read())
        except Exception, e:
            res = {'type': "pynmaerror",
                   'code': 600,
                   'message': str(e)
                   }
return res
    def _parse_response(self, response):
root = parseString(response).firstChild
for elem in root.childNodes:
if elem.nodeType == elem.TEXT_NODE: continue
if elem.tagName == 'success':
res = dict(elem.attributes.items())
res['message'] = ""
res['type'] = elem.tagName
return res
if elem.tagName == 'error':
res = dict(elem.attributes.items())
res['message'] = elem.firstChild.nodeValue
res['type'] = elem.tagName
return res
| 4,721 | Python | .py | 114 | 29.780702 | 99 | 0.535588 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,591 | relativedelta.py | midgetspy_Sick-Beard/lib/dateutil/relativedelta.py | """
Copyright (c) 2003-2010 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
import calendar
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta:
"""
    The relativedelta type is based on the specification of the excellent
work done by M.-A. Lemburg in his mx.DateTime extension. However,
notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
    There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes:
relativedelta(datetime1, datetime2)
And the other way is to use the following keyword arguments:
year, month, day, hour, minute, second, microsecond:
Absolute information.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative.
weekday:
One of the weekday instances (MO, TU, etc). These instances may
receive a parameter N, specifying the Nth weekday, which could
        be positive or negative (like MO(+1) or MO(-2)). Not specifying
it is the same as specifying +1. You can also use an integer,
where 0=MO.
leapdays:
Will add given days to the date found, if year is a leap
        year, and the date found is after February 28.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
Here is the behavior of operations with relativedelta:
1) Calculate the absolute year, using the 'year' argument, or the
original datetime year, if the argument is not present.
2) Add the relative 'years' argument to the absolute year.
3) Do steps 1 and 2 for month/months.
4) Calculate the absolute day, using the 'day' argument, or the
original datetime day, if the argument is not present. Then,
subtract from the day until it fits in the year and month
found after their operations.
5) Add the relative 'days' argument to the absolute day. Notice
that the 'weeks' argument is multiplied by 7 and added to
'days'.
6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
microsecond/microseconds.
7) If the 'weekday' argument is present, calculate the weekday,
with the given (wday, nth) tuple. wday is the index of the
weekday (0-6, 0=Mon), and nth is the number of weeks to add
forward or backward, depending on its signal. Notice that if
the calculated date is already Monday, for example, using
(0, 1) or (0, -1) won't change the day.
"""
def __init__(self, dt1=None, dt2=None,
years=0, months=0, days=0, leapdays=0, weeks=0,
hours=0, minutes=0, seconds=0, microseconds=0,
year=None, month=None, day=None, weekday=None,
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
if not isinstance(dt1, datetime.date) or \
not isinstance(dt2, datetime.date):
raise TypeError, "relativedelta only diffs datetime/date"
if type(dt1) is not type(dt2):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
dt2 = datetime.datetime.fromordinal(dt2.toordinal())
self.years = 0
self.months = 0
self.days = 0
self.leapdays = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.microseconds = 0
self.year = None
self.month = None
self.day = None
self.weekday = None
self.hour = None
self.minute = None
self.second = None
self.microsecond = None
self._has_time = 0
months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
self._set_months(months)
dtm = self.__radd__(dt2)
if dt1 < dt2:
while dt1 > dtm:
months += 1
self._set_months(months)
dtm = self.__radd__(dt2)
else:
while dt1 < dtm:
months -= 1
self._set_months(months)
dtm = self.__radd__(dt2)
delta = dt1 - dtm
self.seconds = delta.seconds+delta.days*86400
self.microseconds = delta.microseconds
else:
self.years = years
self.months = months
self.days = days+weeks*7
self.leapdays = leapdays
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.microseconds = microseconds
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
if type(weekday) is int:
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
yday = 0
if nlyearday:
yday = nlyearday
elif yearday:
yday = yearday
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
if idx == 0:
self.day = yday
else:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError, "invalid year day (%d)" % yday
self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = self.microseconds//abs(self.microseconds)
div, mod = divmod(self.microseconds*s, 1000000)
self.microseconds = mod*s
self.seconds += div*s
if abs(self.seconds) > 59:
s = self.seconds//abs(self.seconds)
div, mod = divmod(self.seconds*s, 60)
self.seconds = mod*s
self.minutes += div*s
if abs(self.minutes) > 59:
s = self.minutes//abs(self.minutes)
div, mod = divmod(self.minutes*s, 60)
self.minutes = mod*s
self.hours += div*s
if abs(self.hours) > 23:
s = self.hours//abs(self.hours)
div, mod = divmod(self.hours*s, 24)
self.hours = mod*s
self.days += div*s
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years += div*s
if (self.hours or self.minutes or self.seconds or self.microseconds or
self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years = div*s
else:
self.years = 0
def __radd__(self, other):
if not isinstance(other, datetime.date):
raise TypeError, "unsupported type for add operation"
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
month = self.month or other.month
if self.months:
assert 1 <= abs(self.months) <= 12
month += self.months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(calendar.monthrange(year, month)[1],
self.day or other.day)
repl = {"year": year, "month": month, "day": day}
for attr in ["hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
repl[attr] = value
days = self.days
if self.leapdays and month > 2 and calendar.isleap(year):
days += self.leapdays
ret = (other.replace(**repl)
+ datetime.timedelta(days=days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds))
if self.weekday:
weekday, nth = self.weekday.weekday, self.weekday.n or 1
jumpdays = (abs(nth)-1)*7
if nth > 0:
jumpdays += (7-ret.weekday()+weekday)%7
else:
jumpdays += (ret.weekday()-weekday)%7
jumpdays *= -1
ret += datetime.timedelta(days=jumpdays)
return ret
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __add__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for add operation"
return relativedelta(years=other.years+self.years,
months=other.months+self.months,
days=other.days+self.days,
hours=other.hours+self.hours,
minutes=other.minutes+self.minutes,
seconds=other.seconds+self.seconds,
microseconds=other.microseconds+self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
                             microsecond=other.microsecond or self.microsecond)
def __sub__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for sub operation"
return relativedelta(years=other.years-self.years,
months=other.months-self.months,
days=other.days-self.days,
hours=other.hours-self.hours,
minutes=other.minutes-self.minutes,
seconds=other.seconds-self.seconds,
microseconds=other.microseconds-self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
                             microsecond=other.microsecond or self.microsecond)
def __neg__(self):
return relativedelta(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __nonzero__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
def __mul__(self, other):
f = float(other)
return relativedelta(years=self.years*f,
months=self.months*f,
days=self.days*f,
hours=self.hours*f,
minutes=self.minutes*f,
seconds=self.seconds*f,
microseconds=self.microseconds*f,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __eq__(self, other):
if not isinstance(other, relativedelta):
return False
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
def __ne__(self, other):
return not self.__eq__(other)
def __div__(self, other):
return self.__mul__(1/float(other))
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("%s=%+d" % (attr, value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| 17,135 | Python | .py | 387 | 29.385013 | 79 | 0.517093 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,592 | easter.py | midgetspy_Sick-Beard/lib/dateutil/easter.py | """
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Oudin (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError, "invalid method"
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g+15)%30
j = (y+y//4+i)%7
if method == 2:
            # Extra days to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e+y//100-16-(y//100-16)//4
else:
# New method
c = y//100
h = (c-c//4-(8*c+13)//25+19*g+15)%30
i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
j = (y+y//4+i+2-c+c//4)%7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y),int(m),int(d))
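# A quick doctest-style sketch of the function above (the dates are
# well-known Easter Sundays, used here as a sanity check):
#
#     >>> easter(2000)
#     datetime.date(2000, 4, 23)
#     >>> easter(2000, EASTER_ORTHODOX)
#     datetime.date(2000, 4, 30)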
| 2,633 | Python | .py | 72 | 30.680556 | 76 | 0.624556 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,593 | __init__.py | midgetspy_Sick-Beard/lib/dateutil/__init__.py | """
Copyright (c) 2003-2010 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
__version__ = "1.5"
| 252 | Python | .py | 8 | 30.375 | 64 | 0.728395 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,594 | rrule.py | midgetspy_Sick-Beard/lib/dateutil/rrule.py | """
Copyright (c) 2003-2010 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import itertools
import datetime
import calendar
import thread
import sys
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = range(7)
# Imported on demand.
easter = None
parser = None
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError, "Can't create weekday with n == 0"
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase:
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(gen.next())
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxint,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = gen.next()
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
    # __len__() introduces a large performance penalty.
def count(self):
if self._len is None:
for x in self: pass
return self._len
def before(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def between(self, after, before, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
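# A short sketch of the helpers above, using the rrule subclass defined
# below (the dates are illustrative assumptions):
#
#     >>> r = rrule(DAILY, count=5, dtstart=datetime.datetime(2010, 1, 1))
#     >>> r.after(datetime.datetime(2010, 1, 2))
#     datetime.datetime(2010, 1, 3, 0, 0)
#     >>> r.before(datetime.datetime(2010, 1, 3))
#     datetime.datetime(2010, 1, 2, 0, 0)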
class rrule(rrulebase):
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
rrulebase.__init__(self, cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif type(wkst) is int:
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif type(bysetpos) is int:
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if not (byweekno or byyearday or bymonthday or
byweekday is not None or byeaster is not None):
if freq == YEARLY:
if not bymonth:
bymonth = dtstart.month
bymonthday = dtstart.day
elif freq == MONTHLY:
bymonthday = dtstart.day
elif freq == WEEKLY:
byweekday = dtstart.weekday()
# bymonth
if not bymonth:
self._bymonth = None
elif type(bymonth) is int:
self._bymonth = (bymonth,)
else:
self._bymonth = tuple(bymonth)
# byyearday
if not byyearday:
self._byyearday = None
elif type(byyearday) is int:
self._byyearday = (byyearday,)
else:
self._byyearday = tuple(byyearday)
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if type(byeaster) is int:
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(byeaster)
else:
self._byeaster = None
        # bymonthday
if not bymonthday:
self._bymonthday = ()
self._bynmonthday = ()
elif type(bymonthday) is int:
if bymonthday < 0:
self._bynmonthday = (bymonthday,)
self._bymonthday = ()
else:
self._bymonthday = (bymonthday,)
self._bynmonthday = ()
else:
self._bymonthday = tuple([x for x in bymonthday if x > 0])
self._bynmonthday = tuple([x for x in bymonthday if x < 0])
# byweekno
if byweekno is None:
self._byweekno = None
elif type(byweekno) is int:
self._byweekno = (byweekno,)
else:
self._byweekno = tuple(byweekno)
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
elif type(byweekday) is int:
self._byweekday = (byweekday,)
self._bynweekday = None
elif hasattr(byweekday, "n"):
if not byweekday.n or freq > MONTHLY:
self._byweekday = (byweekday.weekday,)
self._bynweekday = None
else:
self._bynweekday = ((byweekday.weekday, byweekday.n),)
self._byweekday = None
else:
self._byweekday = []
self._bynweekday = []
for wday in byweekday:
if type(wday) is int:
self._byweekday.append(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.append(wday.weekday)
else:
self._bynweekday.append((wday.weekday, wday.n))
self._byweekday = tuple(self._byweekday)
self._bynweekday = tuple(self._bynweekday)
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = (dtstart.hour,)
else:
self._byhour = None
elif type(byhour) is int:
self._byhour = (byhour,)
else:
self._byhour = tuple(byhour)
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = (dtstart.minute,)
else:
self._byminute = None
elif type(byminute) is int:
self._byminute = (byminute,)
else:
self._byminute = tuple(byminute)
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = (dtstart.second,)
else:
self._bysecond = None
elif type(bysecond) is int:
self._bysecond = (bysecond,)
else:
self._bysecond = tuple(bysecond)
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY:ii.ydayset,
MONTHLY:ii.mdayset,
WEEKLY:ii.wdayset,
DAILY:ii.ddayset,
HOURLY:ii.ddayset,
MINUTELY:ii.ddayset,
SECONDLY:ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY:ii.htimeset,
MINUTELY:ii.mtimeset,
SECONDLY:ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday
and -ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday
and -ii.nextyearlen+i-ii.yearlen
not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal+i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
while True:
hour += interval
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if not byhour or hour in byhour:
break
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
while True:
minute += interval
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
filtered = False
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute)):
break
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399-(hour*3600+minute*60+second))
//interval)*interval)
while True:
second += self._interval
div, mod = divmod(second, 60)
if div:
second = mod
minute += div
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
break
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
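# A minimal usage sketch for the class above (an illustrative example, not
# part of the original module):
#
#     >>> list(rrule(DAILY, count=3, dtstart=datetime.datetime(2010, 1, 1)))
#     [datetime.datetime(2010, 1, 1, 0, 0), datetime.datetime(2010, 1, 2, 0, 0),
#      datetime.datetime(2010, 1, 3, 0, 0)]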
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365+calendar.isleap(year)
self.nextyearlen = 365+calendar.isleap(year+1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
#no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1,1,1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst)%7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen+
(lyearweekday-rr._wkst)%7)%7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst)%7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and
(month != self.lastmonth or year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday)%7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday)%7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return range(self.yearlen), 0, self.yearlen
def mdayset(self, year, month, day):
set = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
set[i] = i
return set, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
set = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
set[i] = i
i += 1
#if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return set, start, i
def ddayset(self, year, month, day):
set = [None]*self.yearlen
i = datetime.date(year, month, day).toordinal()-self.yearordinal
set[i] = i
return set, i, i+1
def htimeset(self, hour, minute, second):
set = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
set.sort()
return set
def mtimeset(self, hour, minute, second):
set = []
rr = self.rrule
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
set.sort()
return set
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
class _genitem:
def __init__(self, genlist, gen):
try:
self.dt = gen()
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def next(self):
try:
self.dt = self.gen()
except StopIteration:
self.genlist.remove(self)
def __cmp__(self, other):
return cmp(self.dt, other.dt)
def __init__(self, cache=False):
rrulebase.__init__(self, cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
def rrule(self, rrule):
self._rrule.append(rrule)
def rdate(self, rdate):
self._rdate.append(rdate)
def exrule(self, exrule):
self._exrule.append(exrule)
def exdate(self, exdate):
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate).next)
for gen in [iter(x).next for x in self._rrule]:
self._genitem(rlist, gen)
rlist.sort()
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate).next)
for gen in [iter(x).next for x in self._exrule]:
self._genitem(exlist, gen)
exlist.sort()
lastdt = None
total = 0
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exlist[0].next()
exlist.sort()
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
ritem.next()
rlist.sort()
self._len = total
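# Sketch of combining recurrence rules with exception dates (illustrative):
#
#     >>> set = rruleset()
#     >>> set.rrule(rrule(DAILY, count=3, dtstart=datetime.datetime(2010, 1, 1)))
#     >>> set.exdate(datetime.datetime(2010, 1, 2))
#     >>> list(set)
#     [datetime.datetime(2010, 1, 1, 0, 0), datetime.datetime(2010, 1, 3, 0, 0)]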
class _rrulestr:
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError, "invalid until date"
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
l = []
for wday in value.split(','):
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n: n = int(n)
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
            name, value = line.split(':', 1)
if name != "RRULE":
raise ValueError, "unknown parameter name"
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError, "unknown parameter '%s'" % name
except (KeyError, ValueError):
raise ValueError, "invalid '%s': %s" % (name, value)
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
s = s.upper()
if not s.strip():
raise ValueError, "empty string"
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and
(s.find(':') == -1 or s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError, "unsupported RRULE parm: "+parm
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError, "unsupported EXRULE parm: "+parm
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError, "unsupported DTSTART parm: "+parm
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError, "unsupported property: "+name
if (forceset or len(rrulevals) > 1 or
rdatevals or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
set = rruleset(cache=cache)
for value in rrulevals:
set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
set.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
set.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
set.rdate(dtstart)
return set
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
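# Sketch of parsing an iCalendar-style string with the parser above (the
# input string is an assumed example):
#
#     >>> list(rrulestr("DTSTART:20100101T000000\nRRULE:FREQ=DAILY;COUNT=2"))
#     [datetime.datetime(2010, 1, 1, 0, 0), datetime.datetime(2010, 1, 2, 0, 0)]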
# vim:ts=4:sw=4:et
| 40,402 | Python | .py | 1,022 | 23.547945 | 78 | 0.453465 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,595 | tzwin.py | midgetspy_Sick-Beard/lib/dateutil/tzwin.py | # This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg
__author__ = "Jeffrey Harris & Gustavo Niemeyer <[email protected]>"
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
global TZKEYNAME
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
try:
_winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
_settzkeyname()
class tzwinbase(datetime.tzinfo):
"""tzinfo class based on win32's timezones available in the registry."""
def utcoffset(self, dt):
if self._isdst(dt):
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
def dst(self, dt):
if self._isdst(dt):
minutes = self._dstoffset - self._stdoffset
return datetime.timedelta(minutes=minutes)
else:
return datetime.timedelta(0)
def tzname(self, dt):
if self._isdst(dt):
return self._dstname
else:
return self._stdname
def list():
"""Return a list of all time zones known to the system."""
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
tzkey = _winreg.OpenKey(handle, TZKEYNAME)
result = [_winreg.EnumKey(tzkey, i)
for i in range(_winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
list = staticmethod(list)
def display(self):
return self._display
def _isdst(self, dt):
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
if dston < dstoff:
return dston <= dt.replace(tzinfo=None) < dstoff
else:
return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, "%s\\%s" % (TZKEYNAME, name))
keydict = valuestodict(tzkey)
tzkey.Close()
handle.Close()
self._stdname = keydict["Std"].encode("iso-8859-1")
self._dstname = keydict["Dlt"].encode("iso-8859-1")
self._display = keydict["Display"]
        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
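# Illustrative use (Windows only; "Eastern Standard Time" is one of the
# key names under the registry's Time Zones branch):
#
# >>> eastern = tzwin("Eastern Standard Time")
# >>> eastern.tzname(datetime.datetime(2003, 7, 1))
# 'Eastern Daylight Time'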
class tzwinlocal(tzwinbase):
def __init__(self):
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
keydict = valuestodict(tzlocalkey)
tzlocalkey.Close()
self._stdname = keydict["StandardName"].encode("iso-8859-1")
self._dstname = keydict["DaylightName"].encode("iso-8859-1")
try:
            tzkey = _winreg.OpenKey(handle, "%s\\%s"%(TZKEYNAME, self._stdname))
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
tzkey.Close()
except OSError:
self._display = None
handle.Close()
self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:6]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:6]
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek 5 means last instance"""
first = datetime.datetime(year, month, 1, hour, minute)
weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
for n in xrange(whichweek):
        # (whichweek-n-1) counts weeks past the first occurrence; stepping
        # back on month overflow makes whichweek == 5 mean "last instance".
        dt = weekdayone+(whichweek-n-1)*ONEWEEK
if dt.month == month:
return dt
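# Sanity check for the week arithmetic above (hypothetical call): the
# second Sunday of March 2011 at 02:00 -- the US DST-start rule in
# effect since 2007 -- is March 13th:
#
# >>> picknthweekday(2011, 3, 0, 2, 0, 2)
# datetime.datetime(2011, 3, 13, 2, 0)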
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    result = {}
    size = _winreg.QueryInfoKey(key)[1]
    for i in range(size):
        data = _winreg.EnumValue(key, i)
        result[data[0]] = data[1]
    return result
| 5,828 | Python | .py | 139 | 32.805755 | 79 | 0.613588 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,596 | parser.py | midgetspy_Sick-Beard/lib/dateutil/parser.py | # -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
import string
import time
import sys
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import relativedelta
import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
class _timelex(object):
def __init__(self, instream):
if isinstance(instream, basestring):
instream = StringIO(instream)
self.instream = instream
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.numchars = '0123456789'
self.whitespace = ' \t\r\n'
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
wordchars = self.wordchars
numchars = self.numchars
whitespace = self.whitespace
while not self.eof:
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
token = nextchar
if nextchar in wordchars:
state = 'a'
elif nextchar in numchars:
state = '0'
elif nextchar in whitespace:
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
seenletters = True
if nextchar in wordchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
if nextchar in numchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
seenletters = True
if nextchar == '.' or nextchar in wordchars:
token += nextchar
elif nextchar in numchars and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
if nextchar == '.' or nextchar in numchars:
token += nextchar
elif nextchar in wordchars and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and
(seenletters or token.count('.') > 1 or token[-1] == '.')):
l = token.split('.')
token = l[0]
for tok in l[1:]:
self.tokenstack.append('.')
if tok:
self.tokenstack.append(tok)
return token
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def split(cls, s):
return list(cls(s))
split = classmethod(split)
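# The lexer emits runs of letters, runs of digits, and single
# punctuation or whitespace characters (illustrative):
#
# >>> _timelex.split("12:30 PM, Sep 25th 2003")
# ['12', ':', '30', ' ', 'PM', ',', ' ', 'Sep', ' ', '25', 'th', ' ', '2003']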
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
("Wed", "Wednesday"),
("Thu", "Thursday"),
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year//100*100
def _convert(self, lst):
dct = {}
for i in range(len(lst)):
v = lst[i]
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
if len(name) >= 3:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()]+1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year):
if year < 100:
year += self._century
if abs(year-self._year) >= 50:
if year < self._year:
year += 100
else:
year -= 100
return year
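    # Two-digit years resolve to within +/-50 years of the current year
    # (illustrative, assuming this runs in 2010):
    #
    # >>> parserinfo().convertyear(3)     # -> 2003
    # >>> parserinfo().convertyear(70)    # -> 1970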
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year)
if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
class parser(object):
def __init__(self, info=None):
self.info = info or parserinfo()
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None,
**kwargs):
if not default:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError, "unknown string format"
repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
value = getattr(res, attr)
if value is not None:
repl[attr] = value
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret+relativedelta.relativedelta(weekday=res.weekday)
if not ignoretz:
if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
if callable(tzinfos):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, basestring):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, int):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError, "offset must be tzinfo subclass, " \
"tz string, or int offset"
ret = ret.replace(tzinfo=tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo=tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo=tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
try:
# year/month/day list
ymd = []
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i-1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i-1]
if not ymd and l[i-1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = _parsems(s[4:])
elif len_li == 8:
# YYYYMMDD
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i+1 < len_l and l[i] == ' ' and
info.hms(l[i+1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value%1:
res.minute = int(60*(value%1))
elif idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i+1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i+1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(int(value))
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(int(l[i]))
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
else:
return None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
                                    assert mstridx == -1
                                    mstridx = len(ymd)-1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i+1 < len_l and info.ampm(l[i+1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i+1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i+1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(int(value))
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(int(l[i]))
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(int(l[i]))
i += 1
elif (i+3 < len_l and l[i] == l[i+2] == ' '
and info.pertain(l[i+1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i+3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(info.convertyear(value))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1,1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
elif i+1 < len_l and l[i+1] == ':':
# -03:00
res.tzoffset = int(l[i])*3600+int(l[i+2])*60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2])*3600
else:
return None
i += 1
res.tzoffset *= signal
                # Look for a timezone name in parentheses
if (i+3 < len_l and
info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
3 <= len(l[i+2]) <= 5 and
not [x for x in l[i+2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i+2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None
i += 1
# Process year/month/day
len_ymd = len(ymd)
if len_ymd > 3:
# More than three members!?
return None
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
res.month = ymd[mstridx]
del ymd[mstridx]
if len_ymd > 1 or mstridx == -1:
if ymd[0] > 31:
res.year = ymd[0]
else:
res.day = ymd[0]
elif len_ymd == 2:
# Two members with numbers
if ymd[0] > 31:
# 99-01
res.year, res.month = ymd
elif ymd[1] > 31:
# 01-99
res.month, res.year = ymd
elif dayfirst and ymd[1] <= 12:
# 13-01
res.day, res.month = ymd
else:
# 01-13
res.month, res.day = ymd
if len_ymd == 3:
# Three members
if mstridx == 0:
res.month, res.day, res.year = ymd
elif mstridx == 1:
if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
# 99-Jan-01
res.year, res.month, res.day = ymd
else:
# 01-Jan-01
                        # Give precedence to day-first, since
                        # two-digit years are usually hand-written.
res.day, res.month, res.year = ymd
elif mstridx == 2:
# WTF!?
if ymd[1] > 31:
# 01-99-Jan
res.day, res.year, res.month = ymd
else:
# 99-01-Jan
res.year, res.day, res.month = ymd
else:
if ymd[0] > 31 or \
(yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
# 99-01-01
res.year, res.month, res.day = ymd
elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
# 13-01-01
res.day, res.month, res.year = ymd
else:
# 01-13-01
res.month, res.day, res.year = ymd
except (IndexError, ValueError, AssertionError):
return None
if not info.validate(res):
return None
return res
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
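# A few illustrative calls; the last one shows dayfirst resolving an
# otherwise ambiguous date:
#
# >>> parse("2003-09-25T10:49:41")
# datetime.datetime(2003, 9, 25, 10, 49, 41)
# >>> parse("Thu Sep 25 2003")
# datetime.datetime(2003, 9, 25, 0, 0)
# >>> parse("10-09-2003", dayfirst=True)
# datetime.datetime(2003, 9, 10, 0, 0)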
class _tzparser(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = _timelex.split(tzstr)
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
i = j
if (i < len_l and
(l[i] in ('+', '-') or l[i][0] in "0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1,-1)[l[i] == '+']
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr,
(int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i])*3600+int(l[i+2])*60)*signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2])*3600*signal)
else:
return None
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';': l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i+1])*-1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i])-1)%7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-','+'):
signal = (-1,1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset+int(l[i]))*signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',','/','J','M',
'.','-',':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
i += 1
x.month = int(l[i])
i += 1
assert l[i] in ('-', '.')
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i])-1)%7
else:
# year day (zero based)
x.yday = int(l[i])+1
i += 1
if i < len_l and l[i] == '/':
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
x.time = int(l[i])*3600+int(l[i+2])*60
i += 2
if i+1 < len_l and l[i+1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2])*3600)
else:
return None
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
# vim:ts=4:sw=4:et
| 32,464 | Python | .py | 796 | 21.721106 | 81 | 0.367311 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,597 | tz.py | midgetspy_Sick-Beard/lib/dateutil/tz.py | """
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
import struct
import time
import sys
import os
relativedelta = None
parser = None
rrule = None
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
def tzname(self, dt):
return self._name
def __eq__(self, other):
return (isinstance(other, tzoffset) and
self._offset == other._offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
`self._name`,
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
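# Example (illustrative): a fixed UTC-3 zone built from an offset in
# seconds; note how the negative timedelta normalizes.
#
# >>> tzoffset("BRT", -10800).utcoffset(None)
# datetime.timedelta(-1, 75600)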
class tzlocal(datetime.tzinfo):
_std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
        # an hour near a transition is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
#>>> import tz, datetime
#>>> t = tz.tzlocal()
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
    def __eq__(self, other):
        if not isinstance(other, tzlocal):
            return False
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, basestring):
self._filename = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = `fileobj`
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4) != "TZif":
raise ValueError, "magic not found"
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt)
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
        # Replace ttinfo indexes with ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1,-1,-1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
        # is set up for wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
idx = 0
for trans in self._trans_list:
if timestamp < trans:
break
idx += 1
else:
return self._ttinfo_std
if idx == 0:
return self._ttinfo_before
if laststd:
while idx > 0:
tti = self._trans_idx[idx-1]
if not tti.isdst:
return tti
idx -= 1
else:
return self._ttinfo_std
else:
return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.delta-self._find_ttinfo(dt, laststd=1).delta
# An alternative for that would be:
#
# return self._ttinfo_dst.offset-self._ttinfo_std.offset
#
# However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
# way to implement this.
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._filename`)
def __reduce__(self):
if not os.path.isfile(self._filename):
            raise ValueError, "Unpicklable %s class" % self.__class__.__name__
return (self.__class__, (self._filename,))
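# Illustrative construction straight from a zoneinfo file (the path and
# the abbreviation returned depend on the host's tz database):
#
# >>> saopaulo = tzfile("/usr/share/zoneinfo/America/Sao_Paulo")
# >>> datetime.datetime(2003, 1, 1, tzinfo=saopaulo).tzname()
# 'BRST'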
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
if not relativedelta:
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year,1,1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
def __eq__(self, other):
if not isinstance(other, tzrange):
return False
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzstr(tzrange):
def __init__(self, s):
global parser
if not parser:
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError, "unknown string format"
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it twice.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
            # Default is to start on the first Sunday of April and end
            # on the last Sunday of October.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
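# Example (illustrative): a POSIX-style TZ string using the default
# first-Sunday-of-April / last-Sunday-of-October DST rules above.
#
# >>> est5edt = tzstr("EST5EDT")
# >>> datetime.datetime(2003, 7, 1, tzinfo=est5edt).tzname()
# 'EDT'
# >>> datetime.datetime(2003, 1, 1, tzinfo=est5edt).tzname()
# 'EST'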
class _tzicalvtzcomp:
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
                lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
__reduce__ = object.__reduce__
class tzical:
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, basestring):
self._s = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = `fileobj`
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return self._vtz.keys()
def get(self, tzid=None):
if tzid is None:
keys = self._vtz.keys()
if len(keys) == 0:
raise ValueError, "no timezones defined"
elif len(keys) > 1:
raise ValueError, "more than one timezone available"
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError, "empty offset"
if s[0] in ('+', '-'):
signal = (-1,+1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError, "invalid offset: "+s
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError, "empty string"
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError, "unknown component: "+value
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError, \
"component not closed: "+comptype
if not tzid:
raise ValueError, \
"mandatory TZID not found"
if not comps:
raise ValueError, \
"at least one component is needed"
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError, \
"mandatory DTSTART not found"
if tzoffsetfrom is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
if tzoffsetto is None:
                            raise ValueError, \
                                "mandatory TZOFFSETTO not found"
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError, \
"invalid component end: "+value
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError, \
"unsupported %s parm: %s "%(name, parms[0])
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError, \
"unsupported TZOFFSETTO parm: "+parms[0]
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError, \
"unsupported TZNAME parm: "+parms[0]
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError, "unsupported property: "+name
else:
if name == "TZID":
if parms:
raise ValueError, \
"unsupported TZID parm: "+parms[0]
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError, "unsupported property: "+name
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
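# Sketch (the path is hypothetical): load the VTIMEZONE components from
# an iCalendar file and pick the single zone it defines.
#
# >>> ical = tzical("/path/to/calendar.ics")
# >>> tz = ical.get()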
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
            # Strip the leading ":" (POSIX TZ convention), keep the name.
            name = name[1:]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ','_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin:
try:
tz = tzwin(name)
except OSError:
pass
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
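# Typical lookups (illustrative; results depend on the host's tz data):
#
# >>> gettz("America/Sao_Paulo")            # from the zoneinfo database
# tzfile('/usr/share/zoneinfo/America/Sao_Paulo')
# >>> gettz("BRST3BRDT2,M10.1.0,M2.3.0")    # falls back to a tzstr parse
# tzstr('BRST3BRDT2,M10.1.0,M2.3.0')
# >>> gettz()                               # the local zone
# tzlocal()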
# vim:ts=4:sw=4:et
| 32,741 | Python | .py | 827 | 26.438936 | 77 | 0.507151 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,598 | __init__.py | midgetspy_Sick-Beard/lib/dateutil/zoneinfo/__init__.py | """
Copyright (c) 2003-2005 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
def __reduce__(self):
return (gettz, (self._filename,))
def getzoneinfofile():
filenames = os.listdir(os.path.join(os.path.dirname(__file__)))
filenames.sort()
filenames.reverse()
for entry in filenames:
if entry.startswith("zoneinfo") and ".tar." in entry:
return os.path.join(os.path.dirname(__file__), entry)
return None
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
global CACHESIZE, CACHE
CACHESIZE = size
del CACHE[size:]
def gettz(name):
tzinfo = None
if ZONEINFOFILE:
for cachedname, tzinfo in CACHE:
if cachedname == name:
break
else:
tf = TarFile.open(ZONEINFOFILE)
try:
zonefile = tf.extractfile(name)
except KeyError:
tzinfo = None
else:
tzinfo = tzfile(zonefile)
tf.close()
CACHE.insert(0, (name, tzinfo))
del CACHE[CACHESIZE:]
return tzinfo
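# Illustrative: pull a zone straight from the bundled tarball; this
# returns None when no zoneinfo archive ships with the package.
#
# >>> gettz("America/New_York")    # doctest: +SKIP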
def rebuild(filename, tag=None, format="gz"):
import tempfile, shutil
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
if tag: tag = "-"+tag
targetname = "zoneinfo%s.tar.%s" % (tag, format)
try:
tf = TarFile.open(filename)
for name in tf.getnames():
if not (name.endswith(".sh") or
name.endswith(".tab") or
name == "leapseconds"):
tf.extract(name, tmpdir)
filepath = os.path.join(tmpdir, name)
os.system("zic -d %s %s" % (zonedir, filepath))
tf.close()
target = os.path.join(moduledir, targetname)
for entry in os.listdir(moduledir):
if entry.startswith("zoneinfo") and ".tar." in entry:
os.unlink(os.path.join(moduledir, entry))
tf = TarFile.open(target, "w:%s" % format)
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
tf.close()
finally:
shutil.rmtree(tmpdir)
| 2,575 | Python | .py | 76 | 26.052632 | 67 | 0.600884 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,599 | __init__.py | midgetspy_Sick-Beard/lib/oauth2/__init__.py | """
The MIT License
Copyright (c) 2007 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import lib.httplib2 as httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
    def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {
'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret
}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
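# Round-trip sketch (placeholder credentials); the serialized form keeps
# the secret, so treat it as sensitive:
#
# >>> t = Token("request-key", "request-secret")
# >>> Token.from_string(t.to_string()).key
# 'request-key'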
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
if method is not None:
self.method = method
if url is not None:
self.url = url
if parameters is not None:
self.update(parameters)
@setter
def url(self, value):
parts = urlparse.urlparse(value)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme != 'http' and scheme != 'https':
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
value = '%s://%s%s' % (scheme, netloc, path)
self.__dict__['url'] = value
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return self.encode_postdata(self)
def encode_postdata(self, data):
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(data, True)
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.url, self.to_postdata())
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = [(k, v) for k, v in self.items() if k != 'oauth_signature']
encoded_str = urllib.urlencode(sorted(items), True)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20')
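    # e.g. (illustrative): {'b': '2', 'a': '1', 'oauth_signature': 'x'}
    # normalizes to 'a=1&b=2' -- keys sorted, the signature itself
    # excluded, and any spaces encoded as %20 rather than '+'.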
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
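    # Typical signing flow (sketch; the consumer/token values are made up):
    #   req = Request.from_consumer_and_token(consumer, token=token,
    #       http_method='GET', http_url='https://photos.example.net/photos')
    #   req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
    # after which req['oauth_signature'] holds the computed signature.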
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except Exception:
                    raise Error('Unable to parse OAuth parameters from '
                        'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return Request(http_method, http_url, parameters)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
    @staticmethod
    def _split_header(header):
        """Turn an Authorization header value into a parameter dict."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Remove whitespace.
            param = param.strip()
            # Ignore the realm parameter; it is not an OAuth protocol parameter.
            if param.startswith('realm'):
                continue
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('"'))
        return params
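    # E.g. (sketch): _split_header('realm="x", oauth_nonce="abc"') returns
    # {'oauth_nonce': 'abc'} -- the realm is skipped and quotes are stripped.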
    @staticmethod
    def _split_url_string(param_str):
        """Turn a URL query string into a parameter dict."""
        parameters = parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            # parse_qs returns lists; keep only the first value per key.
            parameters[k] = urllib.unquote(v[0])
        return parameters
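    # E.g. (sketch): _split_url_string('a=1&b=2%20x') returns
    # {'a': '1', 'b': '2 x'}; repeated keys keep only their first value.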
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
    def verify_request(self, request, consumer, token):
        """Verifies an API call and checks all the parameters."""
        # _get_version raises if the request asks for an unsupported version.
        self._get_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters
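    # Typical provider-side use (sketch; looking up the consumer and token in
    # storage is out of scope here):
    #   server = Server()
    #   server.add_signature_method(SignatureMethod_HMAC_SHA1())
    #   params = server.verify_request(request, consumer, token)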
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
    def _get_version(self, request):
        """Verify that the request uses a version this server supports."""
        try:
            version = request.get_parameter('oauth_version')
        except Error:
            version = VERSION
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))
        return version
    def _get_signature_method(self, request):
        """Figure out the signature method, falling back to the default."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except Error:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported; try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
        try:
            signature = request.get_parameter('oauth_signature')
        except Error:
            raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
    def _check_timestamp(self, timestamp):
        """Verify that the timestamp is recent enough to accept."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %d have a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout,
proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None,
force_auth_header=False):
if not isinstance(headers, dict):
headers = {}
        if body and method == "POST":
            # Assume a form-encoded body so its fields can be signed too.
            parameters = dict(parse_qsl(body))
elif method == "GET":
parsed = urlparse.urlparse(uri)
parameters = parse_qs(parsed.query)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer, token=self.token,
http_method=method, http_url=uri, parameters=parameters)
req.sign_request(self.method, self.consumer, self.token)
if force_auth_header:
# ensure we always send Authorization
headers.update(req.to_header())
if method == "POST":
if not force_auth_header:
body = req.to_postdata()
else:
body = req.encode_postdata(req.get_nonoauth_parameters())
headers['Content-Type'] = 'application/x-www-form-urlencoded'
elif method == "GET":
if not force_auth_header:
uri = req.to_url()
else:
if not force_auth_header:
# don't call update twice.
headers.update(req.to_header())
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
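    # Example use (sketch; the URL and credentials are placeholders):
    #   consumer = Consumer(key='my-key', secret='my-secret')
    #   client = Client(consumer)
    #   resp, content = client.request('https://api.example.com/resource')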
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
sig = (
escape(request.method),
escape(request.url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
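    # Sketch of the signature base string for a GET of http://example.com/req
    # with parameters a=1 and b=2 and no token:
    #   key = 'consumersecret&'
    #   raw = 'GET&http%3A%2F%2Fexample.com%2Freq&a%3D1%26b%3D2'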
    def sign(self, request, consumer, token):
        """Builds the HMAC-SHA1 signature for the request."""
key, raw = self.signing_base(request, consumer, token)
        # HMAC object.
        try:
            import hashlib # Available in Python 2.5 and later.
            hashed = hmac.new(key, raw, hashlib.sha1)
        except ImportError:
            import sha # Deprecated; fallback for older Pythons.
            hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
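    # E.g. (sketch): consumer.secret='kd94hf93k423kf44' and
    # token.secret='pfkkdhi9sl3r4s00' give 'kd94hf93k423kf44&pfkkdhi9sl3r4s00'
    # as both the key and the signature.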
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
        return raw