sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
---|---|---|
def combine_xml_points(l, units, handle_units):
    """Merge a sequence of Point dicts into per-field arrays.

    Values for each field are collected across all points; every field
    except ``'date'`` is passed through ``handle_units`` along with its
    unit string (if any) from `units`.
    """
    merged = {}
    for point in l:
        for field, value in point.items():
            merged.setdefault(field, []).append(value)
    return {field: values if field == 'date' else handle_units(values, units.get(field, None))
            for field, values in merged.items()}
def parse_xml_dataset(elem, handle_units):
    """Create a netCDF-like dataset from XML data.

    Parses every ``point`` child of `elem`, groups points that share the
    same set of fields, and combines each group into arrays.
    """
    parsed = [parse_xml_point(p) for p in elem.findall('point')]
    points, units = zip(*parsed)
    # Bucket points by their field names so heterogeneous points stay separate
    grouped = {}
    for point in points:
        grouped.setdefault(tuple(point), []).append(point)
    merged_units = combine_dicts(units)
    return [combine_xml_points(group, merged_units, handle_units)
            for group in grouped.values()]
def parse_csv_response(data, unit_handler):
    """Handle a CSV-formatted HTTP response.

    The body may contain multiple datasets separated by blank lines;
    each is parsed individually and the results are squished together.
    """
    chunks = data.split(b'\n\n')
    return squish([parse_csv_dataset(chunk, unit_handler) for chunk in chunks])
def parse_csv_header(line):
    """Parse the CSV header returned by TDS.

    Returns the list of column names and a dict mapping a name to its
    unit string for columns annotated like ``name[unit="..."]``.
    """
    units = {}
    names = []
    for var in line.split(','):
        head, bracket, tail = var.partition('[')
        if not bracket:
            # Plain column with no unit annotation
            names.append(str(var))
            continue
        names.append(str(head))
        unitstr = tail[:tail.find(']')]
        _, eq, rest = unitstr.partition('=')
        if eq:
            # Skip the opening quote after '=' and drop the closing quote
            units[names[-1]] = rest[1:-1]
    return names, units
def parse_csv_dataset(data, handle_units):
    """Parse CSV data into a netCDF-like dataset.

    Parameters
    ----------
    data : bytes
        Raw CSV content; the first line is the TDS header with names/units.
    handle_units : callable
        Called as ``handle_units(values, unit_or_None)`` for each column.

    Returns
    -------
    dict
        Mapping of column name to unit-handled column data.
    """
    fobj = BytesIO(data)
    names, units = parse_csv_header(fobj.readline().decode('utf-8'))
    arrs = np.genfromtxt(fobj, dtype=None, names=names, delimiter=',', unpack=True,
                         converters={'date': lambda s: parse_iso_date(s.decode('utf-8'))})
    d = {}
    for f in arrs.dtype.fields:
        dat = arrs[f]
        # Columns of parsed objects (e.g. dates) go back to a plain list.
        # Compare against the builtin: the np.object alias was removed in
        # NumPy 1.24 and would raise AttributeError there.
        if dat.dtype == object:
            dat = dat.tolist()
        d[f] = handle_units(dat, units.get(f, None))
    return d
def validate_query(self, query):
    """Validate a query.

    Determines whether `query` is well-formed: it must request at least
    one variable, and every requested variable must be present in this
    dataset's variables.

    Parameters
    ----------
    query : NCSSQuery
        The query to validate

    Returns
    -------
    valid : bool
        Whether `query` is valid.
    """
    # An empty variable list is never valid
    if not query.var:
        return False
    # Every requested variable must exist in the dataset
    return all(v in self.variables for v in query.var)
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : NCSSQuery
The query to validate
Returns
-------
valid : bool
Whether `query` is valid. | entailment |
def get_data(self, query):
    """Fetch parsed data from a THREDDS server using NCSS.

    Sends `query` to the NCSS endpoint and dispatches parsing of the
    returned content based on its mimetype.

    Parameters
    ----------
    query : NCSSQuery
        The parameters to send to the NCSS endpoint

    Returns
    -------
    Parsed data response from the server. Exact format depends on the
    format of the response.

    See Also
    --------
    get_data_raw
    """
    response = self.get_query(query)
    # response_handlers picks the parser from the response's mimetype
    return response_handlers(response, self.unit_handler)
Requests data from the NCSS endpoint given the parameters in `query` and
handles parsing of the returned content based on the mimetype.
Parameters
----------
query : NCSSQuery
The parameters to send to the NCSS endpoint
Returns
-------
Parsed data response from the server. Exact format depends on the format of the
response.
See Also
--------
get_data_raw | entailment |
def projection_box(self, min_x, min_y, max_x, max_y):
    """Add a bounding box in projected (native) coordinates to the query.

    Requests a spatial bounding box bounded by (`min_x`, `max_x`) in the
    x direction and (`min_y`, `max_y`) in the y direction. The query is
    modified in-place, but ``self`` is returned so that calls can be
    chained on one line. Any previously set spatial query is replaced.

    Parameters
    ----------
    min_x : float
        The left edge of the bounding box
    min_y : float
        The bottom edge of the bounding box
    max_x : float
        The right edge of the bounding box
    max_y : float
        The top edge of the bounding box

    Returns
    -------
    self : NCSSQuery
        Returns self for chaining calls
    """
    # Keyword order matches the original call so parameter ordering
    # downstream is unchanged.
    bounds = {'minx': min_x, 'miny': min_y, 'maxx': max_x, 'maxy': max_y}
    self._set_query(self.spatial_query, **bounds)
    return self
This adds a request for a spatial bounding box, bounded by (`min_x`, `max_x`) for
x direction and (`min_y`, `max_y`) for the y direction. This modifies the query
in-place, but returns ``self`` so that multiple queries can be chained together
on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
min_x : float
The left edge of the bounding box
min_y : float
The bottom edge of the bounding box
max_x : float
The right edge of the bounding box
max_y: float
The top edge of the bounding box
Returns
-------
self : NCSSQuery
Returns self for chaining calls | entailment |
def strides(self, time=None, spatial=None):
    """Set time and/or spatial (horizontal) strides.

    Only used on grid requests; strides skip points in the returned data.
    The query is modified in-place, but `self` is returned so that calls
    can be chained on one line.

    Parameters
    ----------
    time : int, optional
        Stride for times returned. Defaults to None, which is equivalent to 1.
    spatial : int, optional
        Stride for horizontal grid. Defaults to None, which is equivalent to 1.

    Returns
    -------
    self : NCSSQuery
        Returns self for chaining calls
    """
    # Map each provided (truthy) stride onto its query-parameter name
    for param, value in (('timeStride', time), ('horizStride', spatial)):
        if value:
            self.add_query_parameter(**{param: value})
    return self
This is only used on grid requests. Used to skip points in the returned data.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
Parameters
----------
time : int, optional
Stride for times returned. Defaults to None, which is equivalent to 1.
spatial : int, optional
Stride for horizontal grid. Defaults to None, which is equivalent to 1.
Returns
-------
self : NCSSQuery
Returns self for chaining calls | entailment |
def register(self, mimetype):
    """Return a decorator that registers a handler function for `mimetype`."""
    def decorator(handler):
        # Store the handler in the registry keyed by mimetype
        self._reg[mimetype] = handler
        return handler
    return decorator
def handle_typed_values(val, type_name, value_type):
    """Translate typed values into the appropriate python object.

    Takes an element name, value, and type and returns a list
    with the string value(s) properly converted to a python type.

    TypedValues are handled in ucar.ma2.DataType in netcdfJava
    in the DataType enum. Possibilities are:

        "boolean", "byte", "char", "short", "int", "long", "float",
        "double", "Sequence", "String", "Structure", "enum1", "enum2",
        "enum4", "opaque", "object"

    All of these are values written as strings in the xml, so simply
    applying int, float to the values will work in most cases (i.e.
    the TDS encodes them as string values properly).

    Example XML element:

        <attribute name="scale_factor" type="double" value="0.0010000000474974513"/>

    Parameters
    ----------
    val : string
        The string representation of the value attribute of the xml element
    type_name : string
        The string representation of the name attribute of the xml element
    value_type : string
        The string representation of the type attribute of the xml element

    Returns
    -------
    val : list
        A list containing the properly typed python values.
    """
    if value_type in ['byte', 'short', 'int', 'long']:
        try:
            # Values may be separated by spaces and/or commas
            val = [int(v) for v in re.split('[ ,]', val) if v]
        except ValueError:
            log.warning('Cannot convert "%s" to int. Keeping type as str.', val)
    elif value_type in ['float', 'double']:
        try:
            val = [float(v) for v in re.split('[ ,]', val) if v]
        except ValueError:
            log.warning('Cannot convert "%s" to float. Keeping type as str.', val)
    elif value_type == 'boolean':
        try:
            # special case for boolean type: every token must be exactly
            # 'true' or 'false', otherwise fall back to the split strings
            val = val.split()
            for potential_bool in val:
                if potential_bool not in ['true', 'false']:
                    raise ValueError
            val = [item == 'true' for item in val]
        except ValueError:
            msg = 'Cannot convert values %s to boolean.'
            msg += ' Keeping type as str.'
            log.warning(msg, val)
    elif value_type == 'String':
        # nothing special for String type
        pass
    else:
        # possibilities - Sequence, Structure, enum, opaque, object, and char.
        # Not sure how to handle these as there is no example of how they
        # would show up in dataset.xml
        log.warning('%s type %s not understood. Keeping as String.',
                    type_name, value_type)
    # Always return a list, even for a single (unconverted) value
    if not isinstance(val, list):
        val = [val]
    return val
Takes an element name, value, and type and returns a list
with the string value(s) properly converted to a python type.
TypedValues are handled in ucar.ma2.DataType in netcdfJava
in the DataType enum. Possibilities are:
"boolean"
"byte"
"char"
"short"
"int"
"long"
"float"
"double"
"Sequence"
"String"
"Structure"
"enum1"
"enum2"
"enum4"
"opaque"
"object"
All of these are values written as strings in the xml, so simply
applying int, float to the values will work in most cases (i.e.
the TDS encodes them as string values properly).
Examle XML element:
<attribute name="scale_factor" type="double" value="0.0010000000474974513"/>
Parameters
----------
val : string
The string representation of the value attribute of the xml element
type_name : string
The string representation of the name attribute of the xml element
value_type : string
The string representation of the type attribute of the xml element
Returns
-------
val : list
A list containing the properly typed python values. | entailment |
def _get_data(self, time, site_id):
    r"""Download and parse upper air observations from an online archive.

    Parameters
    ----------
    time : datetime
        The date and time of the desired observation.
    site_id : str
        The three letter ICAO identifier of the station for which data should be
        downloaded.

    Returns
    -------
    :class:`pandas.DataFrame` containing the data
    """
    raw_data = self._get_data_raw(time, site_id)
    soup = BeautifulSoup(raw_data, 'html.parser')
    # The first <pre> block of the page holds the fixed-width sounding table
    tabular_data = StringIO(soup.find_all('pre')[0].contents[0])
    col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
    # Skip 5 header lines; columns 4 and 5 of the table are not used here
    df = pd.read_fwf(tabular_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
    # Derive wind components from speed and direction (direction in radians)
    df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
                                                     np.deg2rad(df['direction']))
    # Drop any rows with all NaN values for T, Td, winds
    df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
                           'u_wind', 'v_wind'), how='all').reset_index(drop=True)
    # Parse metadata from the second <pre> block, which is a key: value listing
    meta_data = soup.find_all('pre')[1].contents[0]
    lines = meta_data.splitlines()
    # If the station doesn't have a name identified we need to insert a
    # record showing this for parsing to proceed.
    if 'Station number' in lines[1]:
        lines.insert(1, 'Station identifier: ')
    # Lines 1-6 are assumed to be identifier, number, time, lat, lon,
    # elevation in that fixed order -- the archive's layout.
    station = lines[1].split(':')[1].strip()
    station_number = int(lines[2].split(':')[1].strip())
    sounding_time = datetime.strptime(lines[3].split(':')[1].strip(), '%y%m%d/%H%M')
    latitude = float(lines[4].split(':')[1].strip())
    longitude = float(lines[5].split(':')[1].strip())
    elevation = float(lines[6].split(':')[1].strip())
    # Attach station metadata as constant columns
    df['station'] = station
    df['station_number'] = station_number
    df['time'] = sounding_time
    df['latitude'] = latitude
    df['longitude'] = longitude
    df['elevation'] = elevation
    # Add unit dictionary (plain attribute -- note pandas does not preserve
    # arbitrary attributes across DataFrame operations)
    df.units = {'pressure': 'hPa',
                'height': 'meter',
                'temperature': 'degC',
                'dewpoint': 'degC',
                'direction': 'degrees',
                'speed': 'knot',
                'u_wind': 'knot',
                'v_wind': 'knot',
                'station': None,
                'station_number': None,
                'time': None,
                'latitude': 'degrees',
                'longitude': 'degrees',
                'elevation': 'meter'}
    return df
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
Returns
-------
:class:`pandas.DataFrame` containing the data | entailment |
def _get_data_raw(self, time, site_id):
    """Download data from the University of Wyoming's upper air archive.

    Parameters
    ----------
    time : datetime
        Date and time for which data should be downloaded
    site_id : str
        Site id for which data should be downloaded

    Returns
    -------
    text of the server response
    """
    path = ('?region=naconf&TYPE=TEXT%3ALIST'
            '&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}'
            '&STNM={stid}').format(time=time, stid=site_id)
    resp = self.get_path(path)
    # A body containing "Can't" means the request was valid but has no data
    if "Can't" in resp.text:
        raise ValueError(
            'No data available for {time:%Y-%m-%d %HZ} '
            'for station {stid}.'.format(time=time, stid=site_id))
    return resp.text
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
Returns
-------
text of the server response | entailment |
def read_ncstream_data(fobj):
    """Handle reading an NcStream v1 data block from a file-like object."""
    # The Data header describes the payload that follows
    data = read_proto_object(fobj, stream.Data)
    if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
        # Strings, opaque blobs, and vlen data arrive as a count followed by
        # that many length-prefixed blocks
        log.debug('Reading string/opaque/vlen')
        num_obj = read_var_int(fobj)
        log.debug('Num objects: %d', num_obj)
        blocks = [read_block(fobj) for _ in range(num_obj)]
        if data.dataType == stream.STRING:
            blocks = [b.decode('utf-8', errors='ignore') for b in blocks]
        # Again endian isn't coded properly
        dt = data_type_to_numpy(data.dataType).newbyteorder('>')
        if data.vdata:
            # Each block becomes its own sub-array
            return np.array([np.frombuffer(b, dtype=dt) for b in blocks])
        else:
            return np.array(blocks, dtype=dt)
    elif data.dataType in _dtypeLookup:
        # Primitive array data: one block of (possibly compressed) bytes
        log.debug('Reading array data')
        bin_data = read_block(fobj)
        log.debug('Binary data: %s', bin_data)
        # Hard code to big endian for now since it's not encoded correctly
        dt = data_type_to_numpy(data.dataType).newbyteorder('>')
        # Handle decompressing the bytes
        if data.compress == stream.DEFLATE:
            bin_data = zlib.decompress(bin_data)
            assert len(bin_data) == data.uncompressedSize
        elif data.compress != stream.NONE:
            raise NotImplementedError('Compression type {0} not implemented!'.format(
                data.compress))
        # Turn bytes into an array
        return reshape_array(data, np.frombuffer(bin_data, dtype=dt))
    elif data.dataType == stream.STRUCTURE:
        sd = read_proto_object(fobj, stream.StructureData)
        # Make a datatype appropriate to the rows of struct
        endian = '>' if data.bigend else '<'
        # NOTE(review): `endian` is used here as the *field name* of the
        # void dtype ('>' or '<'), not as a byte-order prefix -- looks
        # suspicious; confirm this is the intended dtype construction.
        dt = np.dtype([(endian, np.void, sd.rowLength)])
        # Turn bytes into an array
        return reshape_array(data, np.frombuffer(sd.data, dtype=dt))
    elif data.dataType == stream.SEQUENCE:
        # Sequences are a run of StructureData messages bracketed by magics
        log.debug('Reading sequence')
        blocks = []
        magic = read_magic(fobj)
        while magic != MAGIC_VEND:
            if magic == MAGIC_VDATA:
                log.error('Bad magic for struct/seq data!')
            blocks.append(read_proto_object(fobj, stream.StructureData))
            magic = read_magic(fobj)
        return data, blocks
    else:
        raise NotImplementedError("Don't know how to handle data type: {0}".format(
            data.dataType))
def read_ncstream_err(fobj):
    """Read an NcStream error message and raise it as a RuntimeError."""
    error = read_proto_object(fobj, stream.Error)
    raise RuntimeError(error.message)
def read_messages(fobj, magic_table):
    """Read messages from a file-like object until the stream is exhausted.

    Each message is introduced by a magic byte sequence that selects a
    handler from `magic_table`; unknown magics are logged and skipped.
    """
    messages = []
    while True:
        magic = read_magic(fobj)
        if not magic:
            # End of stream
            break
        handler = magic_table.get(magic)
        if handler is None:
            log.error('Unknown magic: ' + str(' '.join('{0:02x}'.format(b)
                                                       for b in bytearray(magic))))
        else:
            messages.append(handler(fobj))
    return messages
def read_proto_object(fobj, klass):
    """Read one length-prefixed block and parse it as a `klass` protobuf message."""
    log.debug('%s chunk', klass.__name__)
    message = klass()
    message.ParseFromString(read_block(fobj))
    log.debug('Header: %s', str(message))
    return message
def read_block(fobj):
    """Read a length-prefixed block.

    The block's size precedes it, encoded as a variable-length integer.

    Parameters
    ----------
    fobj : file-like object
        The file to read from.

    Returns
    -------
    bytes
        block of bytes read
    """
    size = read_var_int(fobj)
    log.debug('Next block: %d bytes', size)
    return fobj.read(size)
Reads a block from a file object by first reading the number of bytes to read, which must
be encoded as a variable-byte length integer.
Parameters
----------
fobj : file-like object
The file to read from.
Returns
-------
bytes
block of bytes read | entailment |
def process_vlen(data_header, array):
    """Process vlen data coming back from NCStream v2.

    Slices `array` into sub-arrays whose lengths are given by the
    ``vlens`` attribute of `data_header`, consuming the original values
    in order.

    Parameters
    ----------
    data_header : Header
    array : :class:`numpy.ndarray`

    Returns
    -------
    ndarray
        array containing sub-sequences from the original primitive array
    """
    remaining = iter(array)
    pieces = []
    for size in data_header.vlens:
        pieces.append(np.fromiter(itertools.islice(remaining, size), dtype=array.dtype))
    return np.array(pieces)
This takes the array of values and slices into an object array, with entries containing
the appropriate pieces of the original array. Sizes are controlled by the passed in
`data_header`.
Parameters
----------
data_header : Header
array : :class:`numpy.ndarray`
Returns
-------
ndarray
object array containing sub-sequences from the original primitive array | entailment |
def datacol_to_array(datacol):
    """Convert DataCol from NCStream v2 into an array with appropriate type.

    Depending on the data type specified, this extracts data from the appropriate
    members and packs into a :class:`numpy.ndarray`, recursing as necessary for
    compound data types.

    Parameters
    ----------
    datacol : DataCol

    Returns
    -------
    ndarray
        array containing extracted data
    """
    if datacol.dataType == stream.STRING:
        # Builtin `object` keeps variable-length strings intact.
        # (The np.object alias was removed in NumPy 1.24.)
        arr = np.array(datacol.stringdata, dtype=object)
    elif datacol.dataType == stream.OPAQUE:
        arr = np.array(datacol.opaquedata, dtype=object)
    elif datacol.dataType == stream.STRUCTURE:
        # Convert each member recursively, then assemble a structured array
        members = OrderedDict((mem.name, datacol_to_array(mem))
                              for mem in datacol.structdata.memberData)
        log.debug('Struct members:\n%s', str(members))
        # str() around name necessary because protobuf gives unicode names,
        # but dtype doesn't support them on Python 2
        dt = np.dtype([(str(name), arr.dtype) for name, arr in members.items()])
        log.debug('Struct dtype: %s', str(dt))
        arr = np.empty((datacol.nelems,), dtype=dt)
        for name, arr_data in members.items():
            arr[name] = arr_data
    else:
        # Make an appropriate datatype
        endian = '>' if datacol.bigend else '<'
        dt = data_type_to_numpy(datacol.dataType).newbyteorder(endian)
        # Turn bytes into an array
        arr = np.frombuffer(datacol.primdata, dtype=dt)
        if arr.size != datacol.nelems:
            log.warning('Array size %d does not agree with nelems %d',
                        arr.size, datacol.nelems)
        if datacol.isVlen:
            arr = process_vlen(datacol, arr)
            if arr.dtype == np.object_:
                arr = reshape_array(datacol, arr)
            else:
                # In this case, the array collapsed, need different resize that
                # correctly sizes from elements
                shape = tuple(r.size for r in datacol.section.range) + (datacol.vlens[0],)
                arr = arr.reshape(*shape)
        else:
            arr = reshape_array(datacol, arr)
    return arr
Depending on the data type specified, this extracts data from the appropriate members
and packs into a :class:`numpy.ndarray`, recursing as necessary for compound data types.
Parameters
----------
datacol : DataCol
Returns
-------
ndarray
array containing extracted data | entailment |
def reshape_array(data_header, array):
    """Extract the appropriate array shape from the header.

    Can handle taking a data header and either bytes containing data or a
    StructureData instance, which will have binary data as well as some
    additional information.

    Parameters
    ----------
    array : :class:`numpy.ndarray`
    data_header : Data
    """
    dims = [rng.size for rng in data_header.section.range]
    if not dims:
        # No section info -- leave the array untouched
        return array
    return array.reshape(*dims)
Can handle taking a data header and either bytes containing data or a StructureData
instance, which will have binary data as well as some additional information.
Parameters
----------
array : :class:`numpy.ndarray`
data_header : Data | entailment |
def data_type_to_numpy(datatype, unsigned=False):
    """Convert an ncstream datatype to a numpy one."""
    code = _dtypeLookup[datatype]
    # Strings and opaque blobs carry no endianness or signedness
    if datatype in (stream.STRING, stream.OPAQUE):
        return np.dtype(code)
    if unsigned:
        code = code.replace('i', 'u')
    # '=' pins native byte order for the primitive types
    return np.dtype('=' + code)
def struct_to_dtype(struct):
    """Convert a Structure specification to a numpy structured dtype."""
    # str() around name necessary because protobuf gives unicode names, but
    # dtype doesn't support them on Python 2
    fields = [(str(var.name), data_type_to_numpy(var.dataType, var.unsigned))
              for var in struct.vars]
    # Nested structures become nested structured dtypes
    fields.extend((str(sub.name), struct_to_dtype(sub)) for sub in struct.structs)
    log.debug('Structure fields: %s', fields)
    return np.dtype(fields)
def unpack_variable(var):
    """Unpack an NCStream Variable into (data, dtype, type name)."""
    # Structures become a structured dtype with no inline data
    if var.dataType == stream.STRUCTURE:
        return None, struct_to_dtype(var), 'Structure'
    elif var.dataType == stream.SEQUENCE:
        log.warning('Sequence support not implemented!')
    dt = data_type_to_numpy(var.dataType, var.unsigned)
    if var.dataType == stream.OPAQUE:
        type_name = 'opaque'
    elif var.dataType == stream.STRING:
        type_name = 'string'
    else:
        type_name = dt.name
    data = None
    if var.data:
        log.debug('Storing variable data: %s %s', dt, var.data)
        if var.dataType == stream.STRING:
            data = var.data
        else:
            # Data is always sent big endian
            data = np.frombuffer(var.data, dtype=dt.newbyteorder('>'))
    return data, dt, type_name
def unpack_attribute(att):
    """Unpack an embedded attribute into a python or numpy object."""
    if att.unsigned:
        log.warning('Unsupported unsigned attribute!')
    # TDS 5.0 now has a dataType attribute that takes precedence over the
    # older type/sdata fields -- the branch ORDER below encodes that
    # precedence and must not be rearranged.
    if att.len == 0:  # Empty
        val = None
    elif att.dataType == stream.STRING:  # Then look for new datatype string
        val = att.sdata
    elif att.dataType:  # Then a non-zero new data type
        val = np.frombuffer(att.data,
                            dtype='>' + _dtypeLookup[att.dataType], count=att.len)
    elif att.type:  # Then non-zero old-data type0
        val = np.frombuffer(att.data,
                            dtype=_attrConverters[att.type], count=att.len)
    elif att.sdata:  # This leaves both 0, try old string
        val = att.sdata
    else:  # Assume new datatype is Char (0)
        val = np.array(att.data, dtype=_dtypeLookup[att.dataType])
    # Single-element attributes are unwrapped to a scalar
    if att.len == 1:
        val = val[0]
    return att.name, val
def read_var_int(file_obj):
    """Read a variable-length integer.

    Parameters
    ----------
    file_obj : file-like object
        The file to read from.

    Returns
    -------
    int
        the variable-length value read
    """
    # Each byte contributes its low 7 bits, least-significant group first;
    # a set MSB (0x80) means another byte follows.
    result = 0
    shift = 0
    more = True
    while more:
        byte = ord(file_obj.read(1))
        result |= (byte & 0x7F) << shift
        shift += 7
        more = bool(byte & 0x80)
    return result
Parameters
----------
file_obj : file-like object
The file to read from.
Returns
-------
int
the variable-length value read | entailment |
def fetch_data(self, **var):
    """Retrieve data from CDMRemote for one or more variables."""
    # Each variable becomes "name[indices]" in the comma-joined var string
    specs = [name + self._convert_indices(ind) for name, ind in var.items()]
    query = self.query().add_query_parameter(req='data', var=','.join(specs))
    return self._fetch(query)
def query(self):
    """Generate a new query for CDMRemote.

    This handles turning on compression if necessary.

    Returns
    -------
    HTTPQuery
        The created query.
    """
    new_query = super(CDMRemote, self).query()
    # Propagate this object's compression setting into the query
    if self.deflate:
        new_query.add_query_parameter(deflate=self.deflate)
    return new_query
This handles turning on compression if necessary.
Returns
-------
HTTPQuery
The created query. | entailment |
def check_token(func):
    """Decorator that verifies the access token is still valid."""
    @wraps(func)
    def inner(*args, **kwargs):
        resp = func(*args, **kwargs)
        # The PCS API answers 401 when the token is missing or expired
        if resp.status_code == 401:
            raise InvalidToken('Access token invalid or no longer valid')
        return resp
    return inner
def upload(self, remote_path, file_content, ondup=None, **kwargs):
    """Upload a single file (up to 2 GB) to Baidu PCS.

    For files larger than 2 GB, split them and use the chunked-upload
    methods (``upload_tmpfile`` + ``upload_superfile``) instead.

    :param remote_path: Destination path on the cloud drive (including the
                        file name); it must begin with ``/apps/``.

                        .. warning::
                            * Paths are limited to 1000 characters;
                            * the characters ``\\\\ ? | " > < : *`` are not
                              allowed;
                            * a file or path name may not begin or end with
                              ``.`` or whitespace
                              (``\\r, \\n, \\t, space, \\0, \\x0B``).
    :param file_content: Content or file object to upload
                         (e.g. ``open('foobar', 'rb')``).
    :param ondup: Optional conflict policy:

                  * ``'overwrite'`` replaces a file of the same name;
                  * ``'newcopy'`` keeps both, renaming the new one to
                    ``name_date.ext``.
    :return: Response object
    """
    extra = {'path': remote_path, 'ondup': ondup}
    upload_files = {'file': ('file', file_content, '')}
    return self._request('file', 'upload',
                         url='https://c.pcs.baidu.com/rest/2.0/pcs/file',
                         extra_params=extra, files=upload_files, **kwargs)
| 百度PCS服务目前支持最大2G的单个文件上传。
| 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。
:param remote_path: 网盘中文件的保存路径(包含文件名)。
必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param file_content: 上传文件的内容/文件对象 。
(e.g. ``open('foobar', 'rb')`` )
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象 | entailment |
def upload_tmpfile(self, file_content, **kwargs):
    """Chunked upload -- upload one file chunk.

    Baidu PCS limits a direct upload to 2 GB per file. To upload a larger
    file, split it into chunks of at most 2 GB each, upload every chunk
    with this method, then merge them with ``upload_superfile``. The same
    pair of calls can also be used to implement resumable uploads.

    :param file_content: Chunk content or a file object
                         (e.g. ``open('foobar', 'rb')``).
    :return: Response object
    """
    return self._request('file', 'upload',
                         url='https://c.pcs.baidu.com/rest/2.0/pcs/file',
                         extra_params={'type': 'tmpfile'},
                         files={'file': ('file', file_content, '')},
                         **kwargs)
百度 PCS 服务支持每次直接上传最大2G的单个文件。
如需支持上传超大文件(>2G),则可以通过组合调用分片文件上传的
``upload_tmpfile`` 方法和 ``upload_superfile`` 方法实现:
1. 首先,将超大文件分割为2G以内的单文件,并调用 ``upload_tmpfile``
将分片文件依次上传;
2. 其次,调用 ``upload_superfile`` ,完成分片文件的重组。
除此之外,如果应用中需要支持断点续传的功能,
也可以通过分片上传文件并调用 ``upload_superfile`` 接口的方式实现。
:param file_content: 上传文件的内容/文件对象
(e.g. ``open('foobar', 'rb')`` )
:return: Response 对象 | entailment |
def upload_superfile(self, remote_path, block_list, ondup=None, **kwargs):
    """Chunked upload -- merge previously uploaded chunks.

    Used together with ``upload_tmpfile`` to assemble files larger than
    2 GB, or to finish a resumable upload.

    :param remote_path: Destination path on the cloud drive (including the
                        file name); it must begin with ``/apps/``.

                        .. warning::
                            * Paths are limited to 1000 characters;
                            * the characters ``\\\\ ? | " > < : *`` are not
                              allowed;
                            * a file or path name may not begin or end with
                              ``.`` or whitespace
                              (``\\r, \\n, \\t, space, \\0, \\x0B``).
    :param block_list: MD5 checksums of the uploaded chunks, in order;
                       at least two and at most 1024 entries.
    :type block_list: list
    :param ondup: Optional conflict policy: ``'overwrite'`` replaces a file
                  of the same name; ``'newcopy'`` keeps both by renaming
                  the new one to ``name_date.ext``.
    :return: Response object
    """
    extra = {'path': remote_path, 'ondup': ondup}
    payload = {'param': json.dumps({'block_list': block_list})}
    return self._request('file', 'createsuperfile', extra_params=extra,
                         data=payload, **kwargs)
与分片文件上传的 ``upload_tmpfile`` 方法配合使用,
可实现超大文件(>2G)上传,同时也可用于断点续传的场景。
:param remote_path: 网盘中文件的保存路径(包含文件名)。
必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param block_list: 子文件内容的 MD5 值列表;子文件至少两个,最多1024个。
:type block_list: list
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象 | entailment |
def mkdir(self, remote_path, **kwargs):
    """Create a directory for the current user.

    :param remote_path: Directory path on the cloud drive; it must begin
                        with ``/apps/``.

                        .. warning::
                            * Paths are limited to 1000 characters;
                            * the characters ``\\\\ ? | " > < : *`` are not
                              allowed;
                            * a file or path name may not begin or end with
                              ``.`` or whitespace
                              (``\\r, \\n, \\t, space, \\0, \\x0B``).
    :return: Response object
    """
    return self._request('file', 'mkdir', data={'path': remote_path},
                         **kwargs)
:param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象 | entailment |
def meta(self, remote_path, **kwargs):
    """Fetch metadata for a single file or directory.

    :param remote_path: File/directory path on the cloud drive; it must
                        begin with ``/apps/``.

                        .. warning::
                            * Paths are limited to 1000 characters;
                            * the characters ``\\\\ ? | " > < : *`` are not
                              allowed;
                            * a file or path name may not begin or end with
                              ``.`` or whitespace
                              (``\\r, \\n, \\t, space, \\0, \\x0B``).
    :return: Response object
    """
    return self._request('file', 'meta',
                         extra_params={'path': remote_path}, **kwargs)
:param remote_path: 网盘中文件/目录的路径,必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象 | entailment |
def list_files(self, remote_path, by=None, order=None,
               limit=None, **kwargs):
    """List the files under a directory.

    :param remote_path: Directory path on the cloud drive; it must begin
                        with ``/apps/``.

                        .. warning::
                            * Paths are limited to 1000 characters;
                            * the characters ``\\\\ ? | " > < : *`` are not
                              allowed;
                            * a file or path name may not begin or end with
                              ``.`` or whitespace
                              (``\\r, \\n, \\t, space, \\0, \\x0B``).
    :param by: Sort key; defaults to sorting by file type:

               * ``'time'`` (modification time)
               * ``'name'`` (file name)
               * ``'size'`` (note that directories have no size)
    :param order: ``'asc'`` (ascending) or ``'desc'`` (descending,
                  the default).
    :param limit: Slice of the result set, formatted as ``'n1-n2'``:
                  returns entries in ``[n1, n2)``, zero-based; defaults
                  to all entries.
    :return: Response object
    """
    listing_params = {'path': remote_path,
                      'by': by,
                      'order': order,
                      'limit': limit}
    return self._request('file', 'list', extra_params=listing_params,
                         **kwargs)
:param remote_path: 网盘中目录的路径,必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param by: 排序字段,缺省根据文件类型排序:
* time(修改时间)
* name(文件名)
* size(大小,注意目录无大小)
:param order: “asc”或“desc”,缺省采用降序排序。
* asc(升序)
* desc(降序)
:param limit: 返回条目控制,参数格式为:n1-n2。
返回结果集的[n1, n2)之间的条目,缺省返回所有条目;
n1从0开始。
:return: Response 对象 | entailment |
def move(self, from_path, to_path, **kwargs):
    """Move a single file or directory.

    :param from_path: Source path on the cloud drive (including the
                      file name).
    :param to_path: Destination path on the cloud drive (including the
                    file name).

                    .. warning::
                        * Paths are limited to 1000 characters;
                        * the characters ``\\\\ ? | " > < : *`` are not
                          allowed;
                        * a file or path name may not begin or end with
                          ``.`` or whitespace
                          (``\\r, \\n, \\t, space, \\0, \\x0B``).
    :return: Response object
    """
    return self._request('file', 'move',
                         data={'from': from_path, 'to': to_path}, **kwargs)
:param from_path: 源文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param to_path: 目标文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象 | entailment |
def multi_move(self, path_list, **kwargs):
"""批量移动文件或目录.
:param path_list: 源文件地址和目标文件地址对列表:
>>> path_list = [
... ('/apps/test_sdk/test.txt', # 源文件
... '/apps/test_sdk/testmkdir/b.txt' # 目标文件
... ),
... ('/apps/test_sdk/test.txt', # 源文件
... '/apps/test_sdk/testmkdir/b.txt' # 目标文件
... ),
... ]
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type path_list: list
:return: Response 对象
"""
data = {
'param': json.dumps({
'list': [{'from': x[0], 'to': x[1]} for x in path_list]
}),
}
return self._request('file', 'move', data=data, **kwargs) | 批量移动文件或目录.
:param path_list: 源文件地址和目标文件地址对列表:
>>> path_list = [
... ('/apps/test_sdk/test.txt', # 源文件
... '/apps/test_sdk/testmkdir/b.txt' # 目标文件
... ),
... ('/apps/test_sdk/test.txt', # 源文件
... '/apps/test_sdk/testmkdir/b.txt' # 目标文件
... ),
... ]
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type path_list: list
:return: Response 对象 | entailment |
def copy(self, from_path, to_path, **kwargs):
"""拷贝文件或目录.
:param from_path: 源文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param to_path: 目标文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象
.. warning::
``move`` 操作后,源文件被移动至目标地址;
``copy`` 操作则会保留原文件。
"""
data = {
'from': from_path,
'to': to_path,
}
return self._request('file', 'copy', data=data, **kwargs) | 拷贝文件或目录.
:param from_path: 源文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param to_path: 目标文件/目录在网盘中的路径(包括文件名)。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象
.. warning::
``move`` 操作后,源文件被移动至目标地址;
``copy`` 操作则会保留原文件。 | entailment |
def delete(self, remote_path, **kwargs):
"""删除单个文件或目录.
.. warning::
* 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放
不占用用户的空间配额;
* 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。
:param remote_path: 网盘中文件/目录的路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:return: Response 对象
"""
data = {
'path': remote_path
}
return self._request('file', 'delete', data=data, **kwargs) | 删除单个文件或目录.
.. warning::
* 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放
不占用用户的空间配额;
* 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。
:param remote_path: 网盘中文件/目录的路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:return: Response 对象 | entailment |
def multi_delete(self, path_list, **kwargs):
"""批量删除文件或目录.
.. warning::
* 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放
不占用用户的空间配额;
* 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。
:param path_list: 网盘中文件/目录的路径列表,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type path_list: list
:return: Response 对象
"""
data = {
'param': json.dumps({
'list': [{'path': path} for path in path_list]
}),
}
return self._request('file', 'delete', data=data, **kwargs) | 批量删除文件或目录.
.. warning::
* 文件/目录删除后默认临时存放在回收站内,删除文件或目录的临时存放
不占用用户的空间配额;
* 存放有效期为10天,10天内可还原回原路径下,10天后则永久删除。
:param path_list: 网盘中文件/目录的路径列表,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type path_list: list
:return: Response 对象 | entailment |
def search(self, remote_path, keyword, recurrent='0', **kwargs):
"""按文件名搜索文件(不支持查找目录).
:param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:param keyword: 关键词
:type keyword: str
:param recurrent: 是否递归。
* "0"表示不递归
* "1"表示递归
:type recurrent: str
:return: Response 对象
"""
params = {
'path': remote_path,
'wd': keyword,
're': recurrent,
}
return self._request('file', 'search', extra_params=params, **kwargs) | 按文件名搜索文件(不支持查找目录).
:param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:param keyword: 关键词
:type keyword: str
:param recurrent: 是否递归。
* "0"表示不递归
* "1"表示递归
:type recurrent: str
:return: Response 对象 | entailment |
def thumbnail(self, remote_path, height, width, quality=100, **kwargs):
"""获取指定图片文件的缩略图.
:param remote_path: 源图片的路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param height: 指定缩略图的高度,取值范围为(0,1600]。
:type height: int
:param width: 指定缩略图的宽度,取值范围为(0,1600]。
:type width: int
:param quality: 缩略图的质量,默认为100,取值范围(0,100]。
:type quality: int
:return: Response 对象
.. warning::
有以下限制条件:
* 原图大小(0, 10M];
* 原图类型: jpg、jpeg、bmp、gif、png;
* 目标图类型:和原图的类型有关;例如:原图是gif图片,
则缩略后也为gif图片。
"""
params = {
'path': remote_path,
'height': height,
'width': width,
'quality': quality,
}
return self._request('thumbnail', 'generate', extra_params=params,
**kwargs) | 获取指定图片文件的缩略图.
:param remote_path: 源图片的路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param height: 指定缩略图的高度,取值范围为(0,1600]。
:type height: int
:param width: 指定缩略图的宽度,取值范围为(0,1600]。
:type width: int
:param quality: 缩略图的质量,默认为100,取值范围(0,100]。
:type quality: int
:return: Response 对象
.. warning::
有以下限制条件:
* 原图大小(0, 10M];
* 原图类型: jpg、jpeg、bmp、gif、png;
* 目标图类型:和原图的类型有关;例如:原图是gif图片,
则缩略后也为gif图片。 | entailment |
def diff(self, cursor='null', **kwargs):
"""文件增量更新操作查询接口.
本接口有数秒延迟,但保证返回结果为最终一致.
:param cursor: 用于标记更新断点。
* 首次调用cursor=null;
* 非首次调用,使用最后一次调用diff接口的返回结果
中的cursor。
:type cursor: str
:return: Response 对象
"""
params = {
'cursor': cursor,
}
return self._request('file', 'diff', extra_params=params, **kwargs) | 文件增量更新操作查询接口.
本接口有数秒延迟,但保证返回结果为最终一致.
:param cursor: 用于标记更新断点。
* 首次调用cursor=null;
* 非首次调用,使用最后一次调用diff接口的返回结果
中的cursor。
:type cursor: str
:return: Response 对象 | entailment |
def video_convert(self, remote_path, video_type, **kwargs):
"""对视频文件进行转码,实现实时观看视频功能.
可下载支持 HLS/M3U8 的 `媒体云播放器 SDK <HLSSDK_>`__ 配合使用.
.. _HLSSDK:
http://developer.baidu.com/wiki/index.php?title=docs/cplat/media/sdk
:param remote_path: 需要下载的视频文件路径,以/开头的绝对路径,
需含源文件的文件名。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:param video_type: 目前支持以下格式:
M3U8_320_240、M3U8_480_224、M3U8_480_360、
M3U8_640_480和M3U8_854_480
:type video_type: str
:return: Response 对象
.. warning::
目前这个接口支持的源文件格式如下:
+--------------------------+------------+--------------------------+
|格式名称 |扩展名 |备注 |
+==========================+============+==========================+
|Apple HTTP Live Streaming |m3u8/m3u |iOS支持的视频格式 |
+--------------------------+------------+--------------------------+
|ASF |asf |视频格式 |
+--------------------------+------------+--------------------------+
|AVI |avi |视频格式 |
+--------------------------+------------+--------------------------+
|Flash Video (FLV) |flv |Macromedia Flash视频格式 |
+--------------------------+------------+--------------------------+
|GIF Animation |gif |视频格式 |
+--------------------------+------------+--------------------------+
|Matroska |mkv |Matroska/WebM视频格式 |
+--------------------------+------------+--------------------------+
|MOV/QuickTime/MP4 |mov/mp4/m4a/|支持3GP、3GP2、PSP、iPod |
| |3gp/3g2/mj2 |之类视频格式 |
+--------------------------+------------+--------------------------+
|MPEG-PS (program stream) |mpeg |也就是VOB文件/SVCD/DVD格式|
+--------------------------+------------+--------------------------+
|MPEG-TS (transport stream)|ts | 即DVB传输流 |
+--------------------------+------------+--------------------------+
|RealMedia |rm/rmvb | Real视频格式 |
+--------------------------+------------+--------------------------+
|WebM |webm | Html视频格式 |
+--------------------------+------------+--------------------------+
"""
params = {
'path': remote_path,
'type': video_type,
}
return self._request('file', 'streaming', extra_params=params,
**kwargs) | 对视频文件进行转码,实现实时观看视频功能.
可下载支持 HLS/M3U8 的 `媒体云播放器 SDK <HLSSDK_>`__ 配合使用.
.. _HLSSDK:
http://developer.baidu.com/wiki/index.php?title=docs/cplat/media/sdk
:param remote_path: 需要下载的视频文件路径,以/开头的绝对路径,
需含源文件的文件名。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:param video_type: 目前支持以下格式:
M3U8_320_240、M3U8_480_224、M3U8_480_360、
M3U8_640_480和M3U8_854_480
:type video_type: str
:return: Response 对象
.. warning::
目前这个接口支持的源文件格式如下:
+--------------------------+------------+--------------------------+
|格式名称 |扩展名 |备注 |
+==========================+============+==========================+
|Apple HTTP Live Streaming |m3u8/m3u |iOS支持的视频格式 |
+--------------------------+------------+--------------------------+
|ASF |asf |视频格式 |
+--------------------------+------------+--------------------------+
|AVI |avi |视频格式 |
+--------------------------+------------+--------------------------+
|Flash Video (FLV) |flv |Macromedia Flash视频格式 |
+--------------------------+------------+--------------------------+
|GIF Animation |gif |视频格式 |
+--------------------------+------------+--------------------------+
|Matroska |mkv |Matroska/WebM视频格式 |
+--------------------------+------------+--------------------------+
|MOV/QuickTime/MP4 |mov/mp4/m4a/|支持3GP、3GP2、PSP、iPod |
| |3gp/3g2/mj2 |之类视频格式 |
+--------------------------+------------+--------------------------+
|MPEG-PS (program stream) |mpeg |也就是VOB文件/SVCD/DVD格式|
+--------------------------+------------+--------------------------+
|MPEG-TS (transport stream)|ts | 即DVB传输流 |
+--------------------------+------------+--------------------------+
|RealMedia |rm/rmvb | Real视频格式 |
+--------------------------+------------+--------------------------+
|WebM |webm | Html视频格式 |
+--------------------------+------------+--------------------------+ | entailment |
def list_streams(self, file_type, start=0, limit=100,
filter_path=None, **kwargs):
"""以视频、音频、图片及文档四种类型的视图获取所创建应用程序下的
文件列表.
:param file_type: 类型分为video、audio、image及doc四种。
:param start: 返回条目控制起始值,缺省值为0。
:param limit: 返回条目控制长度,缺省为1000,可配置。
:param filter_path: 需要过滤的前缀路径,如:/apps/album
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象
"""
params = {
'type': file_type,
'start': start,
'limit': limit,
'filter_path': filter_path,
}
return self._request('stream', 'list', extra_params=params,
**kwargs) | 以视频、音频、图片及文档四种类型的视图获取所创建应用程序下的
文件列表.
:param file_type: 类型分为video、audio、image及doc四种。
:param start: 返回条目控制起始值,缺省值为0。
:param limit: 返回条目控制长度,缺省为1000,可配置。
:param filter_path: 需要过滤的前缀路径,如:/apps/album
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象 | entailment |
def download_stream(self, remote_path, **kwargs):
"""为当前用户下载一个流式文件.其参数和返回结果与下载单个文件的相同.
:param remote_path: 需要下载的文件路径,以/开头的绝对路径,含文件名。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象
"""
params = {
'path': remote_path,
}
url = 'https://d.pcs.baidu.com/rest/2.0/pcs/file'
return self._request('stream', 'download', url=url,
extra_params=params, **kwargs) | 为当前用户下载一个流式文件.其参数和返回结果与下载单个文件的相同.
:param remote_path: 需要下载的文件路径,以/开头的绝对路径,含文件名。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象 | entailment |
def rapid_upload(self, remote_path, content_length, content_md5,
content_crc32, slice_md5, ondup=None, **kwargs):
"""秒传一个文件.
.. warning::
* 被秒传文件必须大于256KB(即 256*1024 B)。
* 校验段为文件的前256KB,秒传接口需要提供校验段的MD5。
(非强一致接口,上传后请等待1秒后再读取)
:param remote_path: 上传文件的全路径名。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param content_length: 待秒传文件的长度。
:param content_md5: 待秒传文件的MD5。
:param content_crc32: 待秒传文件的CRC32。
:param slice_md5: 待秒传文件校验段的MD5。
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象
"""
data = {
'path': remote_path,
'content-length': content_length,
'content-md5': content_md5,
'content-crc32': content_crc32,
'slice-md5': slice_md5,
'ondup': ondup,
}
return self._request('file', 'rapidupload', data=data, **kwargs) | 秒传一个文件.
.. warning::
* 被秒传文件必须大于256KB(即 256*1024 B)。
* 校验段为文件的前256KB,秒传接口需要提供校验段的MD5。
(非强一致接口,上传后请等待1秒后再读取)
:param remote_path: 上传文件的全路径名。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param content_length: 待秒传文件的长度。
:param content_md5: 待秒传文件的MD5。
:param content_crc32: 待秒传文件的CRC32。
:param slice_md5: 待秒传文件校验段的MD5。
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象 | entailment |
def add_download_task(self, source_url, remote_path,
rate_limit=None, timeout=60 * 60,
expires=None, callback='', **kwargs):
"""添加离线下载任务,实现单个文件离线下载.
:param source_url: 源文件的URL。
:param remote_path: 下载后的文件保存路径。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param rate_limit: 下载限速,默认不限速。
:type rate_limit: int or long
:param timeout: 下载超时时间,默认3600秒。
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:param callback: 下载完毕后的回调,默认为空。
:type callback: str
:return: Response 对象
"""
data = {
'source_url': source_url,
'save_path': remote_path,
'expires': expires,
'rate_limit': rate_limit,
'timeout': timeout,
'callback': callback,
}
return self._request('services/cloud_dl', 'add_task',
data=data, **kwargs) | 添加离线下载任务,实现单个文件离线下载.
:param source_url: 源文件的URL。
:param remote_path: 下载后的文件保存路径。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param rate_limit: 下载限速,默认不限速。
:type rate_limit: int or long
:param timeout: 下载超时时间,默认3600秒。
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:param callback: 下载完毕后的回调,默认为空。
:type callback: str
:return: Response 对象 | entailment |
def query_download_tasks(self, task_ids, operate_type=1,
expires=None, **kwargs):
"""根据任务ID号,查询离线下载任务信息及进度信息。
:param task_ids: 要查询的任务ID列表
:type task_ids: list or tuple
:param operate_type:
* 0:查任务信息
* 1:查进度信息,默认为1
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
params = {
'task_ids': ','.join(map(str, task_ids)),
'op_type': operate_type,
'expires': expires,
}
return self._request('services/cloud_dl', 'query_task',
extra_params=params, **kwargs) | 根据任务ID号,查询离线下载任务信息及进度信息。
:param task_ids: 要查询的任务ID列表
:type task_ids: list or tuple
:param operate_type:
* 0:查任务信息
* 1:查进度信息,默认为1
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象 | entailment |
def list_download_tasks(self, need_task_info=1, start=0, limit=10, asc=0,
create_time=None, status=None, source_url=None,
remote_path=None, expires=None, **kwargs):
"""查询离线下载任务ID列表及任务信息.
:param need_task_info: 是否需要返回任务信息:
* 0:不需要
* 1:需要,默认为1
:param start: 查询任务起始位置,默认为0。
:param limit: 设定返回任务数量,默认为10。
:param asc:
* 0:降序,默认值
* 1:升序
:param create_time: 任务创建时间,默认为空。
:type create_time: int
:param status: 任务状态,默认为空。
0:下载成功,1:下载进行中 2:系统错误,3:资源不存在,
4:下载超时,5:资源存在但下载失败, 6:存储空间不足,
7:目标地址数据已存在, 8:任务取消.
:type status: int
:param source_url: 源地址URL,默认为空。
:param remote_path: 文件保存路径,默认为空。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
data = {
'expires': expires,
'start': start,
'limit': limit,
'asc': asc,
'source_url': source_url,
'save_path': remote_path,
'create_time': create_time,
'status': status,
'need_task_info': need_task_info,
}
return self._request('services/cloud_dl', 'list_task',
data=data, **kwargs) | 查询离线下载任务ID列表及任务信息.
:param need_task_info: 是否需要返回任务信息:
* 0:不需要
* 1:需要,默认为1
:param start: 查询任务起始位置,默认为0。
:param limit: 设定返回任务数量,默认为10。
:param asc:
* 0:降序,默认值
* 1:升序
:param create_time: 任务创建时间,默认为空。
:type create_time: int
:param status: 任务状态,默认为空。
0:下载成功,1:下载进行中 2:系统错误,3:资源不存在,
4:下载超时,5:资源存在但下载失败, 6:存储空间不足,
7:目标地址数据已存在, 8:任务取消.
:type status: int
:param source_url: 源地址URL,默认为空。
:param remote_path: 文件保存路径,默认为空。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象 | entailment |
def cancel_download_task(self, task_id, expires=None, **kwargs):
"""取消离线下载任务.
:param task_id: 要取消的任务ID号。
:type task_id: str
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
data = {
'expires': expires,
'task_id': task_id,
}
return self._request('services/cloud_dl', 'cancle_task',
data=data, **kwargs) | 取消离线下载任务.
:param task_id: 要取消的任务ID号。
:type task_id: str
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象 | entailment |
def list_recycle_bin(self, start=0, limit=1000, **kwargs):
"""获取回收站中的文件及目录列表.
:param start: 返回条目的起始值,缺省值为0
:param limit: 返回条目的长度,缺省值为1000
:return: Response 对象
"""
params = {
'start': start,
'limit': limit,
}
return self._request('file', 'listrecycle',
extra_params=params, **kwargs) | 获取回收站中的文件及目录列表.
:param start: 返回条目的起始值,缺省值为0
:param limit: 返回条目的长度,缺省值为1000
:return: Response 对象 | entailment |
def restore_recycle_bin(self, fs_id, **kwargs):
"""还原单个文件或目录(非强一致接口,调用后请sleep 1秒读取).
:param fs_id: 所还原的文件或目录在PCS的临时唯一标识ID。
:type fs_id: str
:return: Response 对象
"""
data = {
'fs_id': fs_id,
}
return self._request('file', 'restore', data=data, **kwargs) | 还原单个文件或目录(非强一致接口,调用后请sleep 1秒读取).
:param fs_id: 所还原的文件或目录在PCS的临时唯一标识ID。
:type fs_id: str
:return: Response 对象 | entailment |
def multi_restore_recycle_bin(self, fs_ids, **kwargs):
"""批量还原文件或目录(非强一致接口,调用后请sleep1秒 ).
:param fs_ids: 所还原的文件或目录在 PCS 的临时唯一标识 ID 的列表。
:type fs_ids: list or tuple
:return: Response 对象
"""
data = {
'param': json.dumps({
'list': [{'fs_id': fs_id} for fs_id in fs_ids]
}),
}
return self._request('file', 'restore', data=data, **kwargs) | 批量还原文件或目录(非强一致接口,调用后请sleep1秒 ).
:param fs_ids: 所还原的文件或目录在 PCS 的临时唯一标识 ID 的列表。
:type fs_ids: list or tuple
:return: Response 对象 | entailment |
def get_new_access_token(refresh_token, client_id, client_secret,
scope=None, **kwargs):
"""使用 Refresh Token 刷新以获得新的 Access Token.
:param refresh_token: 用于刷新 Access Token 用的 Refresh Token;
:param client_id: 应用的 API Key;
:param client_secret: 应用的 Secret Key;
:param scope: 以空格分隔的权限列表,若不传递此参数,代表请求的数据访问
操作权限与上次获取 Access Token 时一致。通过 Refresh Token
刷新 Access Token 时所要求的 scope 权限范围必须小于等于上次
获取 Access Token 时授予的权限范围。 关于权限的具体信息请参考
“ `权限列表`__ ”。
:return: Response 对象
关于 ``response.json()`` 字典的内容所代表的含义,
请参考 `相关的百度帮助文档`__ 。
__ http://developer.baidu.com/wiki/index.php?title=docs/oauth/baiduoauth/list
__ http://developer.baidu.com/wiki/index.php?title=docs/oauth/refresh
"""
data = {
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret,
}
if scope:
data['scope'] = scope
url = 'https://openapi.baidu.com/oauth/2.0/token'
return requests.post(url, data=data) | 使用 Refresh Token 刷新以获得新的 Access Token.
:param refresh_token: 用于刷新 Access Token 用的 Refresh Token;
:param client_id: 应用的 API Key;
:param client_secret: 应用的 Secret Key;
:param scope: 以空格分隔的权限列表,若不传递此参数,代表请求的数据访问
操作权限与上次获取 Access Token 时一致。通过 Refresh Token
刷新 Access Token 时所要求的 scope 权限范围必须小于等于上次
获取 Access Token 时授予的权限范围。 关于权限的具体信息请参考
“ `权限列表`__ ”。
:return: Response 对象
关于 ``response.json()`` 字典的内容所代表的含义,
请参考 `相关的百度帮助文档`__ 。
__ http://developer.baidu.com/wiki/index.php?title=docs/oauth/baiduoauth/list
__ http://developer.baidu.com/wiki/index.php?title=docs/oauth/refresh | entailment |
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid'] | Login to verisure app api
Login before calling any read or write commands | entailment |
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text) | Get information about installations | entailment |
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get overview for installation | entailment |
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) | Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False' | entailment |
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events | entailment |
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get climate history
Args:
device_label: device label of climate device | entailment |
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock' | entailment |
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state | entailment |
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get lock configuration
Args:
device_label (str): device label of lock | entailment |
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) | Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled | entailment |
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) | Capture smartcam image
Args:
device_label (str): device label of camera | entailment |
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series | entailment |
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk) | Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file | entailment |
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) | Logout and remove vid | entailment |
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO' | entailment |
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL' | entailment |
def print_result(overview, *names):
""" Print the result of a verisure request """
if names:
for name in names:
toprint = overview
for part in name.split('/'):
toprint = toprint[part]
print(json.dumps(toprint, indent=4, separators=(',', ': ')))
else:
print(json.dumps(overview, indent=4, separators=(',', ': '))) | Print the result of a verisure request | entailment |
def main():
""" Start verisure command line """
parser = argparse.ArgumentParser(
description='Read or change status of verisure devices')
parser.add_argument(
'username',
help='MyPages username')
parser.add_argument(
'password',
help='MyPages password')
parser.add_argument(
'-i', '--installation',
help='Installation number',
type=int,
default=1)
parser.add_argument(
'-c', '--cookie',
help='File to store cookie in',
default='~/.verisure-cookie')
commandsparser = parser.add_subparsers(
help='commands',
dest='command')
# installations command
commandsparser.add_parser(
COMMAND_INSTALLATIONS,
help='Get information about installations')
# overview command
overview_parser = commandsparser.add_parser(
COMMAND_OVERVIEW,
help='Read status of one or many device types')
overview_parser.add_argument(
'filter',
nargs='*',
help='Read status for device type')
# armstate command
commandsparser.add_parser(
COMMAND_ARMSTATE,
help='Get arm state')
# Set command
set_parser = commandsparser.add_parser(
COMMAND_SET,
help='Set status of a device')
set_device = set_parser.add_subparsers(
help='device',
dest='device')
# Set smartplug
set_smartplug = set_device.add_parser(
'smartplug',
help='set smartplug value')
set_smartplug.add_argument(
'device_label',
help='device label')
set_smartplug.add_argument(
'new_value',
choices=[
'on',
'off'],
help='new value')
# Set alarm
set_alarm = set_device.add_parser(
'alarm',
help='set alarm status')
set_alarm.add_argument(
'code',
help='alarm code')
set_alarm.add_argument(
'new_status',
choices=[
'ARMED_HOME',
'ARMED_AWAY',
'DISARMED'],
help='new status')
# Set lock
set_lock = set_device.add_parser(
'lock',
help='set lock status')
set_lock.add_argument(
'code',
help='alarm code')
set_lock.add_argument(
'serial_number',
help='serial number')
set_lock.add_argument(
'new_status',
choices=[
'lock',
'unlock'],
help='new status')
# Get climate history
history_climate = commandsparser.add_parser(
COMMAND_CLIMATE,
help='get climate history')
history_climate.add_argument(
'device_label',
help='device label')
# Event log command
eventlog_parser = commandsparser.add_parser(
COMMAND_EVENTLOG,
help='Get event log')
eventlog_parser.add_argument(
'-p', '--pagesize',
type=int,
default=15,
help='Number of elements on one page')
eventlog_parser.add_argument(
'-o', '--offset',
type=int,
default=0,
help='Page offset')
eventlog_parser.add_argument(
'-f', '--filter',
nargs='*',
default=[],
choices=[
'ARM',
'DISARM',
'FIRE',
'INTRUSION',
'TECHNICAL',
'SOS',
'WARNING',
'LOCK',
'UNLOCK'],
help='Filter event log')
# Capture command
capture_parser = commandsparser.add_parser(
COMMAND_CAPTURE,
help='Capture image')
capture_parser.add_argument(
'device_label',
help='Device label')
# Image series command
commandsparser.add_parser(
COMMAND_IMAGESERIES,
help='Get image series')
# Get image command
getimage_parser = commandsparser.add_parser(
COMMAND_GETIMAGE,
help='Download image')
getimage_parser.add_argument(
'device_label',
help='Device label')
getimage_parser.add_argument(
'image_id',
help='image ID')
getimage_parser.add_argument(
'file_name',
help='Output file name')
# Vacation mode command
commandsparser.add_parser(
COMMAND_VACATIONMODE,
help='Get vacation mode info')
# Door window status command
commandsparser.add_parser(
COMMAND_DOOR_WINDOW,
help='Get door/window status')
# Test ethernet command
commandsparser.add_parser(
COMMAND_TEST_ETHERNET,
help='Update ethernet status')
args = parser.parse_args()
session = verisure.Session(args.username, args.password, args.cookie)
session.login()
try:
session.set_giid(session.installations[args.installation - 1]['giid'])
if args.command == COMMAND_INSTALLATIONS:
print_result(session.installations)
if args.command == COMMAND_OVERVIEW:
print_result(session.get_overview(), *args.filter)
if args.command == COMMAND_ARMSTATE:
print_result(session.get_arm_state())
if args.command == COMMAND_SET:
if args.device == 'smartplug':
session.set_smartplug_state(
args.device_label,
args.new_value == 'on')
if args.device == 'alarm':
print_result(session.set_arm_state(
args.code,
args.new_status))
if args.device == 'lock':
print_result(session.set_lock_state(
args.code,
args.serial_number,
args.new_status))
if args.command == COMMAND_CLIMATE:
print_result(session.get_climate(args.device_label))
if args.command == COMMAND_EVENTLOG:
print_result(
session.get_history(
args.filter,
pagesize=args.pagesize,
offset=args.offset))
if args.command == COMMAND_CAPTURE:
session.capture_image(args.device_label)
if args.command == COMMAND_IMAGESERIES:
print_result(session.get_camera_imageseries())
if args.command == COMMAND_GETIMAGE:
session.download_image(
args.device_label,
args.image_id,
args.file_name)
if args.command == COMMAND_VACATIONMODE:
print_result(session.get_vacation_mode())
if args.command == COMMAND_DOOR_WINDOW:
print_result(session.get_door_window())
if args.command == COMMAND_TEST_ETHERNET:
session.test_ethernet()
except verisure.session.ResponseError as ex:
print(ex.text) | Start verisure command line | entailment |
def type_id(self):
"""
Shortcut to retrieving the ContentType id of the model.
"""
try:
return ContentType.objects.get_for_model(self.model, for_concrete_model=False).id
except DatabaseError as e:
raise DatabaseError("Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(str(e))) | Shortcut to retrieving the ContentType id of the model. | entailment |
def get_output_cache_key(self, placeholder_name, instance):
"""
.. versionadded:: 0.9
Return the default cache key which is used to store a rendered item.
By default, this function generates the cache key using :func:`get_output_cache_base_key`.
"""
cachekey = self.get_output_cache_base_key(placeholder_name, instance)
if self.cache_output_per_site:
cachekey = "{0}-s{1}".format(cachekey, settings.SITE_ID)
# Append language code
if self.cache_output_per_language:
# NOTE: Not using self.language_code, but using the current language instead.
# That is what the {% trans %} tags are rendered as after all.
# The render_placeholder() code can switch the language if needed.
user_language = get_language()
if user_language not in self.cache_supported_language_codes:
user_language = 'unsupported'
cachekey = "{0}.{1}".format(cachekey, user_language)
return cachekey | .. versionadded:: 0.9
Return the default cache key which is used to store a rendered item.
By default, this function generates the cache key using :func:`get_output_cache_base_key`. | entailment |
def get_output_cache_keys(self, placeholder_name, instance):
"""
.. versionadded:: 0.9
Return the possible cache keys for a rendered item.
This method should be overwritten when implementing a function :func:`set_cached_output` method
or when implementing a :func:`get_output_cache_key` function.
By default, this function generates the cache key using :func:`get_output_cache_base_key`.
"""
base_key = self.get_output_cache_base_key(placeholder_name, instance)
cachekeys = [
base_key,
]
if self.cache_output_per_site:
site_ids = list(Site.objects.values_list('pk', flat=True))
if settings.SITE_ID not in site_ids:
site_ids.append(settings.SITE_ID)
base_key = get_rendering_cache_key(placeholder_name, instance)
cachekeys = ["{0}-s{1}".format(base_key, site_id) for site_id in site_ids]
if self.cache_output_per_language or self.render_ignore_item_language:
# Append language code to all keys,
# have to invalidate a lot more items in memcache.
# Also added "None" suffix, since get_parent_language_code() may return that.
# TODO: ideally for render_ignore_item_language, only invalidate all when the fallback language changed.
total_list = []
cache_languages = list(self.cache_supported_language_codes) + ['unsupported', 'None']
# All variants of the Placeholder (for full page caching)
placeholder = instance.placeholder
total_list.extend(get_placeholder_cache_key(placeholder, lc) for lc in cache_languages)
# All variants of the ContentItem in different languages
for user_language in cache_languages:
total_list.extend("{0}.{1}".format(base, user_language) for base in cachekeys)
cachekeys = total_list
return cachekeys | .. versionadded:: 0.9
Return the possible cache keys for a rendered item.
This method should be overwritten when implementing a function :func:`set_cached_output` method
or when implementing a :func:`get_output_cache_key` function.
By default, this function generates the cache key using :func:`get_output_cache_base_key`. | entailment |
def get_cached_output(self, placeholder_name, instance):
"""
.. versionadded:: 0.9
Return the cached output for a rendered item, or ``None`` if no output is cached.
This method can be overwritten to implement custom caching mechanisms.
By default, this function generates the cache key using :func:`get_output_cache_key`
and retrieves the results from the configured Django cache backend (e.g. memcached).
"""
cachekey = self.get_output_cache_key(placeholder_name, instance)
return cache.get(cachekey) | .. versionadded:: 0.9
Return the cached output for a rendered item, or ``None`` if no output is cached.
This method can be overwritten to implement custom caching mechanisms.
By default, this function generates the cache key using :func:`get_output_cache_key`
and retrieves the results from the configured Django cache backend (e.g. memcached). | entailment |
def set_cached_output(self, placeholder_name, instance, output):
"""
.. versionadded:: 0.9
Store the cached output for a rendered item.
This method can be overwritten to implement custom caching mechanisms.
By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key`
and stores the results in the configured Django cache backend (e.g. memcached).
When custom cache keys are used, also include those in :func:`get_output_cache_keys`
so the cache will be cleared when needed.
.. versionchanged:: 1.0
The received data is no longer a HTML string, but :class:`~fluent_contents.models.ContentItemOutput` object.
"""
cachekey = self.get_output_cache_key(placeholder_name, instance)
if self.cache_timeout is not DEFAULT_TIMEOUT:
cache.set(cachekey, output, self.cache_timeout)
else:
# Don't want to mix into the default 0/None issue.
cache.set(cachekey, output) | .. versionadded:: 0.9
Store the cached output for a rendered item.
This method can be overwritten to implement custom caching mechanisms.
By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key`
and stores the results in the configured Django cache backend (e.g. memcached).
When custom cache keys are used, also include those in :func:`get_output_cache_keys`
so the cache will be cleared when needed.
.. versionchanged:: 1.0
The received data is no longer a HTML string, but :class:`~fluent_contents.models.ContentItemOutput` object. | entailment |
def render(self, request, instance, **kwargs):
"""
The rendering/view function that displays a plugin model instance.
:param instance: An instance of the ``model`` the plugin uses.
:param request: The Django :class:`~django.http.HttpRequest` class containing the request parameters.
:param kwargs: An optional slot for any new parameters.
To render a plugin, either override this function, or specify the :attr:`render_template` variable,
and optionally override :func:`get_context`.
It is recommended to wrap the output in a ``<div>`` tag,
to prevent the item from being displayed right next to the previous plugin.
.. versionadded:: 1.0
The function may either return a string of HTML code,
or return a :class:`~fluent_contents.models.ContentItemOutput` object
which holds both the CSS/JS includes and HTML string.
For the sake of convenience and simplicity, most examples
only return a HTML string directly.
When the user needs to be redirected, simply return a :class:`~django.http.HttpResponseRedirect`
or call the :func:`redirect` method.
To render raw HTML code, use :func:`~django.utils.safestring.mark_safe` on the returned HTML.
"""
render_template = self.get_render_template(request, instance, **kwargs)
if not render_template:
return str(_(u"{No rendering defined for class '%s'}" % self.__class__.__name__))
context = self.get_context(request, instance, **kwargs)
return self.render_to_string(request, render_template, context) | The rendering/view function that displays a plugin model instance.
:param instance: An instance of the ``model`` the plugin uses.
:param request: The Django :class:`~django.http.HttpRequest` class containing the request parameters.
:param kwargs: An optional slot for any new parameters.
To render a plugin, either override this function, or specify the :attr:`render_template` variable,
and optionally override :func:`get_context`.
It is recommended to wrap the output in a ``<div>`` tag,
to prevent the item from being displayed right next to the previous plugin.
.. versionadded:: 1.0
The function may either return a string of HTML code,
or return a :class:`~fluent_contents.models.ContentItemOutput` object
which holds both the CSS/JS includes and HTML string.
For the sake of convenience and simplicity, most examples
only return a HTML string directly.
When the user needs to be redirected, simply return a :class:`~django.http.HttpResponseRedirect`
or call the :func:`redirect` method.
To render raw HTML code, use :func:`~django.utils.safestring.mark_safe` on the returned HTML. | entailment |
def render_to_string(self, request, template, context, content_instance=None):
"""
Render a custom template with the :class:`~PluginContext` as context instance.
"""
if not content_instance:
content_instance = PluginContext(request)
content_instance.update(context)
return render_to_string(template, content_instance.flatten(), request=request) | Render a custom template with the :class:`~PluginContext` as context instance. | entailment |
def register_frontend_media(request, media):
"""
Add a :class:`~django.forms.Media` class to the current request.
This will be rendered by the ``render_plugin_media`` template tag.
"""
if not hasattr(request, '_fluent_contents_frontend_media'):
request._fluent_contents_frontend_media = Media()
add_media(request._fluent_contents_frontend_media, media) | Add a :class:`~django.forms.Media` class to the current request.
This will be rendered by the ``render_plugin_media`` template tag. | entailment |
def get_rendering_cache_key(placeholder_name, contentitem):
"""
Return a cache key for the content item output.
.. seealso::
The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function
can be used to remove the cache keys of a retrieved object.
"""
if not contentitem.pk:
return None
return "contentitem.@{0}.{1}.{2}".format(
placeholder_name,
contentitem.plugin.type_name, # always returns the upcasted name.
contentitem.pk, # already unique per language_code
) | Return a cache key for the content item output.
.. seealso::
The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function
can be used to remove the cache keys of a retrieved object. | entailment |
def get_placeholder_cache_key(placeholder, language_code):
"""
Return a cache key for an existing placeholder object.
This key is used to cache the entire output of a placeholder.
"""
return _get_placeholder_cache_key_for_id(
placeholder.parent_type_id,
placeholder.parent_id,
placeholder.slot,
language_code
) | Return a cache key for an existing placeholder object.
This key is used to cache the entire output of a placeholder. | entailment |
def get_placeholder_cache_key_for_parent(parent_object, placeholder_name, language_code):
"""
Return a cache key for a placeholder.
This key is used to cache the entire output of a placeholder.
"""
parent_type = ContentType.objects.get_for_model(parent_object)
return _get_placeholder_cache_key_for_id(
parent_type.id,
parent_object.pk,
placeholder_name,
language_code
) | Return a cache key for a placeholder.
This key is used to cache the entire output of a placeholder. | entailment |
def remove_stale_items(self, stale_cts):
"""
See if there are items that point to a removed model.
"""
stale_ct_ids = list(stale_cts.keys())
items = (ContentItem.objects
.non_polymorphic() # very important, or polymorphic skips them on fetching derived data
.filter(polymorphic_ctype__in=stale_ct_ids)
.order_by('polymorphic_ctype', 'pk')
)
if not items:
self.stdout.write("No stale items found.")
return
if self.dry_run:
self.stdout.write("The following content items are stale:")
else:
self.stdout.write("The following content items were stale:")
for item in items:
ct = stale_cts[item.polymorphic_ctype_id]
self.stdout.write("- #{id} points to removed {app_label}.{model}".format(
id=item.pk, app_label=ct.app_label, model=ct.model
))
if not self.dry_run:
try:
item.delete()
except PluginNotFound:
Model.delete(item) | See if there are items that point to a removed model. | entailment |
def remove_unreferenced_items(self, stale_cts):
"""
See if there are items that no longer point to an existing parent.
"""
stale_ct_ids = list(stale_cts.keys())
parent_types = (ContentItem.objects.order_by()
.exclude(polymorphic_ctype__in=stale_ct_ids)
.values_list('parent_type', flat=True).distinct())
num_unreferenced = 0
for ct_id in parent_types:
parent_ct = ContentType.objects.get_for_id(ct_id)
unreferenced_items = (ContentItem.objects
.filter(parent_type=ct_id)
.order_by('polymorphic_ctype', 'pk'))
if parent_ct.model_class() is not None:
# Only select the items that are part of removed pages,
# unless the parent type was removed - then removing all is correct.
unreferenced_items = unreferenced_items.exclude(
parent_id__in=parent_ct.get_all_objects_for_this_type()
)
if unreferenced_items:
for item in unreferenced_items:
self.stdout.write(
"- {cls}#{id} points to nonexisting {app_label}.{model}".format(
cls=item.__class__.__name__, id=item.pk,
app_label=parent_ct.app_label, model=parent_ct.model
))
num_unreferenced += 1
if not self.dry_run and self.remove_unreferenced:
item.delete()
if not num_unreferenced:
self.stdout.write("No unreferenced items found.")
else:
self.stdout.write("{0} unreferenced items found.".format(num_unreferenced))
if not self.remove_unreferenced:
self.stdout.write("Re-run this command with --remove-unreferenced to remove these items") | See if there are items that no longer point to an existing parent. | entailment |
def __initial_minus_queryset(self):
"""
Gives all elements from self._initial having a slot value that is not already
in self.get_queryset()
"""
queryset = self.get_queryset()
def initial_not_in_queryset(initial):
for x in queryset:
if x.slot == initial['slot']:
return False
return True
return list(filter(initial_not_in_queryset, self._initial)) | Gives all elements from self._initial having a slot value that is not already
in self.get_queryset() | entailment |
def _get_placeholder_arg(arg_name, placeholder):
"""
Validate and return the Placeholder object that the template variable points to.
"""
if placeholder is None:
raise RuntimeWarning(u"placeholder object is None")
elif isinstance(placeholder, Placeholder):
return placeholder
elif isinstance(placeholder, Manager):
manager = placeholder
try:
parent_object = manager.instance # read RelatedManager code
except AttributeError:
parent_object = None
try:
placeholder = manager.all()[0]
if parent_object is not None:
placeholder.parent = parent_object # Fill GFK cache
return placeholder
except IndexError:
raise RuntimeWarning(u"No placeholders found for query '{0}.all.0'".format(arg_name))
else:
raise ValueError(u"The field '{0}' does not refer to a placeholder object!".format(arg_name)) | Validate and return the Placeholder object that the template variable points to. | entailment |
def _split_js(media, domain):
"""
Extract the local or external URLs from a Media object.
"""
# Read internal property without creating new Media instance.
if not media._js:
return ImmutableMedia.empty_instance
needs_local = domain == 'local'
new_js = []
for url in media._js:
if needs_local == _is_local(url):
new_js.append(url)
if not new_js:
return ImmutableMedia.empty_instance
else:
return Media(js=new_js) | Extract the local or external URLs from a Media object. | entailment |
def _split_css(media, domain):
"""
Extract the local or external URLs from a Media object.
"""
# Read internal property without creating new Media instance.
if not media._css:
return ImmutableMedia.empty_instance
needs_local = domain == 'local'
new_css = {}
for medium, url in six.iteritems(media._css):
if needs_local == _is_local(url):
new_css.setdefault(medium, []).append(url)
if not new_css:
return ImmutableMedia.empty_instance
else:
return Media(css=new_css) | Extract the local or external URLs from a Media object. | entailment |
def parse(cls, parser, token):
"""
Parse the node syntax:
.. code-block:: html+django
{% page_placeholder parentobj slotname title="test" role="m" %}
"""
bits, as_var = parse_as_var(parser, token)
tag_name, args, kwargs = parse_token_kwargs(parser, bits, allowed_kwargs=cls.allowed_kwargs, compile_args=True, compile_kwargs=True)
# Play with the arguments
if len(args) == 2:
parent_expr = args[0]
slot_expr = args[1]
elif len(args) == 1:
# Allow 'page' by default. Works with most CMS'es, including django-fluent-pages.
parent_expr = Variable('page')
slot_expr = args[0]
else:
raise TemplateSyntaxError("""{0} tag allows two arguments: 'parent object' 'slot name' and optionally: title=".." role="..".""".format(tag_name))
cls.validate_args(tag_name, *args, **kwargs)
return cls(
tag_name=tag_name,
as_var=as_var,
parent_expr=parent_expr,
slot_expr=slot_expr,
**kwargs
) | Parse the node syntax:
.. code-block:: html+django
{% page_placeholder parentobj slotname title="test" role="m" %} | entailment |
def get_title(self):
"""
Return the string literal that is used in the template.
The title is used in the admin screens.
"""
try:
return extract_literal(self.meta_kwargs['title'])
except KeyError:
slot = self.get_slot()
if slot is not None:
return slot.replace('_', ' ').title()
return None | Return the string literal that is used in the template.
The title is used in the admin screens. | entailment |
def extract_literal(templatevar):
"""
See if a template FilterExpression holds a literal value.
:type templatevar: django.template.FilterExpression
:rtype: bool|None
"""
# FilterExpression contains another 'var' that either contains a Variable or SafeData object.
if hasattr(templatevar, 'var'):
templatevar = templatevar.var
if isinstance(templatevar, SafeData):
# Literal in FilterExpression, can return.
return templatevar
else:
# Variable in FilterExpression, not going to work here.
return None
if templatevar[0] in ('"', "'") and templatevar[-1] in ('"', "'"):
return templatevar[1:-1]
else:
return None | See if a template FilterExpression holds a literal value.
:type templatevar: django.template.FilterExpression
:rtype: bool|None | entailment |
def extract_literal_bool(templatevar):
"""
See if a template FilterExpression holds a literal boolean value.
:type templatevar: django.template.FilterExpression
:rtype: bool|None
"""
# FilterExpression contains another 'var' that either contains a Variable or SafeData object.
if hasattr(templatevar, 'var'):
templatevar = templatevar.var
if isinstance(templatevar, SafeData):
# Literal in FilterExpression, can return.
return is_true(templatevar)
else:
# Variable in FilterExpression, not going to work here.
return None
return is_true(templatevar) | See if a template FilterExpression holds a literal boolean value.
:type templatevar: django.template.FilterExpression
:rtype: bool|None | entailment |
def _create_markup_plugin(language, model):
"""
Create a new MarkupPlugin class that represents the plugin type.
"""
form = type("{0}MarkupItemForm".format(language.capitalize()), (MarkupItemForm,), {
'default_language': language,
})
classname = "{0}MarkupPlugin".format(language.capitalize())
PluginClass = type(classname, (MarkupPluginBase,), {
'model': model,
'form': form,
})
return PluginClass | Create a new MarkupPlugin class that represents the plugin type. | entailment |
def get_parent_lookup_kwargs(parent_object):
"""
Return lookup arguments for the generic ``parent_type`` / ``parent_id`` fields.
:param parent_object: The parent object.
:type parent_object: :class:`~django.db.models.Model`
"""
if parent_object is None:
return dict(
parent_type__isnull=True,
parent_id=0
)
elif isinstance(parent_object, models.Model):
return dict(
parent_type=ContentType.objects.get_for_model(parent_object),
parent_id=parent_object.pk
)
else:
raise ValueError("parent_object is not a model!") | Return lookup arguments for the generic ``parent_type`` / ``parent_id`` fields.
:param parent_object: The parent object.
:type parent_object: :class:`~django.db.models.Model` | entailment |
def get_parent_active_language_choices(parent_object, exclude_current=False):
"""
.. versionadded:: 1.0
Get the currently active languages of an parent object.
Note: if there is no content at the page, the language won't be returned.
"""
assert parent_object is not None, "Missing parent_object!"
from .db import ContentItem
qs = ContentItem.objects \
.parent(parent_object, limit_parent_language=False) \
.values_list('language_code', flat=True).distinct()
languages = set(qs)
if exclude_current:
parent_lang = get_parent_language_code(parent_object)
languages.discard(parent_lang)
if parler_appsettings.PARLER_LANGUAGES and not parler_appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS:
site_id = get_parent_site_id(parent_object)
try:
lang_dict = parler_appsettings.PARLER_LANGUAGES[site_id]
except KeyError:
lang_dict = ()
allowed_languages = set(item['code'] for item in lang_dict)
languages &= allowed_languages
# No multithreading issue here, object is instantiated for this user only.
choices = [(lang, str(get_language_title(lang))) for lang in languages if lang]
choices.sort(key=lambda tup: tup[1])
return choices | .. versionadded:: 1.0
Get the currently active languages of an parent object.
Note: if there is no content at the page, the language won't be returned. | entailment |
def get_by_slot(self, parent_object, slot):
"""
Return a placeholder by key.
"""
placeholder = self.parent(parent_object).get(slot=slot)
placeholder.parent = parent_object # fill the reverse cache
return placeholder | Return a placeholder by key. | entailment |
def create_for_object(self, parent_object, slot, role='m', title=None):
"""
Create a placeholder with the given parameters
"""
from .db import Placeholder
parent_attrs = get_parent_lookup_kwargs(parent_object)
obj = self.create(
slot=slot,
role=role or Placeholder.MAIN,
title=title or slot.title().replace('_', ' '),
**parent_attrs
)
obj.parent = parent_object # fill the reverse cache
return obj | Create a placeholder with the given parameters | entailment |
def translated(self, *language_codes):
"""
.. versionadded:: 1.0
Only return translated objects which of the given languages.
When no language codes are given, only the currently active language is returned.
"""
# this API has the same semantics as django-parler's .translated() for familiarity.
# However, since this package doesn't filter in a related field, the ORM limitations don't apply.
if not language_codes:
language_codes = (get_language(),)
else:
# Since some code operates on a True/str switch, make sure that doesn't drip into this low level code.
for language_code in language_codes:
if not isinstance(language_code, six.string_types) or language_code.lower() in ('1', '0', 'true', 'false'):
raise ValueError("ContentItemQuerySet.translated() expected language_code to be an ISO code")
if len(language_codes) == 1:
return self.filter(language_code=language_codes[0])
else:
return self.filter(language_code__in=language_codes) | .. versionadded:: 1.0
Only return translated objects which of the given languages.
When no language codes are given, only the currently active language is returned. | entailment |
Subsets and Splits