prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k)
---|---
<|file_name|>textutil.py<|end_file_name|><|fim▁begin|># Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and OpenSSL 1.0+
import base64
import crypt
import hashlib
import random
import re
import string
import struct
import sys
import traceback
import xml.dom.minidom as minidom
import zlib
from azurelinuxagent.common.future import ustr
def parse_doc(xml_text):
"""
Parse xml document from string
"""
# The minidom library has issues with unicode strings in Python 2,
# so encode the string as UTF-8 first.
xml_text = xml_text.encode('utf-8')
return minidom.parseString(xml_text)
def findall(root, tag, namespace=None):
"""
Get all nodes by tag and namespace under Node root.
"""
if root is None:
return []
if namespace is None:
return root.getElementsByTagName(tag)
else:
return root.getElementsByTagNameNS(namespace, tag)
def find(root, tag, namespace=None):
"""
Get first node by tag and namespace under Node root.
"""
nodes = findall(root, tag, namespace=namespace)
if nodes is not None and len(nodes) >= 1:
return nodes[0]
else:
return None
def gettext(node):
"""
Get node text
"""
if node is None:
return None
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE:
return child.data
return None
def findtext(root, tag, namespace=None):
"""
Get text of node by tag and namespace under Node root.
"""
node = find(root, tag, namespace=namespace)
return gettext(node)
def getattrib(node, attr_name):
"""
Get attribute of xml node
"""
if node is not None:
return node.getAttribute(attr_name)
else:
return None
def unpack(buf, offset, value_range):
"""
Unpack bytes into python values.
"""
result = 0
for i in value_range:
result = (result << 8) | str_to_ord(buf[offset + i])
return result
def unpack_little_endian(buf, offset, length):
"""
Unpack little endian bytes into python values.
"""
return unpack(buf, offset, list(range(length - 1, -1, -1)))
def unpack_big_endian(buf, offset, length):
"""
Unpack big endian bytes into python values.
"""
return unpack(buf, offset, list(range(0, length)))
def hex_dump3(buf, offset, length):
"""
Dump range of buf in formatted hex.
"""
return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]])
def hex_dump2(buf):
"""
Dump buf in formatted hex.
"""
return hex_dump3(buf, 0, len(buf))
def is_in_range(a, low, high):
"""
Return True if 'low' <= 'a' <= 'high'.
"""
return low <= a <= high<|fim▁hole|>
def is_printable(ch):
"""
Return True if character is displayable.
"""
return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z'))
or is_in_range(ch, str_to_ord('a'), str_to_ord('z'))
or is_in_range(ch, str_to_ord('0'), str_to_ord('9')))
def hex_dump(buffer, size): # pylint: disable=redefined-builtin
"""
Return a hex-formatted dump of 'buffer' of 'size' bytes.
"""
if size < 0:
size = len(buffer)
result = ""
for i in range(0, size):
if (i % 16) == 0:
result += "%06X: " % i
byte = buffer[i]
if type(byte) == str:
byte = ord(byte.decode('latin1'))
result += "%02X " % byte
if (i & 15) == 7:
result += " "
if ((i + 1) % 16) == 0 or (i + 1) == size:
j = i
while ((j + 1) % 16) != 0:
result += " "
if (j & 7) == 7:
result += " "
j += 1
result += " "
for j in range(i - (i % 16), i + 1):
byte = buffer[j]
if type(byte) == str:
byte = str_to_ord(byte.decode('latin1'))
k = '.'
if is_printable(byte):
k = chr(byte)
result += k
if (i + 1) != size:
result += "\n"
return result
def str_to_ord(a):
"""
Allows indexing into a string or an array of integers transparently.
Generic utility function.
"""
if type(a) == type(b'') or type(a) == type(u''):
a = ord(a)
return a
def compare_bytes(a, b, start, length):
for offset in range(start, start + length):
if str_to_ord(a[offset]) != str_to_ord(b[offset]):
return False
return True
def int_to_ip4_addr(a):
"""
Convert an integer into a dotted-quad IPv4 address string (used when building DHCP requests).
"""
return "%u.%u.%u.%u" % ((a >> 24) & 0xFF,
(a >> 16) & 0xFF,
(a >> 8) & 0xFF,
(a) & 0xFF)
def hexstr_to_bytearray(a):
"""
Pack a hex string into a binary byte string.
"""
b = b""
for c in range(0, len(a) // 2):
b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16))
return b
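# Usage sketch: each pair of hex digits is packed into one byte, e.g.
#   hexstr_to_bytearray("0a1b")  ->  b"\x0a\x1b"
#   hexstr_to_bytearray("ff")    ->  b"\xff"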
def set_ssh_config(config, name, val):
found = False
no_match = -1
match_start = no_match
for i in range(0, len(config)):
if config[i].startswith(name) and match_start == no_match:
config[i] = "{0} {1}".format(name, val)
found = True
elif config[i].lower().startswith("match"):
if config[i].lower().startswith("match all"):
# outside match block
match_start = no_match
elif match_start == no_match:
# inside match block
match_start = i
if not found:
if match_start != no_match:
i = match_start
config.insert(i, "{0} {1}".format(name, val))
return config
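# Usage sketch: an existing directive is replaced in place, while a missing one
# is inserted ahead of the first "Match" block so it applies globally, e.g.
#   set_ssh_config(["PasswordAuthentication yes", "Match User foo"], "PasswordAuthentication", "no")
#   -> ["PasswordAuthentication no", "Match User foo"]
#   set_ssh_config(["Match User foo", "X11Forwarding no"], "ClientAliveInterval", "180")
#   -> ["ClientAliveInterval 180", "Match User foo", "X11Forwarding no"]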
def set_ini_config(config, name, val):
notfound = True
nameEqual = name + '='
length = len(config)
text = "{0}=\"{1}\"".format(name, val)
for i in reversed(range(0, length)):
if config[i].startswith(nameEqual):
config[i] = text
notfound = False
break
if notfound:
config.insert(length - 1, text)
def replace_non_ascii(incoming, replace_char=''):
outgoing = ''
if incoming is not None:
for c in incoming:
if str_to_ord(c) > 128:
outgoing += replace_char
else:
outgoing += c
return outgoing
def remove_bom(c):
"""
The UTF-8 BOM is the three-byte sequence 0xEF, 0xBB, 0xBF; strip it if present.
"""
if not is_str_none_or_whitespace(c) and \
len(c) > 2 and \
str_to_ord(c[0]) > 128 and \
str_to_ord(c[1]) > 128 and \
str_to_ord(c[2]) > 128:
c = c[3:]
return c
def gen_password_hash(password, crypt_id, salt_len):
collection = string.ascii_letters + string.digits
salt = ''.join(random.choice(collection) for _ in range(salt_len))
salt = "${0}${1}".format(crypt_id, salt)
if sys.version_info[0] == 2:
# On Python 2, encode to type 'str' to prevent a UnicodeEncodeError from crypt.crypt
password = password.encode('utf-8')
return crypt.crypt(password, salt)
def get_bytes_from_pem(pem_str):
base64_bytes = ""
for line in pem_str.split('\n'):
if "----" not in line:
base64_bytes += line
return base64_bytes
def compress(s):
"""
Compress a string, and return the base64 encoded result of the compression.
This method returns a string instead of a byte array. It is expected
that this method is called to compress smallish strings, not to compress
the contents of a file. The output of this method is suitable for
embedding in log statements.
"""
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64encode(zlib.compress(bytes(s, 'utf-8'))).decode('utf-8')
return base64.b64encode(zlib.compress(s))
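# Usage sketch (Python 3 path): the result is a plain text string that can be
# reversed with base64 + zlib, e.g.
#   encoded = compress("hello world")
#   zlib.decompress(base64.b64decode(encoded)).decode('utf-8')  ->  "hello world"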
def b64encode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8')
return base64.b64encode(s)
def b64decode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64decode(s).decode('utf-8')
return base64.b64decode(s)
def safe_shlex_split(s):
import shlex
from azurelinuxagent.common.version import PY_VERSION
if PY_VERSION[:2] == (2, 6):
return shlex.split(s.encode('utf-8'))
return shlex.split(s)
def swap_hexstring(s, width=2):
r = len(s) % width
if r != 0:
s = ('0' * (width - (len(s) % width))) + s
return ''.join(reversed(
re.findall(
r'[a-f0-9]{{{0}}}'.format(width),
s,
re.IGNORECASE)))
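# Usage sketch: reverses the order of the hex-digit groups, left-padding with
# zeros to a whole number of groups, e.g.
#   swap_hexstring('a1b2c3')  ->  'c3b2a1'
#   swap_hexstring('1b2c3')   ->  'c3b201'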
def parse_json(json_str):
"""
Parse json string and return a resulting dictionary
"""
# trim null and whitespaces
result = None
if not is_str_empty(json_str):
import json
result = json.loads(json_str.rstrip(' \t\r\n\0'))
return result
def is_str_none_or_whitespace(s):
return s is None or len(s) == 0 or s.isspace()
def is_str_empty(s):
return is_str_none_or_whitespace(s) or is_str_none_or_whitespace(s.rstrip(' \t\r\n\0'))
def hash_strings(string_list):
"""
Compute a cryptographic hash of a list of strings
:param string_list: The strings to be hashed
:return: The cryptographic hash (digest) of the strings in the order provided
"""
sha1_hash = hashlib.sha1()
for item in string_list:
sha1_hash.update(item.encode())
return sha1_hash.digest()
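# Usage sketch: order matters, and the digest equals that of the concatenation, e.g.
#   hash_strings(["foo", "bar"]) == hashlib.sha1(b"foobar").digest()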
def format_memory_value(unit, value):
units = {'bytes': 1, 'kilobytes': 1024, 'megabytes': 1024*1024, 'gigabytes': 1024*1024*1024}
if unit not in units:
raise ValueError("Unit must be one of {0}".format(units.keys()))
try:
value = float(value)
except TypeError:
raise TypeError('Value must be convertible to a float')
return int(value * units[unit])
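# Usage sketch:
#   format_memory_value('kilobytes', 1.5)  ->  1536
#   format_memory_value('megabytes', '2')  ->  2097152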
def str_to_encoded_ustr(s, encoding='utf-8'):
"""
This function takes the string and converts it into the corresponding encoded ustr if it's not already a ustr.
The encoding is utf-8 by default if not specified.
Note: ustr() is a unicode object for Py2 and a str object for Py3.
:param s: The string to convert to ustr
:param encoding: Encoding to use. Utf-8 by default
:return: Returns the corresponding ustr string. Returns None if input is None.
"""
# TODO: Import at the top of the file instead of a local import (using local import here to avoid cyclic dependency)
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if s is None or type(s) is ustr:
# If it's already a ustr or None, return it as is
return s
if PY_VERSION_MAJOR > 2:
try:
# For py3+, str() is unicode by default
if isinstance(s, bytes):
# bytes must be decoded with the given encoding to get back a str.
return s.decode(encoding)
else:
# If it's not encoded bytes, just return it as a ustr
return ustr(s)
except Exception:
# If decoding fails, just return the string as a ustr
return ustr(s)
# For Py2, explicitly convert the string to unicode with the specified encoding
return ustr(s, encoding=encoding)
def format_exception(exception):
# Function to format exception message
e = None
if sys.version_info[0] == 2:
_, e, tb = sys.exc_info()
else:
tb = exception.__traceback__
msg = ustr(exception) + "\n"
if tb is None or (sys.version_info[0] == 2 and e != exception):
msg += "[Traceback not available]"
else:
msg += ''.join(traceback.format_exception(etype=type(exception), value=exception, tb=tb))
return msg<|fim▁end|> | |
<|file_name|>text_file.py<|end_file_name|><|fim▁begin|>"""text_file
provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""
__revision__ = "$Id$"
from types import *
import sys, os, string
class TextFile:
"""Provides a file-like object that takes care of all the things you
commonly want to do when processing a text file that has some
line-by-line syntax: strip comments (as long as "#" is your
comment character), skip blank lines, join adjacent lines by
escaping the newline (ie. backslash at end of line), strip
leading and/or trailing whitespace. All of these are optional
and independently controllable.
Provides a 'warn()' method so you can generate warning messages that
report physical line number, even if the logical line in question
spans multiple physical lines. Also provides 'unreadline()' for
implementing line-at-a-time lookahead.
Constructor is called as:
TextFile (filename=None, file=None, **options)
It bombs (RuntimeError) if both 'filename' and 'file' are None;
'filename' should be a string, and 'file' a file object (or
something that provides 'readline()' and 'close()' methods). It is
recommended that you supply at least 'filename', so that TextFile
can include it in warning messages. If 'file' is not supplied,
TextFile creates its own using the 'open()' builtin.
The options are all boolean, and affect the value returned by
'readline()':
strip_comments [default: true]
strip from "#" to end-of-line, as well as any whitespace
leading up to the "#" -- unless it is escaped by a backslash
lstrip_ws [default: false]
strip leading whitespace from each line before returning it
rstrip_ws [default: true]
strip trailing whitespace (including line terminator!) from
each line before returning it
skip_blanks [default: true]
skip lines that are empty *after* stripping comments and
whitespace. (If both lstrip_ws and rstrip_ws are false,
then some lines may consist of solely whitespace: these will
*not* be skipped, even if 'skip_blanks' is true.)
join_lines [default: false]
if a backslash is the last non-newline character on a line
after stripping comments and whitespace, join the following line
to it to form one "logical line"; if N consecutive lines end
with a backslash, then N+1 physical lines will be joined to
form one logical line.
collapse_join [default: false]
strip leading whitespace from lines that are joined to their
predecessor; only matters if (join_lines and not lstrip_ws)
Note that since 'rstrip_ws' can strip the trailing newline, the
semantics of 'readline()' must differ from those of the builtin file
object's 'readline()' method! In particular, 'readline()' returns
None for end-of-file: an empty string might just be a blank line (or
an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
not."""
default_options = { 'strip_comments': 1,
'skip_blanks': 1,
'lstrip_ws': 0,
'rstrip_ws': 1,
'join_lines': 0,
'collapse_join': 0,
}
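# Usage sketch: with the default options plus join_lines=1, a file containing
#   hello \
#   # a comment that is stripped
#   there
# yields the single logical line "hello there" from readline().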
def __init__ (self, filename=None, file=None, **options):
"""Construct a new TextFile object. At least one of 'filename'
(a string) and 'file' (a file-like object) must be supplied.
The keyword argument options are described above and affect
the values returned by 'readline()'."""
if filename is None and file is None:
raise RuntimeError, \
"you must supply either or both of 'filename' and 'file'"
# set values for all options -- either from client option hash
# or fallback to default_options
for opt in self.default_options.keys():
if opt in options:
setattr (self, opt, options[opt])
else:
setattr (self, opt, self.default_options[opt])
# sanity check client option hash
for opt in options.keys():
if opt not in self.default_options:
raise KeyError, "invalid TextFile option '%s'" % opt
if file is None:
self.open (filename)
else:
self.filename = filename
self.file = file
self.current_line = 0 # assuming that file is at BOF!
# 'linebuf' is a stack of lines that will be emptied before we
# actually read from the file; it's only populated by an
# 'unreadline()' operation
self.linebuf = []
def open (self, filename):
"""Open a new file named 'filename'. This overrides both the
'filename' and 'file' arguments to the constructor."""
self.filename = filename
self.file = open (self.filename, 'r')
self.current_line = 0
def close (self):
"""Close the current file and forget everything we know about it
(filename, current line number)."""
self.file.close ()
self.file = None
self.filename = None
self.current_line = None
def gen_error (self, msg, line=None):
outmsg = []
if line is None:
line = self.current_line
outmsg.append(self.filename + ", ")
if type (line) in (ListType, TupleType):
outmsg.append("lines %d-%d: " % tuple (line))
else:
outmsg.append("line %d: " % line)
outmsg.append(str(msg))
return string.join(outmsg, "")
def error (self, msg, line=None):
raise ValueError, "error: " + self.gen_error(msg, line)
def warn (self, msg, line=None):
"""Print (to stderr) a warning message tied to the current logical
line in the current file. If the current logical line in the
file spans multiple physical lines, the warning refers to the
whole range, eg. "lines 3-5". If 'line' is supplied, it overrides
the current line number; it may be a list or tuple to indicate a
range of physical lines, or an integer for a single physical
line."""
sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
def readline (self):
"""Read and return a single logical line from the current file (or
from an internal buffer if lines have previously been "unread"
with 'unreadline()'). If the 'join_lines' option is true, this
may involve reading multiple physical lines concatenated into a
single string. Updates the current line number, so calling
'warn()' after 'readline()' emits a warning about the physical
line(s) just read. Returns None on end-of-file, since the empty
string can occur if 'rstrip_ws' is true but 'skip_blanks' is
not."""
# If any "unread" lines waiting in 'linebuf', return the top
# one. (We don't actually buffer read-ahead data -- lines only
# get put in 'linebuf' if the client explicitly does an
# 'unreadline()'.)
if self.linebuf:
line = self.linebuf[-1]
del self.linebuf[-1]
return line
buildup_line = ''
while 1:
# read the line, make it None if EOF
line = self.file.readline()
if line == '': line = None
if self.strip_comments and line:
# Look for the first "#" in the line. If none, never
# mind. If we find one and it's the first character, or
# is not preceded by "\", then it starts a comment --
# strip the comment, strip whitespace before it, and
# carry on. Otherwise, it's just an escaped "#", so
# unescape it (and any other escaped "#"'s that might be
# lurking in there) and otherwise leave the line alone.
pos = string.find (line, "#")
if pos == -1: # no "#" -- no comments
pass
# It's definitely a comment -- either "#" is the first
# character, or it's elsewhere and unescaped.
elif pos == 0 or line[pos-1] != "\\":
# Have to preserve the trailing newline, because it's
# the job of a later step (rstrip_ws) to remove it --
# and if rstrip_ws is false, we'd better preserve it!
# (NB. this means that if the final line is all comment
# and has no trailing newline, we will think that it's
# EOF; I think that's OK.)
eol = (line[-1] == '\n') and '\n' or ''
line = line[0:pos] + eol
# If all that's left is whitespace, then skip line
# *now*, before we try to join it to 'buildup_line' --
# that way constructs like
# hello \\
# # comment that should be ignored
# there
# result in "hello there".
if string.strip(line) == "":
continue
else: # it's an escaped "#"
line = string.replace (line, "\\#", "#")
# did previous line end with a backslash? then accumulate
if self.join_lines and buildup_line:
# oops: end of file
if line is None:
self.warn ("continuation line immediately precedes "
"end-of-file")
return buildup_line
if self.collapse_join:
line = string.lstrip (line)
line = buildup_line + line
# careful: pay attention to line number when incrementing it
if type (self.current_line) is ListType:
self.current_line[1] = self.current_line[1] + 1
else:
self.current_line = [self.current_line,
self.current_line+1]
# just an ordinary line, read it as usual
else:
if line is None: # eof
return None
# still have to be careful about incrementing the line number!
if type (self.current_line) is ListType:
self.current_line = self.current_line[1] + 1
else:
self.current_line = self.current_line + 1
# strip whitespace however the client wants (leading and
# trailing, or one or the other, or neither)
if self.lstrip_ws and self.rstrip_ws:
line = string.strip (line)
elif self.lstrip_ws:
line = string.lstrip (line)
elif self.rstrip_ws:
line = string.rstrip (line)
# blank line (whether we rstrip'ed or not)? skip to next line
# if appropriate
if (line == '' or line == '\n') and self.skip_blanks:
continue
if self.join_lines:
if line[-1] == '\\':
buildup_line = line[:-1]
continue
if line[-2:] == '\\\n':
buildup_line = line[0:-2] + '\n'
continue
# well, I guess there's some actual content there: return it
return line
# readline ()
def readlines (self):
"""Read and return the list of all logical lines remaining in the
current file."""
lines = []
while 1:
line = self.readline()
if line is None:
return lines
lines.append (line)
def unreadline (self, line):
"""Push 'line' (a string) onto an internal buffer that will be
checked by future 'readline()' calls. Handy for implementing
a parser with line-at-a-time lookahead."""
self.linebuf.append (line)
if __name__ == "__main__":
test_data = """# test file
line 3 \\
# intervening comment
continues on next line
"""
# result 1: no fancy options
result1 = map (lambda x: x + "\n", string.split (test_data, "\n")[0:-1])
# result 2: just strip comments
result2 = ["\n",
"line 3 \\\n",
" continues on next line\n"]
# result 3: just strip blank lines
result3 = ["# test file\n",
"line 3 \\\n",
"# intervening comment\n",
" continues on next line\n"]
# result 4: default, strip comments, blank lines, and trailing whitespace
result4 = ["line 3 \\",
" continues on next line"]
# result 5: strip comments and blanks, plus join lines (but don't
# "collapse" joined lines
result5 = ["line 3 continues on next line"]
# result 6: strip comments and blanks, plus join lines (and
# "collapse" joined lines<|fim▁hole|> # result = string.join (result, '')
if result == expected_result:
print "ok %d (%s)" % (count, description)
else:
print "not ok %d (%s):" % (count, description)
print "** expected:"
print expected_result
print "** received:"
print result
filename = "test.txt"
out_file = open (filename, "w")
out_file.write (test_data)
out_file.close ()
in_file = TextFile (filename, strip_comments=0, skip_blanks=0,
lstrip_ws=0, rstrip_ws=0)
test_input (1, "no processing", in_file, result1)
in_file = TextFile (filename, strip_comments=1, skip_blanks=0,
lstrip_ws=0, rstrip_ws=0)
test_input (2, "strip comments", in_file, result2)
in_file = TextFile (filename, strip_comments=0, skip_blanks=1,
lstrip_ws=0, rstrip_ws=0)
test_input (3, "strip blanks", in_file, result3)
in_file = TextFile (filename)
test_input (4, "default processing", in_file, result4)
in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
join_lines=1, rstrip_ws=1)
test_input (5, "join lines without collapsing", in_file, result5)
in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
join_lines=1, rstrip_ws=1, collapse_join=1)
test_input (6, "join lines with collapsing", in_file, result6)
os.remove (filename)<|fim▁end|> | result6 = ["line 3 continues on next line"]
def test_input (count, description, file, expected_result):
result = file.readlines () |
<|file_name|>example_test.go<|end_file_name|><|fim▁begin|><|fim▁hole|> "go-common/library/net/http/blademaster/middleware/supervisor"
"time"
)
// This example create a supervisor middleware instance and attach to a blademaster engine,
// it will allow '/ping' API can be requested with specified policy.
// This example will block all http method except `GET` on '/ping' API in next hour,
// and allow in further.
func Example() {
now := time.Now()
end := now.Add(time.Hour * 1)
spv := supervisor.New(&supervisor.Config{
On: true,
Begin: now,
End: end,
})
engine := blademaster.Default()
engine.Use(spv)
engine.GET("/ping", func(c *blademaster.Context) {
c.String(200, "%s", "pong")
})
engine.Run(":18080")
}<|fim▁end|> | package supervisor_test
import (
"go-common/library/net/http/blademaster" |
<|file_name|>kalloc.rs<|end_file_name|><|fim▁begin|>// The MIT License (MIT)
//
// Copyright (c) 2015 Kashyap
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use super::spinlock::{Spinlock, DUMMY_LOCK, init_lock};
use super::mmu::{Address, PG_SIZE, pg_roundup};
use super::memlayout::{v2p,PHYSTOP};
use super::uart::uart_put_str;
use super::console::panic;
use super::rlibc::memset;
struct KmemT {
lock: Spinlock,
use_lock: u32, // TODO: is u32 the right type?
// TODO struct run *freelist;
}
static mut kmem : KmemT = KmemT{ lock: DUMMY_LOCK, use_lock: 0} ;
static mut end : Address = 0;
pub fn kinit1(vstart: Address, vend: Address) {
unsafe {
init_lock(& mut kmem.lock, "kmem");
kmem.use_lock = 0;
}
free_range(vstart, vend);
}
fn free_range(vstart: Address, vend: Address) {<|fim▁hole|> // Keep it around for future debugging
//unsafe {
// asm!("mov $0 , %rax" : /* no outputs */ : "r"(vend) : "eax");
// asm!("mov $0 , %rbx" : /* no outputs */ : "r"(address) : "eax");
//}
unsafe {
end = vstart;
}
loop {
kfree(address);
address = address + PG_SIZE;
if address > vend {
break;
}
}
}
fn kfree(v : Address) {
//struct run *r;
if ((v % PG_SIZE) > 0) || (v2p(v) >= PHYSTOP) {
panic("kfree");
}
unsafe {
if v < end {
panic("kfree");
}
}
unsafe {
// Fill with junk to catch dangling refs.
memset(v as * mut u8, 1, PG_SIZE as usize);
}
//
// if(kmem.use_lock)
// acquire(&kmem.lock);
// r = (struct run*)v;
// r->next = kmem.freelist;
// kmem.freelist = r;
// if(kmem.use_lock)
// release(&kmem.lock);
//
//
//
// */
}<|fim▁end|> | let mut address = pg_roundup(vstart);
|
<|file_name|>query-language.ts<|end_file_name|><|fim▁begin|>import {customElement, bindable} from 'aurelia-templating';
import {inject} from 'aurelia-dependency-injection';
import {Utils, DomUtils} from 'marvelous-aurelia-core/utils';
import {AureliaUtils} from 'marvelous-aurelia-core/aureliaUtils';
@customElement('m-query-language')
@inject(Element, AureliaUtils)
export class QueryLanguage {
@bindable({ attribute: 'options' }) options: IQueryLanguageOptions;
autoCompletionResult: IAutoCompletionResult;
selectedCompletionIndex: number = 0;
errors: string[] = [];
private _subs = [];
private _queryInputElement: HTMLInputElement;
private _preventFromFocusOut = false;
private _loading: boolean;
private _lastSubmittedQuery: string;
query = '';
constructor(private _element: Element, private _aureliaUtils: AureliaUtils) {
}
attached() {
this.validateOptions();
this.createOptions();
this.registerInputHandlers();
}
detached() {
this._subs.forEach(x => x());
this._subs = [];
}
submit() {
if (this._lastSubmittedQuery === this.query) {
// submits only if query has some changes
return;
}
let promise = this.options.onSubmit();
if (!promise || !(promise.then instanceof Function)) {
return;
}
this._lastSubmittedQuery = this.query;
this._loading = true;
promise.then((x) => {
this._loading = false;
if (!x) {
return;
}
// if wrapped with DataSourceResult<T>
// then uses `queryLanguage`
// otherwise result is assumed to be QueryLanguageFilterResult<T>
let result = x.queryLanguage || x;
this.errors = result.errors || [];
}, () => this._loading = false);
}
createOptions() {
let o = this.options;
o.inlineButton = o.inlineButton === undefined ? true : o.inlineButton;
o.inlineButtonText = o.inlineButtonText || 'Apply';
o.submitOnFocusOut = o.submitOnFocusOut === undefined ? false : o.submitOnFocusOut;
o.onSubmit = o.onSubmit || Utils.noop;
}
validateOptions() {
if (!this.options) {
throw new Error('`options` attribute is required.');
}
}
autoComplete() {
let result = this.autoCompletionResult;
let current = result.Completions[this.selectedCompletionIndex];
let newQuery = this.query.substr(0, result.StartPosition);
newQuery += current.Text;
let caretPosition = newQuery.length;
newQuery += this.query.substr(result.StartPosition + result.Length);
this.query = newQuery;
this.hideCompletions();
DomUtils.setCaretPosition(this._queryInputElement, caretPosition);
}
anyCompletion() {
if (!this.autoCompletionResult || !this.autoCompletionResult.Completions) {
return false;
}
return this.autoCompletionResult.Completions.length != 0;
}
hideCompletions() {
this.selectedCompletionIndex = 0;
if (this.autoCompletionResult)
this.autoCompletionResult.Completions = [];
}
select(completion: IAutoCompletionRow) {
this.selectedCompletionIndex = this.autoCompletionResult.Completions.indexOf(completion);
}
selectNext() {
if (this.selectedCompletionIndex == this.autoCompletionResult.Completions.length - 1) {
this.selectedCompletionIndex = 0;
return;
}
this.selectedCompletionIndex++;
}
selectPrevious() {
if (this.selectedCompletionIndex == 0) {
this.selectedCompletionIndex = this.autoCompletionResult.Completions.length - 1;
return;
}
this.selectedCompletionIndex--;
}
<|fim▁hole|> return;
}
let promise = undefined;
let params = {
query: this.query,
caretPosition: caretPosition,
skip: 0
}
let func = Utils.createReadFunction(this.options.autoComplete, {
allowData: false,
dataMissingError: '`autoComplete` has to be either url or a function.',
shouldReturnUrlOrPromiseError: '`autoComplete` function should return url or promise.'
});
// TODO: race condition! only last one should resolve
func(params).then((x: IAutoCompletionResult) => {
this.selectedCompletionIndex = 0;
this.autoCompletionResult = x;
});
}
onCompletionClick(ev) {
Utils.preventDefaultAndPropagation(ev);
this.autoComplete();
}
private registerInputHandlers() {
let isInputClick = false;
this._subs.push(DomUtils.addEventListener(this._queryInputElement, "keyup", (ev: KeyboardEvent) => {
switch (ev.which) {
case 37: // Left
case 39: // Right
case 36: // Home
case 35: // End
this.refreshCompletions();
break;
case 38: // Up
case 40: // Down
if (!this.anyCompletion()) {
this.refreshCompletions();
}
break;
case 27: // Esc
this.hideCompletions();
break;
case 16: // Shift
case 17: // Ctrl
case 18: // Alt
case 255: // Fn
case 13: // Enter
case 9: // Tab
break;
default:
this.refreshCompletions();
break;
}
}));
this._subs.push(DomUtils.addEventListener(this._queryInputElement, "keydown", (ev: KeyboardEvent) => {
if (!this.anyCompletion()) {
if (ev.which === 13) { // Enter
this.submit();
Utils.preventDefaultAndPropagation(ev);
}
return;
}
switch (ev.which) {
case 38: // Up
this.selectPrevious();
Utils.preventDefaultAndPropagation(ev);
break;
case 40: // Down
this.selectNext();
Utils.preventDefaultAndPropagation(ev);
break;
case 13: // Enter
case 9: // Tab
this.autoComplete();
Utils.preventDefaultAndPropagation(ev);
break;
}
}));
this._subs.push(DomUtils.addEventListener(this._queryInputElement, "mouseup", (ev: KeyboardEvent) => {
this.refreshCompletions();
}));
this._subs.push(DomUtils.addEventListener(this._queryInputElement, "mousedown", (ev: KeyboardEvent) => {
isInputClick = true;
}));
this._subs.push(DomUtils.addEventListener(this._queryInputElement, "focus", (ev) => {
if (!isInputClick && !this._preventFromFocusOut) {
this.refreshCompletions();
}
isInputClick = false;
}));
this._subs.push(DomUtils.addEventListener(this._queryInputElement, "blur", (ev) => {
if (this._preventFromFocusOut) {
Utils.preventDefaultAndPropagation(ev);
return;
}
this.hideCompletions();
isInputClick = false;
if (this.options.submitOnFocusOut) {
this.submit();
}
}));
}
}
export interface IQueryLanguageOptions {
autoComplete?: ((IAutoCompletionParams) => any) | string;
inlineButton?: boolean;
inlineButtonText?: string;
submitOnFocusOut?: boolean;
onSubmit?: () => any;
}
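// Usage sketch (the endpoint and data-refresh call below are illustrative, not part of this file):
//   <m-query-language options.bind="queryOptions"></m-query-language>
// with a view-model along the lines of:
//   queryOptions: IQueryLanguageOptions = {
//     autoComplete: '/api/query/auto-complete',
//     submitOnFocusOut: true,
//     onSubmit: () => this.fetchData()
//   };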
export interface IAutoCompletionParams {
query: string,
caretPosition: number,
skip: number
}
export interface IAutoCompletionResult {
StartPosition: number,
Length: number,
Completions: IAutoCompletionRow[],
IsNextPageAvailable: boolean,
Errors: string[],
HasErrors: boolean
}
export interface IAutoCompletionRow {
Text: string,
Group: string
}<|fim▁end|> | refreshCompletions(caretPosition = DomUtils.getCaretPosition(this._queryInputElement)) {
// TODO: debaunce
if (!this.options.autoComplete) { |
<|file_name|>FormControl.test.js<|end_file_name|><|fim▁begin|>import * as React from 'react';
import { expect } from 'chai';
import { spy } from 'sinon';
import { createMount, describeConformanceV5, act, createClientRender } from 'test/utils';
import FormControl, { formControlClasses as classes } from '@material-ui/core/FormControl';
import Input from '@material-ui/core/Input';
import Select from '@material-ui/core/Select';
import useFormControl from './useFormControl';
describe('<FormControl />', () => {
const render = createClientRender();
const mount = createMount();
function TestComponent(props) {
const context = useFormControl();
React.useEffect(() => {
props.contextCallback(context);
});
return null;
}
describeConformanceV5(<FormControl />, () => ({
classes,
inheritComponent: 'div',
render,
mount,
refInstanceof: window.HTMLDivElement,
testComponentPropWith: 'fieldset',
muiName: 'MuiFormControl',
testVariantProps: { margin: 'dense' },
skip: ['componentsProp'],
}));
describe('initial state', () => {
it('should have no margin', () => {
const { container } = render(<FormControl />);
const root = container.firstChild;
expect(root).not.to.have.class(classes.marginNormal);
expect(root).not.to.have.class(classes.sizeSmall);
});
it('can have the margin normal class', () => {
const { container } = render(<FormControl margin="normal" />);
const root = container.firstChild;
expect(root).to.have.class(classes.marginNormal);
expect(root).not.to.have.class(classes.sizeSmall);
});
it('can have the margin dense class', () => {
const { container } = render(<FormControl margin="dense" />);
const root = container.firstChild;
expect(root).to.have.class(classes.marginDense);
expect(root).not.to.have.class(classes.marginNormal);
});
it('should not be filled initially', () => {
const readContext = spy();
render(
<FormControl>
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('filled', false);
});
it('should not be focused initially', () => {
const readContext = spy();
render(
<FormControl>
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('focused', false);
});
});
describe('prop: required', () => {
it('should not apply it to the DOM', () => {
const { container } = render(<FormControl required />);
expect(container.firstChild).not.to.have.attribute('required');
});
});
describe('prop: disabled', () => {
it('will be unfocused if it gets disabled', () => {
const readContext = spy();
const { container, setProps } = render(
<FormControl>
<Input />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('focused', false);
act(() => {
container.querySelector('input').focus();
});
expect(readContext.args[1][0]).to.have.property('focused', true);
setProps({ disabled: true });
expect(readContext.args[2][0]).to.have.property('focused', false);
});
});
describe('prop: focused', () => {
it('should display input in focused state', () => {
const readContext = spy();
const { container } = render(
<FormControl focused>
<Input />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('focused', true);
container.querySelector('input').blur();
expect(readContext.args[0][0]).to.have.property('focused', true);
});
it('ignores focused when disabled', () => {
const readContext = spy();
render(
<FormControl focused disabled>
<Input />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.include({ disabled: true, focused: false });
});
});
describe('input', () => {
it('should be filled when a value is set', () => {
const readContext = spy();
render(
<FormControl>
<Input value="bar" />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('filled', true);
});
it('should be filled when a defaultValue is set', () => {
const readContext = spy();
render(
<FormControl>
<Input defaultValue="bar" />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('filled', true);
});
it('should not be adornedStart with an endAdornment', () => {
const readContext = spy();
render(
<FormControl>
<Input endAdornment={<div />} />
<TestComponent contextCallback={readContext} />
</FormControl>,<|fim▁hole|> expect(readContext.args[0][0]).to.have.property('adornedStart', false);
});
it('should be adornedStart with a startAdornment', () => {
const readContext = spy();
render(
<FormControl>
<Input startAdornment={<div />} />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('adornedStart', true);
});
});
describe('select', () => {
it('should not be adorned without a startAdornment', () => {
const readContext = spy();
render(
<FormControl>
<Select value="" />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0]).to.have.property('adornedStart', false);
});
it('should be adorned with a startAdornment', () => {
const readContext = spy();
render(
<FormControl>
<Select value="" input={<Input startAdornment={<div />} />} />
<TestComponent contextCallback={readContext} />
</FormControl>,
);
expect(readContext.args[0][0].adornedStart, true);
});
});
describe('useFormControl', () => {
const FormController = React.forwardRef((_, ref) => {
const formControl = useFormControl();
React.useImperativeHandle(ref, () => formControl, [formControl]);
return null;
});
const FormControlled = React.forwardRef(function FormControlled(props, ref) {
return (
<FormControl {...props}>
<FormController ref={ref} />
</FormControl>
);
});
describe('from props', () => {
it('should have the required prop from the instance', () => {
const formControlRef = React.createRef();
const { setProps } = render(<FormControlled ref={formControlRef} />);
expect(formControlRef.current).to.have.property('required', false);
setProps({ required: true });
expect(formControlRef.current).to.have.property('required', true);
});
it('should have the error prop from the instance', () => {
const formControlRef = React.createRef();
const { setProps } = render(<FormControlled ref={formControlRef} />);
expect(formControlRef.current).to.have.property('error', false);
setProps({ error: true });
expect(formControlRef.current).to.have.property('error', true);
});
it('should have the margin prop from the instance', () => {
const formControlRef = React.createRef();
const { setProps } = render(<FormControlled ref={formControlRef} />);
expect(formControlRef.current).to.have.property('size', 'medium');
setProps({ size: 'small' });
expect(formControlRef.current).to.have.property('size', 'small');
});
it('should have the fullWidth prop from the instance', () => {
const formControlRef = React.createRef();
const { setProps } = render(<FormControlled ref={formControlRef} />);
expect(formControlRef.current).to.have.property('fullWidth', false);
setProps({ fullWidth: true });
expect(formControlRef.current).to.have.property('fullWidth', true);
});
});
describe('callbacks', () => {
describe('onFilled', () => {
it('should set the filled state', () => {
const formControlRef = React.createRef();
render(<FormControlled ref={formControlRef} />);
expect(formControlRef.current).to.have.property('filled', false);
act(() => {
formControlRef.current.onFilled();
});
expect(formControlRef.current).to.have.property('filled', true);
act(() => {
formControlRef.current.onFilled();
});
expect(formControlRef.current).to.have.property('filled', true);
});
});
describe('onEmpty', () => {
it('should clean the filled state', () => {
const formControlRef = React.createRef();
render(<FormControlled ref={formControlRef} />);
act(() => {
formControlRef.current.onFilled();
});
expect(formControlRef.current).to.have.property('filled', true);
act(() => {
formControlRef.current.onEmpty();
});
expect(formControlRef.current).to.have.property('filled', false);
act(() => {
formControlRef.current.onEmpty();
});
expect(formControlRef.current).to.have.property('filled', false);
});
});
describe('handleFocus', () => {
it('should set the focused state', () => {
const formControlRef = React.createRef();
render(<FormControlled ref={formControlRef} />);
expect(formControlRef.current).to.have.property('focused', false);
act(() => {
formControlRef.current.onFocus();
});
expect(formControlRef.current).to.have.property('focused', true);
act(() => {
formControlRef.current.onFocus();
});
expect(formControlRef.current).to.have.property('focused', true);
});
});
describe('handleBlur', () => {
it('should clear the focused state', () => {
const formControlRef = React.createRef();
render(<FormControlled ref={formControlRef} />);
expect(formControlRef.current).to.have.property('focused', false);
act(() => {
formControlRef.current.onFocus();
});
expect(formControlRef.current).to.have.property('focused', true);
act(() => {
formControlRef.current.onBlur();
});
expect(formControlRef.current).to.have.property('focused', false);
act(() => {
formControlRef.current.onBlur();
});
expect(formControlRef.current).to.have.property('focused', false);
});
});
});
});
});<|fim▁end|> | ); |
<|file_name|>issue_detail_broker.py<|end_file_name|><|fim▁begin|>from ..broker import Broker
class IssueDetailBroker(Broker):
controller = "issue_details"
def show(self, **kwargs):
"""Shows the details for the specified issue detail.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IssueID: The internal NetMRI identifier for this issue instance.
:type IssueID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return issue_detail: The issue detail identified by the specified IssueID.
:rtype issue_detail: IssueDetail
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
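# Usage sketch (the client wiring is assumed, not shown in this module): with an
# authenticated NetMRI API client, the broker is typically obtained and called as
#   broker = client.get_broker('IssueDetail')
#   detail = broker.show(IssueID=12345, methods=['severity', 'title'])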
def index(self, **kwargs):
"""Lists the available issue details. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant.
:type BatchID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant.
:type BatchID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this issue applies.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this issue applies.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type EndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant.
:type IprgID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant.
:type IprgID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IssueID: The internal NetMRI identifier for this issue instance.
:type IssueID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IssueID: The internal NetMRI identifier for this issue instance.
:type IssueID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IssueTypeID: An internal NetMRI identifier for the type of this issue.
:type IssueTypeID: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IssueTypeID: An internal NetMRI identifier for the type of this issue.
:type IssueTypeID: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant.
:type SubnetID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant.
:type SubnetID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Timestamp: The date and time this record was collected or calculated.
:type Timestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Timestamp: The date and time this record was collected or calculated.
:type Timestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant.
:type VlanID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant.
:type VlanID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the issue details as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IssueID
:param sort: The data field(s) to use for sorting the output. Default is IssueID. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IssueDetail. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return issue_details: An array of the IssueDetail objects that match the specified input criteria.
:rtype issue_details: Array of IssueDetail
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available issue details matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param AltDeviceID: The internal NetMRI identifier of the alternate device (such as a neighbor) involved in this issue, if relevant.
:type AltDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param AltDeviceID: The internal NetMRI identifier of the alternate device (such as a neighbor) involved in this issue, if relevant.
:type AltDeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant.
:type BatchID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant.
:type BatchID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ChangedCols: The fields that changed between this revision of the record and the previous revision.
:type ChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ChangedCols: The fields that changed between this revision of the record and the previous revision.
:type ChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Component: The issue component (Devices, Configuration, VLANs, etc.).
:type Component: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Component: The issue component (Devices, Configuration, VLANs, etc.).
:type Component: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Correctness: The correctness contribution for this issue.
:type Correctness: Float
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Correctness: The correctness contribution for this issue.
:type Correctness: Array of Float
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Criteria: The criteria value for this issue at the time it was raised.
:type Criteria: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Criteria: The criteria value for this issue at the time it was raised.
:type Criteria: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that raised this issue.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that raised this issue.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DetailID: A unique identifier for this issue instance.
:type DetailID: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DetailID: A unique identifier for this issue instance.
:type DetailID: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this issue applies.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this issue applies.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type EndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant.
:type IprgID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant.
:type IprgID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IssueID: The internal NetMRI identifier for this issue instance.
:type IssueID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IssueID: The internal NetMRI identifier for this issue instance.
:type IssueID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IssueTypeID: An internal NetMRI identifier for the type of this issue.
:type IssueTypeID: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IssueTypeID: An internal NetMRI identifier for the type of this issue.
:type IssueTypeID: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IssueValue: The meaning of this field varies based upon the specific issue.
:type IssueValue: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IssueValue: The meaning of this field varies based upon the specific issue.
:type IssueValue: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SeverityID: The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting.
:type SeverityID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SeverityID: The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting.
:type SeverityID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Stability: The stability contribution for this issue.
:type Stability: Float
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Stability: The stability contribution for this issue.
:type Stability: Array of Float
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartTime: The date/time this issue instance was raised.
:type StartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartTime: The date/time this issue instance was raised.
:type StartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant.
:type SubnetID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant.
:type SubnetID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SuppressedInd: A flag indicating whether this issue is suppressed or not.
:type SuppressedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SuppressedInd: A flag indicating whether this issue is suppressed or not.
:type SuppressedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Timestamp: The date and time this record was collected or calculated.
:type Timestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Timestamp: The date and time this record was collected or calculated.
:type Timestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant.
:type VlanID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant.
:type VlanID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the issue details as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IssueID
:param sort: The data field(s) to use for sorting the output. Default is IssueID. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IssueDetail. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. If empty or omitted, all attributes will be returned.
:type select: Array
<|fim▁hole|> | ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against issue details, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: AltDeviceID, BatchID, ChangedCols, Component, Correctness, Criteria, DataSourceID, DetailID, DeviceID, EndTime, InterfaceID, IprgID, IssueID, IssueTypeID, IssueValue, SeverityID, Stability, StartTime, SubnetID, SuppressedInd, Timestamp, VlanID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return issue_details: An array of the IssueDetail objects that match the specified input criteria.
:rtype issue_details: Array of IssueDetail
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available issue details matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: AltDeviceID, BatchID, ChangedCols, Component, Correctness, Criteria, DataSourceID, DetailID, DeviceID, EndTime, InterfaceID, IprgID, IssueID, IssueTypeID, IssueValue, SeverityID, Stability, StartTime, SubnetID, SuppressedInd, Timestamp, VlanID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_AltDeviceID: The operator to apply to the field AltDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. AltDeviceID: The internal NetMRI identifier of the alternate device (such as a neighbor) involved in this issue, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_AltDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_AltDeviceID: If op_AltDeviceID is specified, the field named in this input will be compared to the value in AltDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_AltDeviceID must be specified if op_AltDeviceID is specified.
:type val_f_AltDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_AltDeviceID: If op_AltDeviceID is specified, this value will be compared to the value in AltDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_AltDeviceID must be specified if op_AltDeviceID is specified.
:type val_c_AltDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BatchID: The operator to apply to the field BatchID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BatchID: The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BatchID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BatchID: If op_BatchID is specified, the field named in this input will be compared to the value in BatchID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BatchID must be specified if op_BatchID is specified.
:type val_f_BatchID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BatchID: If op_BatchID is specified, this value will be compared to the value in BatchID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BatchID must be specified if op_BatchID is specified.
:type val_c_BatchID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ChangedCols: The operator to apply to the field ChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ChangedCols: If op_ChangedCols is specified, the field named in this input will be compared to the value in ChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ChangedCols must be specified if op_ChangedCols is specified.
:type val_f_ChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ChangedCols: If op_ChangedCols is specified, this value will be compared to the value in ChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ChangedCols must be specified if op_ChangedCols is specified.
:type val_c_ChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Component: The operator to apply to the field Component. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Component: The issue component (Devices, Configuration, VLANs, etc.). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Component: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Component: If op_Component is specified, the field named in this input will be compared to the value in Component using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Component must be specified if op_Component is specified.
:type val_f_Component: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Component: If op_Component is specified, this value will be compared to the value in Component using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Component must be specified if op_Component is specified.
:type val_c_Component: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Correctness: The operator to apply to the field Correctness. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Correctness: The correctness contribution for this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Correctness: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Correctness: If op_Correctness is specified, the field named in this input will be compared to the value in Correctness using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Correctness must be specified if op_Correctness is specified.
:type val_f_Correctness: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Correctness: If op_Correctness is specified, this value will be compared to the value in Correctness using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Correctness must be specified if op_Correctness is specified.
:type val_c_Correctness: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Criteria: The operator to apply to the field Criteria. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Criteria: The criteria value for this issue at the time it was raised. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Criteria: If op_Criteria is specified, the field named in this input will be compared to the value in Criteria using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Criteria must be specified if op_Criteria is specified.
:type val_f_Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Criteria: If op_Criteria is specified, this value will be compared to the value in Criteria using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Criteria must be specified if op_Criteria is specified.
:type val_c_Criteria: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that raised this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DetailID: The operator to apply to the field DetailID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DetailID: A unique identifier for this issue instance. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DetailID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DetailID: If op_DetailID is specified, the field named in this input will be compared to the value in DetailID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DetailID must be specified if op_DetailID is specified.
:type val_f_DetailID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DetailID: If op_DetailID is specified, this value will be compared to the value in DetailID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DetailID must be specified if op_DetailID is specified.
:type val_c_DetailID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device to which this issue applies. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified.
:type val_f_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified.
:type val_c_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the interface to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgID: The operator to apply to the field IprgID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgID: The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgID: If op_IprgID is specified, the field named in this input will be compared to the value in IprgID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgID must be specified if op_IprgID is specified.
:type val_f_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgID: If op_IprgID is specified, this value will be compared to the value in IprgID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgID must be specified if op_IprgID is specified.
:type val_c_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IssueID: The operator to apply to the field IssueID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IssueID: The internal NetMRI identifier for this issue instance. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IssueID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IssueID: If op_IssueID is specified, the field named in this input will be compared to the value in IssueID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IssueID must be specified if op_IssueID is specified.
:type val_f_IssueID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IssueID: If op_IssueID is specified, this value will be compared to the value in IssueID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IssueID must be specified if op_IssueID is specified.
:type val_c_IssueID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IssueTypeID: The operator to apply to the field IssueTypeID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IssueTypeID: An internal NetMRI identifier for the type of this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IssueTypeID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IssueTypeID: If op_IssueTypeID is specified, the field named in this input will be compared to the value in IssueTypeID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IssueTypeID must be specified if op_IssueTypeID is specified.
:type val_f_IssueTypeID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IssueTypeID: If op_IssueTypeID is specified, this value will be compared to the value in IssueTypeID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IssueTypeID must be specified if op_IssueTypeID is specified.
:type val_c_IssueTypeID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IssueValue: The operator to apply to the field IssueValue. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IssueValue: The meaning of this field varies based upon the specific issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IssueValue: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IssueValue: If op_IssueValue is specified, the field named in this input will be compared to the value in IssueValue using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IssueValue must be specified if op_IssueValue is specified.
:type val_f_IssueValue: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IssueValue: If op_IssueValue is specified, this value will be compared to the value in IssueValue using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IssueValue must be specified if op_IssueValue is specified.
:type val_c_IssueValue: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SeverityID: The operator to apply to the field SeverityID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SeverityID: The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SeverityID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SeverityID: If op_SeverityID is specified, the field named in this input will be compared to the value in SeverityID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SeverityID must be specified if op_SeverityID is specified.
:type val_f_SeverityID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SeverityID: If op_SeverityID is specified, this value will be compared to the value in SeverityID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SeverityID must be specified if op_SeverityID is specified.
:type val_c_SeverityID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Stability: The operator to apply to the field Stability. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Stability: The stability contribution for this issue. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Stability: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Stability: If op_Stability is specified, the field named in this input will be compared to the value in Stability using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Stability must be specified if op_Stability is specified.
:type val_f_Stability: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Stability: If op_Stability is specified, this value will be compared to the value in Stability using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Stability must be specified if op_Stability is specified.
:type val_c_Stability: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The date/time this issue instance was raised. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified.
:type val_f_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified.
:type val_c_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubnetID: The operator to apply to the field SubnetID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubnetID: The internal NetMRI identifier for the subnet to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubnetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubnetID: If op_SubnetID is specified, the field named in this input will be compared to the value in SubnetID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubnetID must be specified if op_SubnetID is specified.
:type val_f_SubnetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubnetID: If op_SubnetID is specified, this value will be compared to the value in SubnetID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubnetID must be specified if op_SubnetID is specified.
:type val_c_SubnetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SuppressedInd: The operator to apply to the field SuppressedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SuppressedInd: A flag indicating whether this issue is suppressed or not. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SuppressedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SuppressedInd: If op_SuppressedInd is specified, the field named in this input will be compared to the value in SuppressedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SuppressedInd must be specified if op_SuppressedInd is specified.
:type val_f_SuppressedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SuppressedInd: If op_SuppressedInd is specified, this value will be compared to the value in SuppressedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SuppressedInd must be specified if op_SuppressedInd is specified.
:type val_c_SuppressedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Timestamp: The operator to apply to the field Timestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Timestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Timestamp: If op_Timestamp is specified, the field named in this input will be compared to the value in Timestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Timestamp must be specified if op_Timestamp is specified.
:type val_f_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Timestamp: If op_Timestamp is specified, this value will be compared to the value in Timestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Timestamp must be specified if op_Timestamp is specified.
:type val_c_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanID: The operator to apply to the field VlanID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanID: If op_VlanID is specified, the field named in this input will be compared to the value in VlanID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanID must be specified if op_VlanID is specified.
:type val_f_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanID: If op_VlanID is specified, this value will be compared to the value in VlanID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanID must be specified if op_VlanID is specified.
:type val_c_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the issue details as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IssueID
:param sort: The data field(s) to use for sorting the output. Default is IssueID. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IssueDetail. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return issue_details: An array of the IssueDetail objects that match the specified input criteria.
:rtype issue_details: Array of IssueDetail
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def direct_data(self, **kwargs):
"""Return data for a given issue.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param column_names: The names of columns for which we want the content.
:type column_names: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start_time: None
:type start_time: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param end_time: None
:type end_time: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_id: None
:type device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param issue_type_id: None
:type issue_type_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param issue_id: None
:type issue_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param group_ids: None
:type group_ids: Array
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param issue_id: Id of the issue.
:type issue_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param mode: None
:type mode: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("direct_data"), kwargs)<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from trifle.server.views.api import api
from trifle.server.views.monitor import monitor
from trifle.server.views.configure import configure<|fim▁end|> | from trifle.server.views.frontend import frontend |
<|file_name|>TAARenderPass.d.ts<|end_file_name|><|fim▁begin|>import {<|fim▁hole|>} from '../../../src/Three';
import { SSAARenderPass } from './SSAARenderPass';
export class TAARenderPass extends SSAARenderPass {
constructor(scene: Scene, camera: Camera, clearColor: Color | string | number, clearAlpha: number);
accumulate: boolean;
}<|fim▁end|> | Scene,
Camera,
Color |
<|file_name|>issue-28344.rs<|end_file_name|><|fim▁begin|>use std::ops::BitXor;
fn main() {
let x: u8 = BitXor::bitor(0 as u8, 0 as u8);
//~^ ERROR must be specified
//~| no function or associated item named
let g = BitXor::bitor;<|fim▁hole|> //~^ ERROR must be specified
//~| no function or associated item named
}<|fim▁end|> | |
<|file_name|>studio-bridge-fullshot-image.js<|end_file_name|><|fim▁begin|>(function ($) {
function getCsrfTokenForFullShotImage(callback) {
$
.get(Drupal.url('rest/session/token'))
.done(function (data) {
var csrfToken = data;
callback(csrfToken);
});
}
<|fim▁hole|>
$.ajax({
url: Drupal.url('file/' + fid + '?_format=hal_json'),
method: 'PATCH',
headers: {
'Content-Type': 'application/hal+json',
'X-CSRF-Token': csrfToken
},
data: JSON.stringify(file),
success: function (file) {
//console.log(node);
//document.getElementById('msg-up').innerHTML = 'Image Fullshot started!';
swal({
title: "Full Shot",
text: "Image has been selected as full shot. Scan next ID",
type: "success",
showCancelButton: false,
confirmButtonColor: "#DD6B55",
confirmButtonText: "OK",
closeOnConfirm: true
});
},
error: function(){
swal({
title: "Full Shot",
text: "There was an error, please try again.",
type: "error",
showCancelButton: false,
confirmButtonColor: "#DD6B55",
confirmButtonText: "OK",
closeOnConfirm: true
});
}
});
// setTimeout(function(){
// document.getElementById('msg-up').innerHTML = '';
// }, 3300);
}
/*
* tag value 1 means tag
* tag value 0 means undo tag
*
*/
function update_image_fullshot(tag,fidinput) {
var nid = fidinput;
var Node_imgs = {
_links: {
type: {
href: Drupal.url.toAbsolute(drupalSettings.path.baseUrl + 'rest/type/file/image')
}
},
// type: {
// target_id: 'products'
// },
field_full_shoot: {
value:tag
}
};
getCsrfTokenForFullShotImage(function (csrfToken) {
if (nid) {
patchImageFullShot(csrfToken, Node_imgs, nid);
}else{
alert('No product node found, please refresh the page.');
}
});
}
$(".studio-img-fullshot").click(function () {
var id = $(this).parents('span').attr('id');
console.log('fullshot');
update_image_fullshot(1,id);
});
$(document).on("click",".studio-img-fullshot",function(){
var id = $(this).parents('span').attr('id');
console.log('fullshot');
update_image_fullshot(1,id);
});
})(jQuery);<|fim▁end|> | function patchImageFullShot(csrfToken, file, fid) {
//document.getElementById('msg-up').innerHTML = 'Image marked as fullshot ....'; |
<|file_name|>scan.js<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2004-2012, The Dojo Foundation All Rights Reserved.
Available via Academic Free License >= 2.1 OR the modified BSD license.
see: http://dojotoolkit.org/license for details
*/
if(!dojo._hasResource["dojox.lang.functional.scan"]){ //_hasResource checks added by build. Do not use _hasResource directly in your code.
dojo._hasResource["dojox.lang.functional.scan"] = true;
dojo.provide("dojox.lang.functional.scan");
dojo.require("dojox.lang.functional.lambda");
// This module adds high-level functions and related constructs:
// - "scan" family of functions
// Notes:
// - missing high-level functions are provided with the compatible API:
// scanl, scanl1, scanr, scanr1
// Defined methods:
// - take any valid lambda argument as the functional argument
// - operate on dense arrays
// - take a string as the array argument
// - take an iterator objects as the array argument (only scanl, and scanl1)
(function(){
var d = dojo, df = dojox.lang.functional, empty = {};
d.mixin(df, {
// classic reduce-class functions
scanl: function(/*Array|String|Object*/ a, /*Function|String|Array*/ f, /*Object*/ z, /*Object?*/ o){
// summary: repeatedly applies a binary function to an array from left
// to right using a seed value as a starting point; returns an array
// of values produced by foldl() at that point.
if(typeof a == "string"){ a = a.split(""); }
o = o || d.global; f = df.lambda(f);
var t, n, i;
if(d.isArray(a)){
// array
t = new Array((n = a.length) + 1);
t[0] = z;
for(i = 0; i < n; z = f.call(o, z, a[i], i, a), t[++i] = z);
}else if(typeof a.hasNext == "function" && typeof a.next == "function"){
// iterator
t = [z];
for(i = 0; a.hasNext(); t.push(z = f.call(o, z, a.next(), i++, a)));
}else{
// object/dictionary
t = [z];
for(i in a){
if(!(i in empty)){
t.push(z = f.call(o, z, a[i], i, a));
}
}
}
return t; // Array
},
scanl1: function(/*Array|String|Object*/ a, /*Function|String|Array*/ f, /*Object?*/ o){
// summary: repeatedly applies a binary function to an array from left
// to right; returns an array of values produced by foldl1() at that
// point.
if(typeof a == "string"){ a = a.split(""); }
o = o || d.global; f = df.lambda(f);
var t, n, z, first = true;
if(d.isArray(a)){
// array
t = new Array(n = a.length);
t[0] = z = a[0];
for(var i = 1; i < n; t[i] = z = f.call(o, z, a[i], i, a), ++i);
}else if(typeof a.hasNext == "function" && typeof a.next == "function"){
// iterator
if(a.hasNext()){
t = [z = a.next()];
for(var i = 1; a.hasNext(); t.push(z = f.call(o, z, a.next(), i++, a)));
}
}else{
// object/dictionary
for(var i in a){
if(!(i in empty)){
if(first){
t = [z = a[i]];
first = false;
}else{
t.push(z = f.call(o, z, a[i], i, a));
}
}
}
}
return t; // Array
},
scanr: function(/*Array|String*/ a, /*Function|String|Array*/ f, /*Object*/ z, /*Object?*/ o){
// summary: repeatedly applies a binary function to an array from right
// to left using a seed value as a starting point; returns an array
// of values produced by foldr() at that point.
if(typeof a == "string"){ a = a.split(""); }
o = o || d.global; f = df.lambda(f);
var n = a.length, t = new Array(n + 1), i = n;
t[n] = z;
for(; i > 0; --i, z = f.call(o, z, a[i], i, a), t[i] = z);
return t; // Array<|fim▁hole|> },
scanr1: function(/*Array|String*/ a, /*Function|String|Array*/ f, /*Object?*/ o){
// summary: repeatedly applies a binary function to an array from right
// to left; returns an array of values produced by foldr1() at that
// point.
if(typeof a == "string"){ a = a.split(""); }
o = o || d.global; f = df.lambda(f);
var n = a.length, t = new Array(n), z = a[n - 1], i = n - 1;
t[i] = z;
for(; i > 0; --i, z = f.call(o, z, a[i], i, a), t[i] = z);
return t; // Array
}
});
})();
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from .gameserver import Game<|fim▁hole|><|fim▁end|> | from .example import TicTacToe |
<|file_name|>test_parser.py<|end_file_name|><|fim▁begin|>import parser
import unittest
import sys
class TestVideoParser(unittest.TestCase):
def test_parse_video(self):
if sys.platform.startswith('win'):
path = '\\server\\Movies\\Brave (2007)\\Brave (2006).mkv'
else:
path = '/server/Movies/Brave (2007)/Brave (2006).mkv'
video_info = parser.parse_video(path)
self.assertEqual(video_info['name'], 'Brave')
self.assertEqual(video_info['container'], 'mkv')
self.assertEqual(video_info['year'], 2006)
class TestVideoStackParser(unittest.TestCase):
def test_parse_simple_stack(self):
files = (
'Bad Boys (2006) part1.mkv',
'Bad Boys (2006) part2.mkv',
'Bad Boys (2006) part3.mkv',
'Bad Boys (2006) part4.mkv',
'Bad Boys (2006)-trailer.mkv',
)
stack = parser.parse_video_stack(files)
print(stack)
self.assertEqual(len(stack), 1)
def test_parse_dual_stacks(self):
files = (
'Bad Boys (2006) part1.mkv',
'Bad Boys (2006) part2.mkv',
'Bad Boys (2006) part3.mkv',
'Bad Boys (2006) part4.mkv',
'Bad Boys (2006)-trailer.mkv',
'300 (2006) part1.mkv',
'300 (2006) part2.mkv',
'300 (2006) part3.mkv',
'300 (2006)-trailer.mkv'
)
stacks = parser.parse_video_stack(files)
for s in stacks:
print(s)
self.assertEqual(len(stacks), 2)
def test_dirty_names(self):
files = (
"Bad Boys (2006).part1.stv.unrated.multi.1080p.bluray.x264-rough.mkv",
"Bad Boys (2006).part2.stv.unrated.multi.1080p.bluray.x264-rough.mkv",
"Bad Boys (2006).part3.stv.unrated.multi.1080p.bluray.x264-rough.mkv",
"Bad Boys (2006).part4.stv.unrated.multi.1080p.bluray.x264-rough.mkv",
"Bad Boys (2006)-trailer.mkv"
)
stack = parser.parse_video_stack(files)
print(stack)
self.assertEqual(len(stack), 1)
#TestStackInfo(result.Stacks[0], "Bad Boys (2006).stv.unrated.multi.1080p.bluray.x264-rough", 4);
def test_parse_mixed_expressions(self):<|fim▁hole|> files = (
'Bad Boys (2006) part1.mkv',
'Bad Boys (2006) part2.mkv',
'Bad Boys (2006) part3.mkv',
'Bad Boys (2006) part4.mkv',
'Bad Boys (2006)-trailer.mkv',
'300 (2006) parta.mkv',
'300 (2006) partb.mkv',
'300 (2006) partc.mkv',
'300 (2006) partd.mkv',
'300 (2006)-trailer.mkv',
'300a.mkv',
'300b.mkv',
'300c.mkv',
'300-trailer.mkv'
)
stacks = parser.parse_video_stack(files)
for s in stacks:
print(s)
self.assertEqual(len(stacks), 3)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>libsoletta.so-gdb.py<|end_file_name|><|fim▁begin|># This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gdb
import re
## IMPORTANT NOTE:
#
# This file is a Python GDB script that is highly dependent on
# symbol names, even the internal functions and parameters.
#
# Whenever depending on a symbol, mark them in the source file
# so people know they have to adapt this file on changes.
## LOADING:
#
# This file should be auto-loaded by gdb if it is installed in GDB's
# auto-load directory and matches the installed libsoletta.so,
# including the final so-version.
#
# If soletta is installed to a custom directory, then make sure GDB knows
# about this location and that the directory is marked as safe-path:
#
# (gdb) add-auto-load-scripts-directory ${soletta_prefix}/share/gdb/auto-load
# (gdb) add-auto-load-safe-path ${soletta_prefix}/share/gdb/auto-load<|fim▁hole|># It may be included directly if not auto-loaded:
#
# (gdb) source ${soletta_prefix}/share/gdb/auto-load/libsoletta.so-gdb.py
#
## Usage:
# commands start with the 'sol_' prefix; you can use 'apropos ^sol_' to
# filter commands in our namespace, or use tab-completion.
# Use GDB's "help <command>" to get more information.
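#
# Illustrative examples (node ids and expressions depend on the program
# being debugged):
#
#   (gdb) sol_flow break open timer
#   (gdb) sol_flow break send /^timer.*$/
#   (gdb) sol_flow print type node   # 'node' must be a valid expression here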
defvalue_member_map = {
"string": "s",
"byte": "byte",
"boolean": "b",
"int": "i",
"float": "f",
"rgb": "rgb",
"direction_vector": "direction_vector",
}
def get_type_description(type):
try:
tdesc = type["description"]
if tdesc:
return tdesc.dereference()
except KeyError:
pass
return None
def get_node_type_description(node):
type = node["type"]
return get_type_description(type)
def _get_node_port_index_by_name(node, member, port_name):
tdesc = get_node_type_description(node)
if not tdesc:
return -1
array = tdesc[member]
if not array:
return -1
i = 0
while array[i]:
port = array[i]
if port["name"] and port["name"].string() == port_name:
return i
i += 1
return -1
def get_node_port_out_index_by_name(node, port_name):
return _get_node_port_index_by_name(node, "ports_out", port_name)
def get_node_port_in_index_by_name(node, port_name):
return _get_node_port_index_by_name(node, "ports_in", port_name)
def _get_node_port_name_by_index(node, member, port_index):
tdesc = get_node_type_description(node)
if not tdesc:
return None
array = tdesc[member]
if not array:
return None
i = 0
while array[i]:
if i == port_index:
port = array[i]
if port["name"]:
return port["name"].string()
return None
elif i > port_index:
break
i += 1
return None
def get_node_port_out_name_by_index(node, port_index):
return _get_node_port_name_by_index(node, "ports_out", port_index)
def get_node_port_in_name_by_index(node, port_index):
return _get_node_port_name_by_index(node, "ports_in", port_index)
class FlowTypePrinter(object):
"Print a 'struct sol_flow_node_type'"
def __init__(self, val):
self.val = val
self.port_in_type = gdb.lookup_type("struct sol_flow_port_type_in").const().pointer()
def display_hint(self):
return 'sol_flow_node_type'
def _port_description_to_string(self, index, port, port_type):
s = ("\n %d %s (%s)\n" \
" description: %s\n") % (
index,
port["name"].string(),
port["data_type"].string(),
port["description"].string())
if port_type["connect"]:
s += " connect(): %s\n" % (port_type["connect"],)
if port_type["disconnect"]:
s += " disconnect(): %s\n" % (port_type["disconnect"],)
if port_type.type == self.port_in_type and port_type["process"]:
s += " process(): %s\n" % (port_type["process"],)
return s
def _option_description_to_string(self, option):
data_type = option["data_type"].string()
defvalue_member = defvalue_member_map.get(data_type)
if not defvalue_member:
defvalue = ""
else:
defvalue = option["defvalue"][defvalue_member]
if data_type == "string":
if defvalue:
defvalue = defvalue.string()
else:
defvalue = "NULL"
defvalue = " (default=%s)" % (defvalue,)
return "\n %s(%s) \"%s\"%s," % (
option["name"].string(),
data_type,
option["description"].string(),
defvalue)
def _ports_description_to_string(self, array, get_port_type):
if not array:
return ""
i = 0
r = []
while array[i]:
port_type = get_port_type(i)
r.append(self._port_description_to_string(i, array[i], port_type))
i += 1
if i > 0:
r.append("\n ")
return "".join(r)
def _options_description_to_string(self, opts):
if not opts:
return ""
opts = opts.dereference()
array = opts["members"]
if not array:
return ""
i = 0
r = []
while array[i]["name"]:
r.append(self._option_description_to_string(array[i]))
i += 1
if i > 0:
r.append("\n ")
return "".join(r)
def to_string(self):
type = self.val
tdesc = get_type_description(type)
if tdesc:
get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
get_port_out = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
p_type = type.address
ports_in = self._ports_description_to_string(tdesc["ports_in"], lambda idx: get_port_in(p_type, idx))
ports_out = self._ports_description_to_string(tdesc["ports_out"], lambda idx: get_port_out(p_type, idx))
options = self._options_description_to_string(tdesc["options"])
return "%s=%s" \
"\n name=\"%s\"," \
"\n category=\"%s\"," \
"\n description=\"%s\"," \
"\n ports_in={%s}," \
"\n ports_out={%s}," \
"\n options={%s})" % (
tdesc["symbol"].string(),
type.address,
tdesc["name"].string(),
tdesc["category"].string(),
tdesc["description"].string(),
ports_in,
ports_out,
options)
return "(struct sol_flow_node_type)%s (no node type description)" % (type.address,)
class FlowPrinter(object):
"Print a 'struct sol_flow_node'"
def __init__(self, val):
self.val = val
def display_hint(self):
return 'sol_flow_node'
def to_string(self):
id = self.val["id"]
type = self.val["type"]
if not type:
return "sol_flow_node(%s) is under construction." % (
self.val.address,)
tname = "%#x (no node type description)" % (type.address,)
tdesc = get_type_description(type)
if tdesc:
tname = "%s(%s=%s)" % (
tdesc["name"].string(),
tdesc["symbol"].string(),
type.address)
return "sol_flow_node(%s, id=\"%s\", type=%s)" % (
self.val.address, id.string(), tname)
def sol_flow_pretty_printers(val):
lookup_tag = val.type.tag
if lookup_tag == "sol_flow_node":
return FlowPrinter(val)
elif lookup_tag == "sol_flow_node_type":
return FlowTypePrinter(val)
return None
def register_pretty_printers(objfile):
gdb.pretty_printers.append(sol_flow_pretty_printers)
def get_type_options_string(type, options):
if not options:
return ""
tdesc = get_type_description(type)
if not tdesc or not tdesc["options"] or not tdesc["options"]["members"]:
return "OPTIONS: %s (no node type description)\n" % (options,)
string = ""
opts_desc = tdesc["options"]
array = opts_desc["members"]
i = 0
string += "OPTIONS: (struct %s*)%s\n" % (tdesc["options_symbol"].string(), options)
opt_type = gdb.lookup_type("struct %s" % (tdesc["options_symbol"].string(),))
options = options.cast(opt_type.pointer())
while array[i]["name"]:
m = array[i]
name = m["name"].string()
data_type = m["data_type"].string()
description = m["description"].string()
value = options[name]
if data_type == "string":
if value:
value = value.string()
else:
value = "NULL"
defvalue_member = defvalue_member_map.get(data_type)
if not defvalue_member:
defvalue = ""
else:
defvalue = m["defvalue"][defvalue_member]
if data_type == "string":
if defvalue:
defvalue = defvalue.string()
else:
defvalue = "NULL"
defvalue = " (default=%s)" % (defvalue,)
string += " %s (%s) = %s // %s%s\n" % (name, data_type, value, description, defvalue)
i += 1
string += "\n"
return string
class InspectAndBreakIfMatches(gdb.Breakpoint):
class InternalBreak(gdb.Breakpoint):
def __init__(self, method, banner=None, matches=None, values=None):
addr = "*%s" % (method.cast(gdb.lookup_type("long")),)
self.method = method
self.banner = banner
self.matches = matches or {}
self.values = values or {}
gdb.Breakpoint.__init__(self, addr, gdb.BP_BREAKPOINT, internal=True, temporary=True)
def stop(self):
if self.banner:
if callable(self.banner):
self.banner(self.matches, self.values)
else:
gdb.write(self.banner)
return True
def __init__(self, spec, matches):
gdb.Breakpoint.__init__(self, spec, gdb.BP_BREAKPOINT, internal=False)
self.matches = {}
for k, v in matches.items():
self.matches[k] = get_str_or_regexp_match(v)
def print_matches(self, values=None):
gdb.write("%s matches:\n" % (self.__class__.__name__,), gdb.STDERR)
if not values:
values = {}
for k, func in self.matches.items():
v = values.get(k)
if v is None:
gdb.write(" %s = %s (no value provided)\n" % (k, func.__doc__), gdb.STDERR)
else:
try:
res = func(v)
except Exception as e:
res = "Exception executing match: %s" % (e,)
gdb.write(" %s = %s (value: '%s', match: %s)\n" %
(k, func.__doc__, v, res), gdb.STDERR)
gdb.write("\n", gdb.STDERR)
def get_values(self):
        raise NotImplementedError()
def stop(self):
try:
values = self.get_values()
except Exception as e:
gdb.write("Exception at %s.get_values(): %s\n" % (
self.__class__.__name__, e), gdb.STDERR)
return False
if not values:
gdb.write("%s.get_values() did not return values.\n" % (
self.__class__.__name__,), gdb.STDERR)
return False
def print_values():
gdb.write("Values:\n", gdb.STDERR)
for k, v in values.items():
gdb.write(" %s: %s\n" % (k, v), gdb.STDERR)
gdb.write("\n", gdb.STDERR)
for k, match_func in self.matches.items():
try:
v = values[k]
except KeyError:
gdb.write("%s.get_values() did not provide key '%s'.\n" % (
self.__class__.__name__, k), gdb.STDERR)
self.print_matches(values)
print_values()
return False
try:
if not match_func(v):
return False
except Exception as e:
gdb.write("Exception at %s.stop() while matching %s %s (%s): %s\n" % (
self.__class__.__name__, k, v, match_func.__doc__, e,), gdb.STDERR)
self.print_matches(values)
return False
method = values.get("method")
banner = values.get("banner")
if not method:
node = values.get("node")
if node:
gdb.write("NODE: %s\n" % (node,), gdb.STDERR)
gdb.write("%s did not return the internal method to break at.\n" % (
self.__class__.__name__,), gdb.STDERR)
self.print_matches(values)
gdb.write("Breaking at the caller function %s\n" % (self.location,),
gdb.STDERR)
return True
def add_breakpoint():
try:
self.InternalBreak(method, banner, self.matches, values)
except Exception as e:
gdb.write("Could not add internal breakpoint: %s\n" % (e,), gdb.STDERR)
self.print_matches(values)
gdb.post_event(add_breakpoint)
return False
def get_str_or_regexp_match(string):
if not string:
string = "/.*/"
if len(string) > 2 and string.startswith("/") and string.endswith("/"):
r = re.compile(string[1:-1])
match = lambda x: bool(r.match(x))
else:
match = lambda x: string == x
match.__doc__ = string
return match
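# For example (illustrative): get_str_or_regexp_match("timer")("timer") is True
# (exact match), while get_str_or_regexp_match("/^timer/")("timer_2s") is also
# True, since the "/.../" form is compiled as a regular expression.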
class FlowBreakOpen(InspectAndBreakIfMatches):
def __init__(self, matches):
InspectAndBreakIfMatches.__init__(self, "sol_flow_node_init", matches)
def get_values(self):
node_id = gdb.parse_and_eval("name")
if node_id:
node_id = node_id.string()
type = gdb.parse_and_eval("type")
method = type["open"]
node = gdb.parse_and_eval("*node")
options = gdb.parse_and_eval("options")
def banner(matches, values):
gdb.write("""\
Break before opening node:
FUNCTION: %s
NODE....: %s (filter: %s)
%s""" % (method, node,
matches["node_id"].__doc__,
get_type_options_string(node["type"], options)))
return {
"node": node,
"node_id": node_id,
"method": method,
"banner": banner,
}
class FlowBreakClose(InspectAndBreakIfMatches):
def __init__(self, matches):
InspectAndBreakIfMatches.__init__(self, "sol_flow_node_fini", matches)
def get_values(self):
node = gdb.parse_and_eval("*node")
node_id = node["id"]
if node_id:
node_id = node_id.string()
type = node["type"]
method = type["close"]
def banner(matches, values):
gdb.write("""\
Break before closing node:
FUNCTION: %s
NODE....: %s (filter: %s)
""" % (method, node,
matches["node_id"].__doc__))
return {
"node": node,
"node_id": node_id,
"method": method,
"banner": banner,
}
class FlowBreakSend(InspectAndBreakIfMatches):
def __init__(self, matches):
InspectAndBreakIfMatches.__init__(self, "inspector_will_send_packet", matches)
def get_values(self):
node = gdb.parse_and_eval("*src_node")
port = gdb.parse_and_eval("src_port")
packet = gdb.parse_and_eval("*packet")
node_id = node["id"]
if node_id:
node_id = node_id.string()
port_name = get_node_port_out_name_by_index(node, port)
packet_type = packet["type"]["name"].string()
type = gdb.parse_and_eval("(struct sol_flow_node_container_type *)src_node->parent->type")
method = type["send"]
def banner(matches, values):
gdb.write("""\
Break before sending packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
method,
node,
matches["node_id"].__doc__,
port_name,
port,
matches["port_name"].__doc__,
packet,
matches["packet_type"].__doc__))
return {
"node": node,
"node_id": node_id,
"port_name": port_name,
"packet_type": packet_type,
"method": method,
"banner": banner,
}
class FlowBreakProcess(InspectAndBreakIfMatches):
def __init__(self, matches):
InspectAndBreakIfMatches.__init__(self, "inspector_will_deliver_packet", matches)
def get_values(self):
node = gdb.parse_and_eval("*dst_node")
port = gdb.parse_and_eval("dst_port")
packet = gdb.parse_and_eval("*packet")
node_id = node["id"]
if node_id:
node_id = node_id.string()
port_name = get_node_port_in_name_by_index(node, port)
packet_type = packet["type"]["name"].string()
get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
type = node["type"]
port_type = get_port_in(type, port)
if not port_type:
method = None
else:
method = port_type["process"]
def banner(matches, values):
gdb.write("""\
Break before processing packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
method,
node,
matches["node_id"].__doc__,
port_name,
port,
matches["port_name"].__doc__,
packet,
matches["packet_type"].__doc__))
return {
"node": node,
"node_id": node_id,
"port_name": port_name,
"packet_type": packet_type,
"method": method,
"banner": banner,
}
class FlowCommand(gdb.Command):
"Commands to operate with 'sol_flow'"
def __init__(self):
gdb.Command.__init__(self, "sol_flow", gdb.COMMAND_USER, gdb.COMPLETE_COMMAND, True)
def invoke(self, arg, from_tty):
raise gdb.GdbError("missing sub-command: break or print")
class FlowBreakCommand(gdb.Command):
"Add an execution break when sol_flow events happen."
def __init__(self):
gdb.Command.__init__(self, "sol_flow break", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)
def invoke(self, arg, from_tty):
raise gdb.GdbError("missing sub-command: open, close, send or process")
class FlowBreakFilterBaseCommand(gdb.Command):
"""Base command for 'sol_flow break' subcommands.
    The subcommand will be registered and will take matches as a list of
    optional arguments. If an argument is not provided, None is assumed.
    These parameters are passed to the breakpoint in order.
"""
def __init__(self, subcommand, matches, breakpoint):
gdb.Command.__init__(self, "sol_flow break " + subcommand, gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)
self.matches = matches
self.breakpoint = breakpoint
def invoke(self, arg, from_tty):
arg = gdb.string_to_argv(arg)
params = {}
for i, name in enumerate(self.matches):
if len(arg) > i:
p = arg[i]
else:
p = None
params[name] = p
self.breakpoint(params)
self.dont_repeat()
class FlowBreakOpenCommand(FlowBreakFilterBaseCommand):
"""Add an execution break when sol_flow_node is created (type->open).
Arguments: node_id
node_id may be an exact string or a regular expression if enclosed
in "//". Examples:
sol_flow break open timer
will break on nodes with id "timer" (exact match)
sol_flow break open /^timer.*$/
will break on nodes with id that matches regular expression
"^timer.*$" (starts with "timer")
"""
def __init__(self):
matches = ["node_id"]
FlowBreakFilterBaseCommand.__init__(self, "open", matches, FlowBreakOpen)
class FlowBreakCloseCommand(FlowBreakFilterBaseCommand):
"""Add an execution break when sol_flow_node is destroyed (type->close).
Arguments: node_id
node_id may be an exact string or a regular expression if enclosed
in "//". Examples:
sol_flow break close timer
will break on nodes with id "timer" (exact match)
sol_flow break close /^timer.*$/
will break on nodes with id that matches regular expression
"^timer.*$" (starts with "timer")
"""
def __init__(self):
matches = ["node_id"]
FlowBreakFilterBaseCommand.__init__(self, "close", matches, FlowBreakClose)
class FlowBreakSendCommand(FlowBreakFilterBaseCommand):
"""Add an execution break when sol_flow_node sends a packet on its output port.
Arguments: node_id port_name packet_type
Each argument is optional and may be a string or a regular
expression if enclosed in "//". If omitted the regular expression
/.*/ is assumed, matching all patterns.
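    Illustrative examples (port and packet names depend on the node types
    present in the running flow):
        sol_flow break send timer
        sol_flow break send /^timer.*$/ OUT boolean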
"""
def __init__(self):
matches = ["node_id", "port_name", "packet_type"]
FlowBreakFilterBaseCommand.__init__(self, "send", matches, FlowBreakSend)
class FlowBreakProcessCommand(FlowBreakFilterBaseCommand):
"""Add an execution break when sol_flow_node will receive a packet on its input port (port's process()).
Arguments: node_id port_name packet_type
Each argument is optional and may be a string or a regular
expression if enclosed in "//". If omitted the regular expression
/.*/ is assumed, matching all patterns.
"""
def __init__(self):
matches = ["node_id", "port_name", "packet_type"]
FlowBreakFilterBaseCommand.__init__(self, "process", matches, FlowBreakProcess)
class FlowPrintCommand(gdb.Command):
"Print sol_flow types"
def __init__(self):
gdb.Command.__init__(self, "sol_flow print", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_COMMAND, True)
def invoke(self, arg, from_tty):
raise gdb.GdbError("missing sub-command: type, port or options")
def get_node_type_from_exp(arg):
node = gdb.parse_and_eval(arg)
if not node:
raise gdb.GdbError("invalid node: %s" % (arg,))
gt = node.type.unqualified()
sol_flow_node_type = gdb.lookup_type("struct sol_flow_node")
sol_flow_node_type_type = gdb.lookup_type("struct sol_flow_node_type")
if gt == sol_flow_node_type or gt == sol_flow_node_type.pointer() or \
gt == sol_flow_node_type.const().pointer():
return node["type"]
elif gt == sol_flow_node_type_type or gt == sol_flow_node_type_type.pointer() or \
gt == sol_flow_node_type_type.const().pointer():
return node
else:
raise gdb.GdbError("invalid node: %s" % (arg,))
class FlowPrintTypeCommand(gdb.Command):
"""Prints the type information for the given 'struct sol_flow_node'.
Arguments: node
"""
def __init__(self):
gdb.Command.__init__(self, "sol_flow print type", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)
def invoke(self, arg, from_tty):
arg = gdb.string_to_argv(arg)
if len(arg) < 1:
raise gdb.GdbError("missing pointer to struct sol_flow_node")
type = get_node_type_from_exp(arg[0])
gdb.write("%s\n" % (type.dereference(),))
class FlowPrintPortCommand(gdb.Command):
"""Prints the port information for the given node.
Arguments: node [direction] [filter_type] [filter_specifier]
node is the pointer to node where to find the port.
    direction may be 'in', 'out' or 'both'. If omitted, 'both' is assumed.
filter_type may be 'all', 'number' or 'name'. If omitted, all
will be assumed.
If filter_type is 'number', then filter_specifier must be an integer.
If filter_type is 'name', then filter_specifier must be a string
or a regular expression enclosed in "//".
    If filter_type is omitted, then it's guessed from filter_specifier.
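    Illustrative examples (actual port names depend on the node type):
        sol_flow print port node
        sol_flow print port node both name /^IN/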
"""
def __init__(self):
gdb.Command.__init__(self, "sol_flow print port", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)
def _print_ports(self, type, tdesc, member, filter):
array = tdesc[member]
if not array:
return
did = 0
i = 0
if member == "ports_in":
get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
else:
get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
while array[i]:
port = array[i]
if filter["type"] == "all" or \
(filter["type"] == "number" and filter["number"] == i) or \
(filter["type"] == "name" and filter["name"](port["name"].string())):
if did == 0:
gdb.write("%s:\n" % member)
did += 1
gdb.write(" %d: %s (%s)\n description: %s\n" % (
i,
port["name"].string(),
port["data_type"].string(),
port["description"].string(),
))
port_type = get_port_type(type, i)
if port_type["connect"]:
gdb.write(" connect(): %s\n" % (port_type["connect"],))
if port_type["disconnect"]:
gdb.write(" disconnect(): %s\n" % (port_type["disconnect"],))
if member == "ports_in" and port_type["process"]:
gdb.write(" process(): %s\n" % (port_type["process"],))
gdb.write("\n")
i += 1
def invoke(self, arg, from_tty):
arg = gdb.string_to_argv(arg)
if len(arg) < 1:
raise gdb.GdbError("missing pointer to struct sol_flow_node")
direction = "both"
filter = {"type": "all"}
if len(arg) > 1:
direction = arg[1]
if direction not in ("both", "in", "out"):
direction = "both"
try:
filter["number"] = int(arg[1])
filter["type"] = "number"
except ValueError:
filter["name"] = get_str_or_regexp_match(arg[1])
filter["type"] = "name"
if len(arg) > 2:
filter["type"] = arg[2]
if filter["type"] not in ("all", "number", "name"):
try:
filter["number"] = int(arg[2])
filter["type"] = "number"
except ValueError:
filter["name"] = get_str_or_regexp_match(arg[2])
filter["type"] = "name"
elif filter["type"] == 'number':
if len(arg) < 4:
raise gdb.GdbError("missing port number to filter")
filter["number"] = int(arg[3])
elif filter["type"] == 'name':
if len(arg) < 4:
raise gdb.GdbError("missing port name to filter")
filter["name"] = get_str_or_regexp_match(arg[3])
type = get_node_type_from_exp(arg[0])
tdesc = get_type_description(type)
if not tdesc:
gdb.write("no node type description\n")
return
if direction == "both" or direction == "in":
self._print_ports(type, tdesc, "ports_in", filter)
if direction == "both" or direction == "out":
self._print_ports(type, tdesc, "ports_out", filter)
class FlowPrintOptionsCommand(gdb.Command):
"""Prints the options used to open the given node.
Arguments: node options
    node is the pointer to the node whose options will be printed.
    options is the pointer to the options used to open the given node.
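    Illustrative example (assumes 'node' and 'options' are valid expressions
    in the current frame, e.g. while stopped at a 'sol_flow break open'
    breakpoint):
        sol_flow print options node options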
"""
def __init__(self):
gdb.Command.__init__(self, "sol_flow print options", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)
def invoke(self, arg, from_tty):
arg = gdb.string_to_argv(arg)
if len(arg) != 2:
raise gdb.GdbError("Usage: sol_flow print options <node> <options>")
type = get_node_type_from_exp(arg[0])
options = gdb.parse_and_eval(arg[1])
gdb.write(get_type_options_string(type, options))
FlowCommand()
FlowBreakCommand()
FlowBreakOpenCommand()
FlowBreakCloseCommand()
FlowBreakSendCommand()
FlowBreakProcessCommand()
FlowPrintCommand()
FlowPrintTypeCommand()
FlowPrintPortCommand()
FlowPrintOptionsCommand()
register_pretty_printers(gdb.current_objfile())<|fim▁end|> | # |
<|file_name|>GeneticOperations.hh<|end_file_name|><|fim▁begin|>/* FILE GeneticOperations.hh
** PACKAGE GeneticOperations
** AUTHOR Edward S. Blurock
**
** CONTENT
** Prototypes for the "GeneticOperations" package in the CoreObjects environment
**
** COPYRIGHT (C) 1997 Edward S. Blurock
*/
#ifndef CoreObjects_GENETICOPERATIONS_HH
#define CoreObjects_GENETICOPERATIONS_HH
#define GENETIC_DISTRIBUTION_ID GENETIC_BASE + 2
#define GENETIC_DISTRIBUTION_NAME "GeneticDistribution"
#define GENETIC_STDDEV_ID GENETIC_BASE + 3
#define GENETIC_STDDEV_NAME "GeneticStdDev"
#define GENETIC_INTERVAL_ID GENETIC_BASE + 4
#define GENETIC_INTERVAL_NAME "GeneticInterval"
#define GENETIC_CONSTANT_ID GENETIC_BASE + 6<|fim▁hole|>#define GENETIC_CONSTANT_NAME "GeneticConstant"
#define GENETIC_SETOFPARAMS_ID GENETIC_BASE + 5
#define GENETIC_SETOFPARAMS_NAME "GeneticSetOfParameters"
#define SET_NAME1 "C1"
#define SET_NAME2 "C2"
/*I . . . INCLUDES . . . . . . . . . . . . . . . . . . . . . . . . . . . .
*/
#include "GeneticOperationsType.hh"
/*P . . . PROTOTYPES . . . . . . . . . . . . . . . . . . . . . . . . . . .
*/
extern void InitialGeneticEncodeDecodeRoutines();
void AddGeneticClasses(DataSetOfObjectsClass& set);
BaseDataSetOfObjects *PairSet(DataObjectClass *popobjectbase);
#endif<|fim▁end|> | |
<|file_name|>D31.java<|end_file_name|><|fim▁begin|>/*
Copyright 2006 by Sean Luke
Licensed under the Academic Free License version 3.0
See the file "LICENSE" for more information
*/
<|fim▁hole|>
package ec.app.parity.func;
import ec.*;
import ec.app.parity.*;
import ec.gp.*;
import ec.util.*;
/*
* D31.java
*
* Created: Wed Nov 3 18:31:38 1999
* By: Sean Luke
*/
/**
* @author Sean Luke
* @version 1.0
*/
public class D31 extends GPNode
{
public String toString() { return "D31"; }
/*
public void checkConstraints(final EvolutionState state,
final int tree,
final GPIndividual typicalIndividual,
final Parameter individualBase)
{
super.checkConstraints(state,tree,typicalIndividual,individualBase);
if (children.length!=0)
state.output.error("Incorrect number of children for node " +
toStringForError() + " at " +
individualBase);
}
*/
public int expectedChildren() { return 0; }
public void eval(final EvolutionState state,
final int thread,
final GPData input,
final ADFStack stack,
final GPIndividual individual,
final Problem problem)
{
((ParityData)input).x =
((((Parity)problem).bits >>> 31 ) & 1);
}
}<|fim▁end|> | |
<|file_name|>MGI.py<|end_file_name|><|fim▁begin|>import csv
import os
from datetime import datetime
import logging
import re
from dipper.sources.PostgreSQLSource import PostgreSQLSource
from dipper.models.assoc.Association import Assoc
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.Genotype import Genotype
from dipper.models.Reference import Reference
from dipper.models.Model import Model
from dipper import config
from dipper.models.GenomicFeature import Feature, makeChromID
LOG = logging.getLogger(__name__)
class MGI(PostgreSQLSource):
"""
This is the
[Mouse Genome Informatics](http://www.informatics.jax.org/) resource,
from which we process genotype and phenotype data about laboratory mice.
Genotypes leverage the GENO genotype model.
Here, we connect to their public database, and download a subset of
tables/views to get specifically at the geno-pheno data,
then iterate over the tables. We end up effectively performing joins
when adding nodes to the graph.
In order to use this parser, you will need to have user/password connection
details in your conf.yaml file, like:
dbauth : {'mgi' : {'user' : '<username>', 'password' : '<password>'}}
You can request access by contacting [email protected]
"""
# CONSIDER IF WE NEED:
# mgi_organism_acc_view:
# Consider using this for the taxon mapping instead of
# the hashmap encoded below
# mgi_reference_allele_view:
    # Don't believe this view is used in either
    # the genotype or phenotype view
# all_allele_cellline_view: When we want to start dealing with cell lines
# mgi_note_strain_view: prose descriptions of strains.
# prb_strain_summary_view:
    # Don't believe this view is used in
    # either the genotype or phenotype view
# prb_strain_marker_view:
# eventually i think we want this because
# it has other relevant markers that are affected
resources = {
'query_map': [
{
'query': '../../resources/sql/mgi/mgi_dbinfo.sql',
'outfile': 'mgi_dbinfo',
'Force': True
},
{
'query': '../../resources/sql/mgi/gxd_genotype_view.sql',
'outfile': 'gxd_genotype_view'
},
{
'query': '../../resources/sql/mgi/gxd_genotype_summary_view.sql',
'outfile': 'gxd_genotype_summary_view'
},
{
'query': '../../resources/sql/mgi/gxd_allelepair_view.sql',
'outfile': 'gxd_allelepair_view'
},
{
'query': '../../resources/sql/mgi/all_summary_view.sql',
'outfile': 'all_summary_view'
},
{
'query': '../../resources/sql/mgi/all_allele_view.sql',
'outfile': 'all_allele_view'
},
{
'query': '../../resources/sql/mgi/all_allele_mutation_view.sql',
'outfile': 'all_allele_mutation_view'
},
{
'query': '../../resources/sql/mgi/mrk_marker_view.sql',
'outfile': 'mrk_marker_view'
},
{
'query': '../../resources/sql/mgi/voc_annot_view.sql',
'outfile': 'voc_annot_view'
},
{
'query': '../../resources/sql/mgi/evidence.sql',
'outfile': 'evidence_view'
},
{
'query': '../../resources/sql/mgi/bib_acc_view.sql',
'outfile': 'bib_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_view.sql',
'outfile': 'prb_strain_view'
},
{
'query': '../../resources/sql/mgi/mrk_summary_view.sql',
'outfile': 'mrk_summary_view'
},
{
'query': '../../resources/sql/mgi/mrk_acc_view.sql',
'outfile': 'mrk_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_acc_view.sql',
'outfile': 'prb_strain_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_genotype_view.sql',
'outfile': 'prb_strain_genotype_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_vocevidence_view.sql',
'outfile': 'mgi_note_vocevidence_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_allele_view.sql',
'outfile': 'mgi_note_allele_view'
},
{
'query': '../../resources/sql/mgi/mrk_location_cache.sql',
'outfile': 'mrk_location_cache' # gene locations
}
],
'test_keys': '../../resources/mgi_test_keys.yaml'
}
# with an existing set of (fresh) files in the shell; we can get a head start with:
# for v in raw/mgi/*;do echo -e "\t\t'${v##*/}': \
# {\n\t\t\t'columns': [";head -1 $v|tr '\t' '\n'|sed "s/\(.*\)/\t\t\t\t'\1',/";done
tables = {
'all_allele_mutation_view': {
'columns': [
'_allele_key',
'mutation']},
'all_allele_view': {
'columns': [
'_allele_key',
'_marker_key',
'_strain_key',
'symbol',
'name',
'iswildtype']},
'all_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'description',
'short_description']},
'bib_acc_view': {
'columns': [
'accid',
'prefixpart',
'numericpart',
'_object_key',
'logicaldb',
'_logicaldb_key']},
'evidence_view': {
'columns': [
'_annotevidence_key',
'_annot_key',
'evidencecode',
'jnumid',
'term',
'value',
'annottype']},
'gxd_allelepair_view': {
'columns': [
'_allelepair_key',
'_genotype_key',
'_allele_key_1',
'_allele_key_2',
'allele1',
'allele2',
'allelestate']},
'gxd_genotype_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'gxd_genotype_view': {
'columns': [
'_genotype_key',
'_strain_key',
'strain',
'mgiid']},
'mgi_note_allele_view': {
'columns': [
'_object_key',
'notetype',
'note',
'sequencenum']},
'mgi_note_vocevidence_view': {
'columns': [
'_object_key',
'note']},
'mgi_relationship_transgene_genes': {
'columns': [
'rel_key',
'object_1',
'allele_id',
'allele_label',
'category_key',
'category_name',
'property_key',
'property_name',
'property_value']},
'mrk_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred',
'_organism_key']},
'mrk_location_cache': {
'columns': [
'_marker_key',
'_organism_key',
'chromosome',
'startcoordinate',
'endcoordinate',
'strand',
'version']},
'mrk_marker_view': {
'columns': [
'_marker_key',
'_organism_key',
'_marker_status_key',
'symbol',
'name',
'latinname',
'markertype']},
'mrk_summary_view': {
'columns': [
'accid',
'_logicaldb_key',
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'prb_strain_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred']},
'prb_strain_genotype_view': {
'columns': [
'_strain_key',
'_genotype_key']},
'prb_strain_view': {
'columns': [
'_strain_key',
'strain',
'species']},
'voc_annot_view': {
'columns': [
'_annot_key',
'annottype',
'_object_key',
'_term_key',
'_qualifier_key',
'qualifier',
'term',
'accid']},
}
# For ambiguous/undefined taxa terms that will
# conflict with seq alt_type portion of local tt
unknown_taxa = [
'Not Applicable',
'Not Specified',
]
# for testing purposes, this is a list of internal db keys
# to match and select only portions of the source
def __init__(
self,
graph_type,
are_bnodes_skolemized,
data_release_version=None
):
super().__init__(
graph_type=graph_type,
are_bnodes_skolemized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='mgi',
ingest_title='Mouse Genome Informatics',
ingest_url='http://www.informatics.jax.org/',
ingest_logo="source-mgi.png",
license_url=None,
data_rights='http://www.informatics.jax.org/mgihome/other/copyright.shtml',
file_handle=None)
# so that we don't have to deal with BNodes,
# we will create hash lookups
# for the internal identifiers the hash will hold
# the type-specific-object-keys to MGI public identifiers.
# then, subsequent views of the table will lookup the identifiers
# in the hash. this allows us to do the 'joining' on the fly
self.idhash = {
'allele': {}, 'marker': {}, 'publication': {}, 'strain': {},
'genotype': {}, 'annot': {}, 'notes': {}, 'seqalt': {}}
# to store if a marker is a class or indiv
self.markers = {
'classes': [], 'indiv': []}
# use this to store internally generated labels for various features
self.label_hash = {}
# use this to store the genotype strain ids
# for building genotype labels
self.geno_bkgd = {}
self.strain_to_genotype_map = {}
self.wildtype_alleles = set()
# also add the gene ids from the test_ids
# in order to capture transgenes of the test set
if 'gene' in self.all_test_ids:
self.test_ids = self.all_test_ids['gene']
else:
LOG.warning("not configured with gene test ids.")
self.test_ids = []
self.test_keys = self.open_and_parse_yaml(self.resources['test_keys'])
def fetch(self, is_dl_forced=False):
"""
For the MGI resource, we connect to the remote database,
and pull the tables into local files.
We'll check the local table versions against the remote version
:return:
"""
# check if config exists; if it doesn't, error out and let user know
if 'dbauth' not in config.get_config() and 'mgi' \
not in config.get_config()['dbauth']:
LOG.error("not configured with PG user/password.")
# create the connection details for MGI
cxn = config.get_config()['dbauth']['mgi']
pg_iri = ''.join((
'jdbc:postgresql://', cxn['host'], ':', str(cxn['port']), '/',
cxn['database']))
self.dataset.set_ingest_source(pg_iri)
self.dataset.set_ingest_source_file_version_retrieved_on(
pg_iri,
datetime.today().strftime('%Y-%m-%d'))
# process the tables
# self.fetch_from_pgdb(self.tables, cxn, 100) # for testing only
# self.fetch_from_pgdb(self.tables, cxn, None, is_dl_forced)
for query_map in self.resources['query_map']:
query_fh = open(os.path.join(
os.path.dirname(__file__), query_map['query']), 'r')
query = query_fh.read()
# force = False
# if 'Force' in query_map: # unused
# force = query_map['Force']
self.fetch_query_from_pgdb(
query_map['outfile'], query, None, cxn)
        # always get this - it has the version info
self.fetch_transgene_genes_from_db(cxn)
datestamp = ver = None
# get the resource version information from
# table mgi_dbinfo, already fetched above
outfile = '/'.join((self.rawdir, 'mgi_dbinfo'))
if os.path.exists(outfile):
with open(outfile, 'r') as reader:
reader.readline() # read the header row; skip
info = reader.readline()
cols = info.split('\t')
ver = cols[0] # col 0 is public_version
ver = ver.replace('MGI ', '') # MGI 5.20 --> 5.20
# MGI has a datestamp for the data within the database;
# use it instead of the download date
# datestamp in the table: 2014-12-23 00:14:20[.12345]
# modification date without micro seconds
dat = cols[1].strip().split('.')[0]
datestamp = datetime.strptime(
dat, "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
self.dataset.set_ingest_source_file_version_num(pg_iri, ver)
self.dataset.set_ingest_source_file_version_date(pg_iri, datestamp)
def parse(self, limit=None):
"""
We process each of the postgres tables in turn.
The order of processing is important here, as we build
        up a hashmap of internal vs external identifiers
(unique keys by type to MGI id). These include allele, marker (gene),
publication, strain, genotype, annotation (association),
and descriptive notes.
:param limit: Only parse this many rows in each table
:return:
"""
if limit is not None:
LOG.info("Only parsing first %d rows of each file", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
# the following will provide us the hash-lookups
# These must be processed in a specific order
self._process_prb_strain_acc_view(limit)
self._process_mrk_acc_view()
self._process_all_summary_view(limit)
self._process_bib_acc_view(limit)
self._process_gxd_genotype_summary_view(limit)
# The following will use the hash populated above
# to lookup the ids when filling in the graph
self._process_prb_strain_view(limit)
# self._process_prb_strain_genotype_view(limit)
self._process_gxd_genotype_view(limit)
self._process_mrk_marker_view(limit)
self._process_mrk_acc_view_for_equiv(limit)
self._process_mrk_summary_view(limit)
self._process_all_allele_view(limit)
self._process_all_allele_mutation_view(limit)
self._process_gxd_allele_pair_view(limit)
self._process_voc_annot_view(limit)
self._process_evidence_view(limit)
self._process_mgi_note_vocevidence_view(limit)
self._process_mrk_location_cache(limit)
self.process_mgi_relationship_transgene_genes(limit)
self.process_mgi_note_allele_view(limit)
LOG.info("Finished parsing.")
LOG.info("Loaded %d nodes", len(self.graph))
def fetch_transgene_genes_from_db(self, cxn):
"""
This is a custom query to fetch the non-mouse genes that
are part of transgene alleles.
:param cxn:
:return:
"""
query = '''
SELECT r._relationship_key as rel_key,
r._object_key_1 as object_1,
a.accid as allele_id,
alabel.label as allele_label,
rc._category_key as category_key,
rc.name as category_name,
t._term_key as property_key,
t.term as property_name,
rp.value as property_value
FROM mgi_relationship r
JOIN mgi_relationship_category rc ON r._category_key = rc._category_key
JOIN acc_accession a ON r._object_key_1 = a._object_key
AND rc._mgitype_key_1 = a._mgitype_key
AND a._logicaldb_key = 1
JOIN all_label alabel ON a._object_key = alabel._allele_key
AND alabel._label_status_key = 1
AND alabel.priority = 1
JOIN mgi_relationship_property rp ON r._relationship_key = rp._relationship_key
AND rp._propertyname_key = 12948292
JOIN voc_term t ON rp._propertyname_key = t._term_key
WHERE r._category_key = 1004
'''
self.fetch_query_from_pgdb(
'mgi_relationship_transgene_genes', query, None, cxn)
def _process_gxd_genotype_view(self, limit=None):
"""
This table indicates the relationship between a genotype
and it's background strain. It leverages the Genotype class methods
to do this.
Makes these triples:
<MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
<MGI:strainid> a GENO:genomic_background
If the genotype id isn't in the hashmap, it adds it here
(but this shouldn't happen):
<MGI:genotypeid> a GENO:genotype
If the strain isn't in the hashmap, it also adds it here with a
monarchized identifier using the unique key of the strain,
formatted like: :_mgistrainkey12345
:param limit:
:return:
"""
src_key = 'gxd_genotype_view'
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
col = self.tables[src_key]['columns']
raw = '/'.join((self.rawdir, src_key))
LOG.info("getting genotypes and their backgrounds")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
row = line.split('\t')
line_num += 1
genotype_key = row[col.index('_genotype_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
                strain = row[col.index('strain')].strip()
mgiid = row[col.index('mgiid')].strip()
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
if self.idhash['genotype'].get(genotype_key) is None:
# just in case we haven't seen it before,
# catch and add the id mapping here
self.idhash['genotype'][genotype_key] = mgiid
geno.addGenotype(mgiid, None)
# the label is elsewhere...
# need to add the MGI label as a synonym
# if it's in the hash,
# assume that the individual was created elsewhere
strain_id = self.idhash['strain'].get(strain_key)
background_type = self.globaltt['genomic_background']
if strain_id is None or int(strain_key) < 0:
if strain_id is None:
# some of the strains don't have public identifiers!
# so we make one up, and add it to the hash
strain_id = self._make_internal_identifier('strain', strain_key)
self.idhash['strain'].update({strain_key: strain_id})
model.addComment(strain_id, "strain_key:" + strain_key)
elif int(strain_key) < 0:
# these are ones that are unidentified/unknown.
# so add instances of each.
strain_id = self._make_internal_identifier(
'strain', re.sub(r':', '', str(strain_id)))
strain_id += re.sub(r':', '', str(mgiid))
strain_id = re.sub(r'^_', '_:', strain_id)
strain_id = re.sub(r'::', ':', strain_id)
model.addDescription(
strain_id,
"This genomic background is unknown. " +
"This is a placeholder background for " +
mgiid + "."
)
background_type = self.globaltt[
'unspecified_genomic_background']
# add it back to the idhash
LOG.info(
"adding background as internal id: %s %s: %s",
strain_key, strain, strain_id)
geno.addGenomicBackgroundToGenotype(
strain_id, mgiid, background_type)
self.label_hash[strain_id] = strain
# add BG to a hash so we can build the genotype label later
self.geno_bkgd[mgiid] = strain_id
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_gxd_genotype_summary_view(self, limit=None):
"""
        Add the genotype internal-id-to-mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
        aggregate the vslcs into a hashmap keyed by genotype
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
src_key = 'gxd_genotype_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
geno_hash = {}
raw = '/'.join((self.rawdir, src_key))
LOG.info("building labels for genotypes")
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
subtype = row[col.index('subtype')].strip()
short_description = row[col.index('short_description')].strip()
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('genotype'):
continue
# add the internal genotype to mgi mapping
self.idhash['genotype'][object_key] = mgiid
if preferred == '1':
d = re.sub(r'\,', '/', short_description.strip())
if mgiid not in geno_hash:
geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
'key': object_key}
else:
vslcs = geno_hash[mgiid].get('vslcs')
vslcs.append(d)
else:
pass
# TODO what to do with != preferred
if not self.test_mode and limit is not None and line_num > limit:
break
# now, loop through the hash and add the genotypes as individuals
# we add the mgi genotype as a label
# (we generate our own label later and add as a synonym)
geno = Genotype(graph)
for gt in geno_hash:
genotype = geno_hash.get(gt)
gvc = sorted(genotype.get('vslcs'))
label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
model.addComment(
gt, self._make_internal_identifier(
'genotype', genotype.get('key')
)
)
geno.addGenotype(gt, label.strip())
def _process_all_summary_view(self, limit):
"""
Here, we get the allele definitions: id, label, description, type
We also add the id to this source's global idhash for lookup later
<alleleid> a OWL:NamedIndividual
rdfs:label "allele symbol"
dc:description "long allele name"
:param limit:
:return:
"""
src_key = 'all_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
LOG.info(
"alleles with labels and descriptions from all_summary_view")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
# head -1 workspace/build-mgi-ttl/dipper/raw/mgi/all_summary_view|\
# tr '\t' '\n' | grep -n . | \
# awk -F':' '{col=$1;$1="";print $0,",\t #" col}'
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
# bail if the row is malformed
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
LOG.warning(line.format())
continue<|fim▁hole|> # no stray tab in the description column
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
description = row[col.index('description')].strip()
short_description = row[col.index('short_description')].strip()
# NOTE: May want to filter alleles based on the preferred field
# (preferred = 1) or will get duplicates
# (24288, to be exact...
# Reduced to 480 if filtered on preferred = 1)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('allele'):
continue
# we are setting the allele type to None,
# so that we can add the type later
# since we don't actually know
# if it's a reference or altered allele
# altype = None # temporary; we'll assign the type later
# set type to a parent term incase a more specific term is not found
altype = self.globaltt['allele']
# If we want to filter on preferred:
if preferred == '1':
# add the allele key to the hash for later lookup
self.idhash['allele'][object_key] = mgiid
# TODO consider not adding the individuals in this one
model.addIndividualToGraph(
mgiid, short_description.strip(), altype, description.strip()
)
self.label_hash[mgiid] = short_description.strip()
# TODO deal with non-preferreds, are these deprecated?
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_all_allele_view(self, limit):
"""
Add the allele as a variant locus (or reference locus if wild-type).
If the marker is specified, we add the link to the marker.
We assume that the MGI ids are available in the idhash,
added in all_summary_view.
We add the sequence alteration as a BNode here, if there is a marker.
Otherwise, the allele itself is a sequence alteration.
Triples:
<MGI:allele_id> a GENO:variant_locus
OR GENO:reference_locus
OR GENO:sequence_alteration IF no marker_id specified.
[GENO:has_variant_part OR GENO:has_reference_part] <MGI:marker_id>
GENO:derived_from <MGI:strain_id>
GENO:has_variant_part <_seq_alt_id>
<_seq_alt_id> a GENO:sequence_alteration
derives_from <strain_id>
:param limit:
:return:
"""
src_key = 'all_allele_view'
# transmission_key -> inheritance? Need to locate related table.
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
LOG.info(
"adding alleles, mapping to markers, extracting their "
"sequence alterations from all_allele_view")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
# bail if the row is malformed
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
LOG.warning(line.format())
continue
allele_key = row[col.index('_allele_key')].strip()
marker_key = row[col.index('_marker_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
symbol = row[col.index('symbol')].strip()
name = row[col.index('name')].strip()
iswildtype = row[col.index('iswildtype')].strip()
# TODO update processing to use this view better
# including jnums!
if self.test_mode is True and \
int(allele_key) not in self.test_keys.get('allele'):
continue
# so are allele_key ints or not? -TEC
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
LOG.error(
"what to do! can't find allele_id. skipping %s %s",
allele_key, symbol)
continue
marker_id = None
if marker_key is not None and marker_key != '':
# we make the assumption here that the markers
# have already been added to the table
marker_id = self.idhash['marker'].get(marker_key)
if marker_id is None:
LOG.error(
"what to do! can't find marker_id. skipping %s %s",
marker_key, symbol)
continue
iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
# for non-wild type alleles:
if iswildtype == '0':
locus_type = self.globaltt['variant_locus']
locus_rel = self.globaltt['is_allele_of']
# for wild type alleles:
elif iswildtype == '1':
locus_type = self.globaltt['reference_locus']
locus_rel = self.globaltt['is_reference_allele_of']
# add the allele to the wildtype set for lookup later
self.wildtype_alleles.add(allele_id)
else:
locus_rel = None
locus_type = None
model.addIndividualToGraph(allele_id, symbol, locus_type)
model.makeLeader(allele_id)
self.label_hash[allele_id] = symbol
self.idhash['seqalt'][allele_key] = iseqalt_id
# HACK - if the label of the allele == marker,
# then make the thing a seq alt
allele_label = self.label_hash.get(allele_id)
marker_label = self.label_hash.get(marker_id)
if allele_label is not None and allele_label == marker_label:
# model.addSameIndividual(allele_id, marker_id)
# this causes disjoint category violations, see
# https://github.com/monarch-initiative/dipper/issues/519
self.idhash['seqalt'][allele_key] = allele_id
model.addComment(
allele_id,
self._make_internal_identifier('allele', allele_key)
)
if marker_id is not None:
# marker_id will be none if the allele
# is not linked to a marker
# (as in, it's not mapped to a locus)
geno.addAlleleOfGene(allele_id, marker_id, locus_rel)
# sequence alteration in strain
if iswildtype == '0':
sa_label = symbol
sa_id = iseqalt_id
if marker_key is not None \
and allele_label != marker_label and marker_key != '':
# sequence alteration has label reformatted(symbol)
if re.match(r".*<.*>.*", symbol):
sa_label = re.sub(r".*<", "<", symbol)
elif re.match(r"\+", symbol):
# TODO: Check to see if this is the proper handling
# as while symbol is just +,
# marker symbol has entries without any <+>.
sa_label = '<+>'
geno.addSequenceAlterationToVariantLocus(iseqalt_id,
allele_id)
else:
# make the sequence alteration == allele
sa_id = allele_id
# else this will end up adding the non-located transgenes
# as sequence alterations also removing the < and > from sa
sa_label = re.sub(r'[\<\>]', '', sa_label)
geno.addSequenceAlteration(sa_id, sa_label, None, name)
self.label_hash[sa_id] = sa_label
strain_id = self.idhash['strain'].get(strain_key)
# scrub out if the strain is "not specified"
if strain_id is not None and \
strain_id not in ['MGI:4867032', 'MGI:5649511']:
geno.addSequenceDerivesFrom(allele_id, strain_id)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_gxd_allele_pair_view(self, limit):
"""
This assumes that the genotype and alleles
have already been added to the id hashmap.
We use the Genotype methods to add all the parts we need.
Triples added:
<genotype_id> has_part <vslc>
<vslc> has_part <allele1>
<vslc> has_part <allele2>
<vslc> has_zygosity <zygosity>
:param limit:
:return:
"""
src_key = 'gxd_allelepair_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("processing allele pairs (VSLCs) for genotypes")
geno_hash = {}
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
allelepair_key = row[col.index('_allelepair_key')].strip()
genotype_key = row[col.index('_genotype_key')].strip()
allele_key_1 = row[col.index('_allele_key_1')].strip()
allele_key_2 = row[col.index('_allele_key_2')].strip()
allele1 = row[col.index('allele1')].strip()
allele2 = row[col.index('allele2')].strip()
allelestate = row[col.index('allelestate')].strip()
# NOTE: symbol = gene/marker,
# allele1 + allele2 = VSLC,
# allele1/allele2 = variant locus,
# allelestate = zygosity
# FIXME Need to handle alleles not in the *<*> format,
# incl gene traps, induced mut, & transgenics
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id not in geno_hash:
geno_hash[genotype_id] = set()
if genotype_id is None:
LOG.error(
"genotype_id not found for key %s; skipping", genotype_key)
continue
allele1_id = self.idhash['allele'].get(allele_key_1)
allele2_id = self.idhash['allele'].get(allele_key_2)
# Need to map the allelestate to a zygosity term
zygosity_id = self.resolve(allelestate.strip())
ivslc_id = self._make_internal_identifier('vslc', allelepair_key)
geno_hash[genotype_id].add(ivslc_id)
# TODO: VSLC label likely needs processing similar to
# the processing in the all_allele_view
# FIXME: handle null alleles
vslc_label = allele1 + '/'
if allele2_id is None:
if zygosity_id in [
self.globaltt['hemizygous insertion-linked'],
self.globaltt['hemizygous-x'],
self.globaltt['hemizygous-y'],
self.globaltt['hemizygous'],
]:
vslc_label += '0'
elif zygosity_id == self.globaltt['heterozygous']:
vslc_label += '+'
elif zygosity_id == self.globaltt['indeterminate']:
vslc_label += '?'
elif zygosity_id == self.globaltt['heteroplasmic']:
vslc_label += '?' # todo is there anything else to add here?
elif zygosity_id == self.globaltt['homoplasmic']:
vslc_label += '?' # todo is there anything else to add here?
elif zygosity_id == self.globaltt['homozygous']:
# we shouldn't get here, but for testing this is handy
vslc_label += allele1
else:
LOG.info(
"A different kind of zygosity found is: %s",
self.globaltcid[zygosity_id])
vslc_label += '?'
else:
vslc_label += allele2
model.addIndividualToGraph(
ivslc_id,
vslc_label,
self.globaltt['variant single locus complement']
)
self.label_hash[ivslc_id] = vslc_label
rel1 = rel2 = self.globaltt['has_variant_part']
if allele1_id in self.wildtype_alleles:
rel1 = self.globaltt['has_reference_part']
if allele2_id in self.wildtype_alleles:
rel2 = self.globaltt['has_reference_part']
geno.addPartsToVSLC(
ivslc_id, allele1_id, allele2_id, zygosity_id, rel1, rel2
)
# if genotype_id not in geno_hash:
# geno_hash[genotype_id] = [vslc_label]
# else:
# geno_hash[genotype_id] += [vslc_label]
if not self.test_mode and limit is not None and line_num > limit:
break
# build the gvc and the genotype label
for gt in geno_hash.keys():
if gt is None: # not sure why, but sometimes this is the case
continue
vslcs = sorted(list(geno_hash[gt]))
gvc_label = None
if len(vslcs) > 1:
gvc_id = re.sub(r'_', '', ('-'.join(vslcs)))
gvc_id = re.sub(r':', '', gvc_id)
gvc_id = self.make_id(gvc_id, '_')
vslc_labels = []
for v in vslcs:
vslc_labels.append(self.label_hash[v])
gvc_label = '; '.join(vslc_labels)
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
self.label_hash[gvc_id] = gvc_label
for v in vslcs:
geno.addParts(v, gvc_id, self.globaltt['has_variant_part'])
geno.addVSLCtoParent(v, gvc_id)
geno.addParts(gvc_id, gt, self.globaltt['has_variant_part'])
elif len(vslcs) == 1:
gvc_id = vslcs[0]
gvc_label = self.label_hash[gvc_id]
# type the VSLC as also a GVC
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement']
)
geno.addVSLCtoParent(gvc_id, gt)
else:
LOG.info("No VSLCs for %s", gt)
# make the genotype label = gvc + background
bkgd_id = self.geno_bkgd.get(gt)
if bkgd_id is not None:
bkgd_label = self.label_hash.get(bkgd_id)
if bkgd_label is None:
bkgd_label = bkgd_id # just in case
else:
bkgd_label = 'unspecified background'
if gvc_label is not None:
genotype_label = gvc_label + ' [' + bkgd_label + ']'
else:
genotype_label = '[' + bkgd_label + ']'
self.label_hash[gt] = genotype_label
def _process_all_allele_mutation_view(self, limit):
"""
This fetches the mutation type for the alleles,
and maps them to the sequence alteration.
Note that we create a BNode for the sequence alteration because
it isn't publicly identified.
<sequence alteration id> a <SO:mutation_type>
:param limit:
:return:
"""
src_key = 'all_allele_mutation_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting mutation types for sequence alterations")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
allele_key = row[col.index('_allele_key')].strip()
mutation = row[col.index('mutation')].strip()
iseqalt_id = self.idhash['seqalt'].get(allele_key)
if iseqalt_id is None:
continue
# nothing will ever connect w/these 350k bnode "individuals"
# iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
if self.test_mode and int(allele_key) \
not in self.test_keys.get('allele'):
continue
# TODO we might need to map the seq alteration to the MGI id
# for unlocated things; need to use hashmap
# map the sequence_alteration_type
seq_alt_type_id = self.resolve(mutation, mandatory=False)
if seq_alt_type_id == mutation:
LOG.error("No mappjng found for seq alt '%s'", mutation)
LOG.info("Defaulting to 'sequence_alteration'")
seq_alt_type_id = self.globaltt['sequence_alteration']
# HACK - if the seq alteration is a transgene,
# then make sure it is a transgenic insertion
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is not None:
allele_label = self.label_hash.get(allele_id)
if allele_label is not None and re.search(r'Tg\(', allele_label):
LOG.info(
"Found a transgenic insertion for %s", allele_label)
# transgenic_insertion, instead of plain old insertion
seq_alt_type_id = self.globaltt["transgenic_insertion"]
model.addIndividualToGraph(iseqalt_id, None, seq_alt_type_id)
if not self.test_mode and limit is not None and line_num > limit:
break
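    # Worked example of the transgene hack above (the allele label is
    # hypothetical): a row whose mutation resolves to a plain insertion class,
    # but whose allele label matches r'Tg\(' (e.g. 'Tg(CAG-EGFP)1Osb'), is
    # re-typed to self.globaltt['transgenic_insertion'] before being added.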
def _process_voc_annot_view(self, limit):
"""
        This MGI table represents annotation associations (e.g. genotype-to-phenotype,
        genotype/allele-to-disease, and marker-to-type).
We add the internal annotation id to the idhashmap.
It is expected that the genotypes have already been added to the idhash
:param limit:
:return:
"""
# TODO also get Strain/Attributes (annottypekey = 1000)
# TODO what is Phenotype (Derived) vs
# non-derived? (annottypekey = 1015)
# TODO is evidence in this table? what is the evidence vocab key?
src_key = 'voc_annot_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
LOG.info("getting G2P associations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip('\n').split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
row = line.rstrip('\n').split('\t')
annot_key = row[col.index('_annot_key')]
annot_type = row[col.index('annottype')]
object_key = row[col.index('_object_key')]
term_key = row[col.index('_term_key')]
qualifier_key = row[col.index('_qualifier_key')]
qualifier = row[col.index('qualifier')]
# term,
accid = row[col.index('accid')]
if self.test_mode is True:
if int(annot_key) not in self.test_keys.get('annot'):
continue
# qualifier of "norm" means the phenotype was measured but
# was normal, since we don't have negation or normal phenotypes
# modelled just yet, skip the row
if qualifier == 'norm':
continue
# iassoc_id = self._make_internal_identifier('annot', annot_key)
# assoc_id = self.make_id(iassoc_id)
assoc_id = None
# Mammalian Phenotype/Genotype are curated G2P assoc
if annot_type == 'Mammalian Phenotype/Genotype':
line_num += 1
# We expect the label for the phenotype
# to be taken care of elsewhere
model.addClassToGraph(accid, None)
genotype_id = self.idhash['genotype'].get(object_key)
if genotype_id is None:
LOG.error(
"can't find genotype id for %s", object_key)
else:
# add the association
assoc = G2PAssoc(graph, self.name, genotype_id, accid)
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
                # DO/Genotype annotations are disease models
elif annot_type == 'DO/Genotype':
# skip NOT annotations for now FIXME
if qualifier_key == '1614157':
continue
genotype_id = self.idhash['genotype'].get(object_key)
if genotype_id is None:
LOG.error("can't find genotype id for %s", object_key)
else:
# add the association
assoc = Assoc(graph, self.name)
# TODO PYLINT
# Redefinition of assoc type from
# dipper.models.assoc.G2PAssoc.G2PAssoc to
# dipper.models.assoc.Association.Assoc
assoc.set_subject(genotype_id)
assoc.set_object(accid)
assoc.set_relationship(self.globaltt['is model of'])
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
elif annot_type == 'MCV/Marker':
# marker category == type
marker_id = self.idhash['marker'].get(object_key)
if str(term_key).strip() in self.localtt:
# check "Not Applicable": "reference_locus"
term_id = self.resolve(str(term_key).strip())
else:
term_id = None
                    LOG.warning('No type mapping for: %s', term_key)
# note that the accid here is an internal mouse cv term,
# and we don't use it.
if term_id is not None and marker_id is not None:
# do something special for transgenics -
# make sure these are transgenic insertions
model.addType(marker_id, term_id)
elif annot_type == 'DO/Allele': # allele/Disease
allele_id = self.idhash['allele'].get(object_key)
if allele_id is None:
LOG.error("can't find genotype id for %s", object_key)
else:
# add the association
assoc = Assoc(graph, self.name)
assoc.set_subject(allele_id)
assoc.set_object(accid)
assoc.set_relationship(self.globaltt['is model of'])
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
if assoc_id is not None:
# add the assoc to the hashmap (using the monarch id)
self.idhash['annot'][annot_key] = assoc_id
model.addComment(assoc_id, "annot_key:" + annot_key)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_evidence_view(self, limit):
"""
Here we fetch the evidence (code and publication) for the associations.
The evidence codes are mapped from the standard GO codes to ECO.
J numbers are added for publications.
We will only add the evidence if the annotation is in our idhash.
We also pull in evidence qualifiers, as of June 2018 they are
Data Interpretation Center (eg IMPC)
external ref (eg UniProtKB:Q9JHI2-3 for Proteoform/Marker assoc)
Phenotyping Center (eg WTSI)
Resource Name (eg MGP)
MP-Sex-Specificity (eg NA, M, F)
Triples:
<annot_id> dc:evidence <evidence_id>
<pub_id> a owl:NamedIndividual
<annot_id> dc:source <pub_id>
:param limit:
:return:
"""
src_key = 'evidence_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
LOG.info("getting evidence and pubs for annotations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
line = reader.readline()
line = line.rstrip("\n")
row = line.split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
row = line.split('\t')
line_num += 1
annot_evidence_key = row[col.index('_annotevidence_key')]
annot_key = row[col.index('_annot_key')]
evidence_code = row[col.index('evidencecode')]
jnumid = row[col.index('jnumid')]
qualifier = row[col.index('term')]
qualifier_value = row[col.index('value')]
# annotation_type = row[col.index('annottype')]
if self.test_mode and annot_key not in self.test_keys.get('annot'):
continue
# add the association id to map to the evidence key
# (to attach the right note to the right assn)
self.idhash['notes'][annot_evidence_key] = annot_key
assoc_id = self.idhash['annot'].get(annot_key)
if assoc_id is None:
# assume that we only want to add the evidence/source
# for annots that we have in our db
continue
evidence_id = self.resolve(evidence_code)
reference = Reference(graph, jnumid)
reference.addRefToGraph()
# add the ECO and citation information to the annot
model.addTriple(assoc_id, self.globaltt['has evidence'], evidence_id)
model.addTriple(assoc_id, self.globaltt['Source'], jnumid)
# For Mammalian Phenotype/Genotype annotation types
# MGI adds sex specificity qualifiers here
if qualifier == 'MP-Sex-Specificity' and qualifier_value in ('M', 'F'):
model._addSexSpecificity(assoc_id, self.resolve(qualifier_value))
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_bib_acc_view(self, limit):
"""
This traverses the table twice:
once to look up the internal key to J number mapping
        for the id hashmap, then again to make the equivalences.
All internal keys have both a J and MGI identifier.
This will make equivalences between the different pub ids
Triples:
<pub_id> a owl:NamedIndividual
<other_pub_id> a owl:NamedIndividual
<pub_id> owl:sameAs <other_pub_id>
:param limit:
:return:
"""
src_key = 'bib_acc_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
# firstpass, get the J number mapping, and add to the global hash
LOG.info('populating pub id hash')
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(filereader)
if not self.check_fileheader(col, row, src_key):
pass
for row in filereader:
accid = row[col.index('accid')]
prefixpart = row[col.index('prefixpart')]
# 'numericpart'
object_key = int(row[col.index('_object_key')]) # likely unstable
# logicaldb = row[col.index('logicaldb')]
# logicaldb_key = row[col.index('_logicaldb_key')]
if self.test_mode and object_key not in self.test_keys.get('pub'):
continue
# we use the J number here because
# it is the externally-accessible identifier
if prefixpart != 'J:':
continue
self.idhash['publication'][object_key] = accid
reference = Reference(graph, accid)
reference.addRefToGraph()
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
# 2nd pass, look up the MGI identifier in the hash
LOG.info("getting pub equivalent ids")
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(filereader) # header already checked
for row in filereader:
accid = row[col.index('accid')]
prefixpart = row[col.index('prefixpart')]
# 'numericpart'
object_key = int(row[col.index('_object_key')])
logicaldb = row[col.index('logicaldb')].strip()
logicaldb_key = row[col.index('_logicaldb_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('pub'):
continue
jid = self.idhash['publication'].get(object_key)
pub_id = None
if logicaldb_key == '29': # pubmed
pub_id = 'PMID:' + accid
elif logicaldb_key == '1' and prefixpart[:4] == 'MGI:':
# don't get the J numbers,
                    # because we don't need to make the equiv to itself.
pub_id = accid
elif logicaldb == 'Journal Link':
# some DOIs seem to have spaces
# FIXME MGI needs to FIX THESE UPSTREAM!!!!
# we'll scrub them here for the time being
accid = re.sub(r'\s+', '', accid)
# some DOIs have un-urlencoded brackets <>
accid = re.sub(r'<', '%3C', accid)
accid = re.sub(r'>', '%3E', accid)
pub_id = 'DOI:' + accid
elif logicaldb_key == '1' and re.match(r'J:', prefixpart):
# we can skip the J numbers
continue
if pub_id is not None:
# only add these to the graph if
# it's mapped to something we understand
reference = Reference(graph, pub_id)
# make the assumption that if it is a PMID, it is a journal
if re.match(r'PMID', pub_id):
reference.setType(self.globaltt['journal article'])
model.makeLeader(pub_id)
reference.addRefToGraph()
model.addSameIndividual(jid, pub_id)
else:
LOG.warning(
"Publication from (%s) not mapped for %s",
logicaldb, object_key)
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
def _process_prb_strain_view(self, limit):
"""
Process a table to get strains (with internal ids), and their labels.
These strains are created as instances of the species that they are.
Triples:
<strain id> a GENO:intrinsic_genotype
rdfs:label "strain label"
RO:in_taxon <NCBI taxon id>
:param limit:
:return:
"""
src_key = 'prb_strain_view'
# Only 9 strain types if we want to map them
        # recombinant congenic, inbred strain, NA,
# congenic, consomic, coisogenic,
# recombinant inbred, NS, conplastic
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting strains and adding their taxa")
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
strain_key = row[col.index('_strain_key')].strip()
strain = row[col.index('strain')].strip()
species = row[col.index('species')].strip()
if self.test_mode is True:
if int(strain_key) not in self.test_keys.get('strain'):
continue
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is not None:
self.label_hash[strain_id] = strain
# add the species to the graph as a class
species = species.strip()
sp = self.resolve(species, False)
if sp == species:
LOG.error("No taxon mapping for " + species)
# they may tag a geo name on house mouse
if species[:17] == 'M. m. domesticus ':
LOG.warning("defaulting to Mus musculus")
sp = self.globaltt['Mus musculus']
else:
LOG.warning("defaulting to genus 'Mus'")
sp = self.globaltt['Mus']
elif species in MGI.unknown_taxa:
LOG.warning("defaulting to genus 'Mus'")
sp = self.globaltt['Mus']
model.addClassToGraph(sp, None)
geno.addTaxon(sp, strain_id)
model.addIndividualToGraph(strain_id, strain, sp)
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _process_mrk_marker_view(self, limit):
"""
This is the definition of markers
(as in genes, but other genomic loci types as well).
It looks up the identifiers in the hashmap
This includes their labels, specific class, and identifiers
TODO should we use the mrk_mouse_view instead?
Triples:
<marker_id> a owl:Class OR owl:NamedIndividual
GENO:marker_type
rdfs:label <symbol>
RO:in_taxon <NCBITaxon_id>
:param limit:
:return:
"""
src_key = 'mrk_marker_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting markers and assigning types")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
(marker_key,
organism_key,
marker_status_key,
symbol,
name,
latin_name,
marker_type) = line.split('\t')
if self.test_mode is True:
if int(marker_key) not in self.test_keys.get('marker'):
continue
# use only non-withdrawn markers
if marker_status_key != '2':
marker_id = self.idhash['marker'].get(marker_key)
# only pull info for mouse genes for now
# other species should come from other dbs
if organism_key != '1':
continue
if marker_id is None:
LOG.error(
"can't find %s %s in the id hash", marker_key, symbol)
# check "Not Applicable" -> "reference_locus"
mapped_marker_type = self.resolve(marker_type.strip())
# if it's unlocated, or is not a gene,
# then don't add it as a class because
# it's not added as a gene.
# everything except for genes are modeled as individuals
if mapped_marker_type in [
self.globaltt['gene'],
self.globaltt['pseudogene']]:
model.addClassToGraph(
marker_id, symbol, mapped_marker_type, name
)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym']
)
self.markers['classes'].append(marker_id)
else:
model.addIndividualToGraph(
marker_id, symbol, mapped_marker_type, name
)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym']
)
self.markers['indiv'].append(marker_id)
self.label_hash[marker_id] = symbol
# add the taxon (default to Mus m.)
# latin_name is not always a proper binomial
if latin_name in MGI.unknown_taxa: # localtt conflict
latin_name = 'Mus'
taxon_id = self.resolve(
latin_name, default=self.globaltt['Mus musculus'])
geno.addTaxon(taxon_id, marker_id)
# make MGI the leader for mouse genes.
if taxon_id == self.globaltt['Mus musculus']:
model.makeLeader(marker_id)
if not self.test_mode and limit is not None \
and line_num > limit:
break
def _process_mrk_summary_view(self, limit):
"""
Here we pull the mgiid of the features, and make equivalent (or sameAs)
associations to referenced ids.
Only adding the ENSEMBL genes and NCBI gene ids.
Will wait on other ids later.
:param limit:
:return:
"""
src_key = 'mrk_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("getting markers and equivalent ids from mrk_summary_view")
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
subtype = row[col.index('subtype')].strip()
short_description = row[col.index('short_description')].strip()
if self.test_mode is True and \
int(object_key) not in self.test_keys.get('marker'):
continue
if preferred == '1':
if self.idhash['marker'].get(object_key) is None:
# can't find the marker in the hash; add it here:
self.idhash['marker'][object_key] = mgiid
LOG.error(
"this marker hasn't been seen before %s %s",
mgiid, short_description)
if accid == mgiid:
# don't need to make equivalences to itself
continue
mapped_id = None
if logicaldb_key == '60':
mapped_id = 'ENSEMBL:' + accid
elif logicaldb_key == '1':
# don't need to add the equivalence to itself.
continue
elif logicaldb_key == '55':
mapped_id = 'NCBIGene:' + accid
if mapped_id is not None:
if mgiid in self.markers['classes'] \
or subtype in ['Gene', 'Pseudogene']:
model.addClassToGraph(mapped_id, None)
model.addEquivalentClass(mgiid, mapped_id)
elif mgiid in self.markers['indiv']:
model.addIndividualToGraph(mapped_id, None)
model.addSameIndividual(mgiid, mapped_id)
# could parse the "subtype" string
# to get the kind of thing the marker is
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_mrk_acc_view(self):
"""
Use this table to create the idmap between the internal marker id and
the public mgiid.
        No triples are produced in this pass; a second pass through the same
        file (_process_mrk_acc_view_for_equiv) adds the equivalence statements.
:return:
"""
src_key = 'mrk_acc_view'
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_num = 0
LOG.info("mapping markers to internal identifiers")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip('\n')
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefixpart')]
logicaldb_key = row[col.index('_logicaldb_key')]
object_key = row[col.index('_object_key')]
preferred = row[col.index('preferred')]
# = row[col.index('_organism_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefix_part == 'MGI:' and preferred == '1':
self.idhash['marker'][object_key] = accid
def _process_mrk_acc_view_for_equiv(self, limit):
"""
Add the equivalences, either sameAs or equivalentClass,
depending on the nature of the marker.
We only process the ENSEMBL genes and NCBI gene ids.
:param limit:
:return:
"""
src_key = 'mrk_acc_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
# pass through the file again,
# and make the equivalence statements to a subset of the idspaces.
# TODO verify the difference between what the
# mrk_acc_view vs mrk_summary_view buys us here.
# if nothing, then we should remove one or the other.
LOG.info("mapping marker equivalent identifiers in mrk_acc_view")
line_num = 0
col = self.tables[src_key]['columns']
with open('/'.join((self.rawdir, src_key)), 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefixpart')]
logicaldb_key = row[col.index('_logicaldb_key')]
object_key = row[col.index('_object_key')]
preferred = row[col.index('preferred')]
organism_key = row[col.index('_organism_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# right now not caring about other organisms
                if organism_key != '1':
continue
mgiid = self.idhash['marker'].get(object_key)
if mgiid is None:
# presumably we've already added the relevant MGI ids,
# so skip those that we can't find
LOG.debug("can't find mgiid for %s", object_key)
continue
marker_id = None
if preferred == '1': # TODO what does it mean if it's 0?
if logicaldb_key == '55': # entrez/ncbi
marker_id = 'NCBIGene:' + accid
elif logicaldb_key == '1' and prefix_part != 'MGI:':
marker_id = accid
elif logicaldb_key == '60':
marker_id = 'ENSEMBL:' + accid
# TODO get non-preferred ids==deprecated?
if marker_id is not None:
if mgiid in self.markers['classes']:
model.addClassToGraph(marker_id, None)
model.addEquivalentClass(mgiid, marker_id)
elif mgiid in self.markers['indiv']:
model.addIndividualToGraph(marker_id, None)
model.addSameIndividual(mgiid, marker_id)
else:
LOG.error("mgiid not in class or indiv hash %s", mgiid)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_prb_strain_acc_view(self, limit):
"""
Use this table to create the idmap between
the internal marker id and the public mgiid.
Also, add the equivalence statements between strains for MGI and JAX
Triples:
<strain_id> a GENO:intrinsic genotype
<other_strain_id> a GENO:intrinsic_genotype
<strain_id> owl:sameAs <other_strain_id>
:param limit:
:return:
"""
src_key = 'prb_strain_acc_view'
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("mapping strains to internal identifiers")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
tax_id = self.globaltt["Mus musculus"]
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
prefixpart = row[col.index('prefixpart')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('strain'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefixpart == 'MGI:' and preferred == '1':
self.idhash['strain'][object_key] = accid
model.addIndividualToGraph(
accid, self.globaltt['intrinsic genotype'], tax_id)
# The following are the stock centers for the strains
# (asterisk indicates complete)
# *1 MGI Mouse Genome Informatics
# *22 JAX Registry (null)
# *37 EMMA European Mutant Mouse Archive
# *38 MMRRC Mutant Mouse Regional Resource Center
# 39 Harwell Mammalian Genome Unit Stock List
# *40 ORNL Oak Ridge National Lab mutant resource
# *54 NCIMR NCI Mouse Repository
# *56 NMICE Neuromice.org, a consortium of three NIH-sponsored
# mutagenesis projects designed to search for
# neurological mutations
# 57 CARD Center for Animal Resources and Development @ Kumamoto U
# *70 RIKEN BRC RIKEN BioResource Center
# *71 CMMR Canadian Mouse Mutant Resource
# 84 JPGA The Center for New Mouse Models of
            #             Heart, Lung, Blood and Sleep Disorders,
# JAX-PGA at The Jackson Laboratory
# *87 MUGEN Network of Excellence in Integrated Functional Genomics
# in Mutant Mouse Models as Tools to Investigate the
# Complexity of Human Immunological Disease
# *90 APB Australian Phenomics Bank
# ? 91 EMS Elizabeth M. Simpson
# ? 93 NIG National Institute of Genetics,
# Mammalian Genetics Laboratory, Japan
# 94 TAC Taconic
# 154 OBS Oriental BioService , Inc.
# 161 RMRC-NLAC National Applied Research Laboratories,Taiwan, R.O.C.
# pass through the file again,
# and make the equivalence statements to a subset of the idspaces
LOG.info("mapping strain equivalent identifiers")
line_num = 0
with open(raw, 'r') as reader:
reader.readline() # read the header row; skip
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
prefixpart = row[col.index('prefixpart')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('strain'):
continue
mgiid = self.idhash['strain'].get(object_key)
if mgiid is None:
# presumably we've already added the relevant MGI ids,
# so skip those that we can't find
# LOG.info("can't find mgiid for %s",object_key)
continue
strain_id = None
deprecated = False
comment = None
if preferred == '1': # what does it mean if it's 0?
if logicaldb_key == '22': # JAX
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid).strip()
strain_id = 'JAX:' + accid
elif logicaldb_key == '38': # MMRRC
strain_id = accid
if not re.match(r'MMRRC:', strain_id):
strain_id = 'MMRRC:' + strain_id
elif logicaldb_key == '37': # EMMA
# replace EM: prefix with EMMA:, or for accid's
# with bare digits (e.g. 06335) prepend 'EMMA:'
strain_id = re.sub(r'^(EM:)*', 'EMMA:', accid)
elif logicaldb_key == '90': # APB
strain_id = 'APB:' + accid # Check
elif logicaldb_key == '40': # ORNL
# ORNL is not in existence any more.
# these are deprecated, and we will prefix with JAX
strain_id = 'JAX:' + accid
comment = "Originally from ORNL."
deprecated = True
# add these as synonyms of the MGI mouse
model.addSynonym(mgiid, accid)
elif logicaldb_key == '54': # NCIMR
strain_id = 'NCIMR:' + accid
# CMMR not great - doesn't resolve well
# elif logicaldb_key == '71':
# strain_id = 'CMMR:'+accid
elif logicaldb_key == '56': # neuromice
# neuromice.org doesn't exist any more.
# but all these are actually MGI ids
strain_id = accid
elif logicaldb_key == '70': # RIKEN
# like
# http://www2.brc.riken.jp/lab/animal/detail.php?brc_no=RBRC00160
strain_id = 'RBRC:RBRC' + accid
elif logicaldb_key == '87':
strain_id = 'MUGEN:' + accid
# I can't figure out how to get to some of the strains
# TODO get non-preferred ids==deprecated?
# TODO make these strains, rather than instance of taxon?
if strain_id is not None:
model.addIndividualToGraph(strain_id, None, tax_id)
if deprecated:
model.addDeprecatedIndividual(strain_id, [mgiid])
model.addSynonym(mgiid, accid)
else:
model.addSameIndividual(mgiid, strain_id)
if re.match(r'MMRRC', strain_id):
model.makeLeader(strain_id)
if comment is not None:
model.addComment(strain_id, comment)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_mgi_note_vocevidence_view(self, limit):
"""
Here we fetch the free text descriptions of the phenotype associations.
Triples:
<annot_id> dc:description "description text"
:param limit:
:return:
"""
src_key = 'mgi_note_vocevidence_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("getting free text descriptions for annotations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
object_key = row[col.index('_object_key')].strip()
note = row[col.index('note')].strip()
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('notes'):
continue
# object_key == evidence._annotevidence_key
annotkey = self.idhash['notes'].get(object_key)
annot_id = self.idhash['annot'].get(annotkey)
# only add the description for the annotations
# we have captured through processing
if annot_id is not None:
model.addDescription(annot_id, note.strip())
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _process_mrk_location_cache(self, limit):
src_key = 'mrk_location_cache'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting marker locations")
raw = '/'.join((self.rawdir, src_key))
geno = Genotype(graph)
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
marker_key = row[col.index('_marker_key')].strip()
organism_key = row[col.index('_organism_key')].strip()
chromosome = row[col.index('chromosome')].strip()
startcoordinate = row[col.index('startcoordinate')].strip()
endcoordinate = row[col.index('endcoordinate')].strip()
strand = row[col.index('strand')].strip()
version = row[col.index('version')].strip()
# only get the location information for mouse
if str(organism_key) != '1' or str(chromosome) == 'UN':
continue
if self.test_mode is True:
if int(marker_key) not in self.test_keys.get('marker'):
continue
# make the chromosome, and the build-instance
chrom_id = makeChromID(chromosome, 'NCBITaxon:10090', 'CHR')
if version is not None and version != '' and version != '(null)':
# switch on maptype or mapkey
assembly = version
build_id = 'NCBIGenome:' + assembly
geno.addChromosomeInstance(
chromosome, build_id, assembly, chrom_id)
chrom_id = makeChromID(chromosome, build_id, 'MONARCH')
if marker_key in self.idhash['marker']:
gene_id = self.idhash['marker'][marker_key]
feature = Feature(graph, gene_id, None, None)
if strand == '(null)' or strand == '':
strand = None
if startcoordinate == '(null)' or startcoordinate == '':
startcoordinate = None
if endcoordinate == '(null)' or endcoordinate == '':
endcoordinate = None
if startcoordinate is not None:
feature.addFeatureStartLocation(
int(float(startcoordinate)), chrom_id, strand)
else:
feature.addFeatureStartLocation(
startcoordinate, chrom_id, strand,
[self.globaltt['FuzzyPosition']])
if endcoordinate is not None:
feature.addFeatureEndLocation(
int(float(endcoordinate)), chrom_id, strand)
# note we don't add the uncertain end coordinate,
# because we don't know what it is.
add_as_class = False
if gene_id in self.markers['classes']:
add_as_class = True
feature.addFeatureToGraph(True, None, add_as_class)
else:
LOG.warning('marker key %s not in idhash', str(marker_key))
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def process_mgi_relationship_transgene_genes(self, limit=None):
"""
Here, we have the relationship between MGI transgene alleles,
and the non-mouse gene ids that are part of them.
We augment the allele with the transgene parts.
:param limit:
:return:
"""
src_key = 'mgi_relationship_transgene_genes'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting transgene genes")
raw = '/'.join((self.rawdir, src_key))
geno = Genotype(graph)
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
# rel_key = row[col.index('rel_key')].strip()
allele_key = int(row[col.index('object_1')])
allele_id = row[col.index('allele_id')]
# allele_label = row[col.index('allele_label')].strip()
# category_key = row[col.index('category_key')].strip()
# category_name = row[col.index('category_name')].strip()
# property_key = row[col.index('property_key')].strip()
# property_name = row[col.index('property_name')].strip()
gene_num = int(row[col.index('property_value')])
if self.test_mode and allele_key not in self.test_keys.get('allele') \
and gene_num not in self.test_ids:
continue
gene_id = 'NCBIGene:' + str(gene_num)
# geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])
seqalt_id = self.idhash['seqalt'].get(allele_key)
if seqalt_id is None:
seqalt_id = allele_id
geno.addSequenceDerivesFrom(seqalt_id, gene_id)
if not self.test_mode and limit is not None and \
reader.line_num > limit:
break
def process_mgi_note_allele_view(self, limit=None):
"""
These are the descriptive notes about the alleles.
Note that these notes have embedded HTML -
should we do anything about that?
:param limit:
:return:
"""
src_key = 'mgi_note_allele_view'
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Assembling notes on alleles")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
notehash = {}
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
object_key = row[col.index('_object_key')].strip()
notetype = row[col.index('notetype')].strip()
note = row[col.index('note')].strip()
sequencenum = row[col.index('sequencenum')].strip()
# read all the notes into a hash to concatenate
if object_key not in notehash:
notehash[object_key] = {}
if notetype not in notehash[object_key]:
notehash[object_key][notetype] = []
if len(notehash[object_key][notetype]) < int(sequencenum):
for i in range(
len(notehash[object_key][notetype]),
int(sequencenum)
):
notehash[object_key][notetype].append('')
notehash[object_key][notetype][int(sequencenum) - 1] = note.strip()
# finish iteration over notes
line_num = 0
for allele_key in notehash:
line_num += 1
if self.test_mode is True:
if int(allele_key) not in self.test_keys.get('allele'):
continue
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
continue
for n in notehash[allele_key]:
# pretty chatty for expected behavior
# LOG.info(
# "found %d %s notes for %s",
# len(notehash[allele_key]), n, allele_id)
notes = ''.join(notehash[allele_key][n])
notes += ' [' + n + ']'
model.addDescription(allele_id, notes)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_prb_strain_genotype_view(self, limit=None):
"""
        Here we link each strain to the genotype it carries.
        Triples:
        <strain_id> GENO:has_genotype <genotype_id>
:param limit:
:return:
"""
src_key = 'prb_strain_genotype_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("Getting genotypes for strains")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
strain_key = row[col.index('_strain_key')].strip()
genotype_key = row[col.index('_genotype_key')].strip()
if self.test_mode is True and \
int(genotype_key) not in self.test_keys.get('genotype') \
and int(strain_key) not in self.test_keys.get('strain'):
continue
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is None:
strain_id = self._make_internal_identifier(
'strain', strain_key)
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id is None:
genotype_id = self._make_internal_identifier(
'genotype', genotype_key)
if strain_id is not None and genotype_id is not None:
self.strain_to_genotype_map[strain_id] = genotype_id
graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id)
# TODO
# verify if this should be contingent on the exactness or not
# if qualifier == 'Exact':
# gu.addTriple(
# graph, strain_id,
# self.globaltt['has_genotype'],
# genotype_id)
# else:
# gu.addXref(graph, strain_id, genotype_id)
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _make_internal_identifier(self, prefix, key):
"""
This is a special MGI-to-MONARCH-ism.
MGI tables have unique keys that we use here, but don't want to
necessarily re-distribute those internal identifiers.
        Therefore, we turn them into blank-node identifiers in a consistent way here.
:param prefix: the object type to prefix the key with,
since the numbers themselves are not unique across tables
:param key: the number
:return:
"""
# these are just more blank node identifiers
iid = self.make_id('mgi' + prefix + 'key' + key, '_')
return iid
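    # Minimal usage sketch (key values are hypothetical): the prefix keeps
    # numeric keys from different MGI tables from colliding before they are
    # hashed into blank nodes by make_id(), e.g.
    #   self._make_internal_identifier('allele', '123')    # from 'mgiallelekey123'
    #   self._make_internal_identifier('genotype', '123')  # from 'mgigenotypekey123'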
# def _querysparql(self):
#
# #load the graph
# vg = Graph()
# vg.parse(self.outfile, format="turtle")
#
# qres = g.query(
# """SELECT DISTINCT ?aname ?bname
# WHERE {
# ?a foaf:knows ?b .
# ?a foaf:name ?aname .
# ?b foaf:name ?bname .
# }""")
#
# for row in qres:
# print("%s knows %s" % row)<|fim▁end|> | |
<|file_name|>factories.py<|end_file_name|><|fim▁begin|>import factory
from api import models
class ClientFactory(factory.DjangoModelFactory):
class Meta:
model = models.Client<|fim▁hole|>
name = 'Coaxis'
@factory.django.mute_signals(models.post_save)
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = models.MyUser
email = factory.Sequence(lambda n: 'u{0}@coaxis.com'.format(n))
password = factory.PostGenerationMethodCall('set_password', 'password')
is_staff = False
class EmployeeFactory(factory.DjangoModelFactory):
class Meta:
model = models.Employee
user = factory.SubFactory(UserFactory)
is_technician = False
@factory.post_generation
def clients(self, create, extracted, **kwargs):
if not create: # Simple build, do nothing.
return
if extracted: # A list of objects were passed in, use them
for client in extracted:
self.clients.add(client)
class TechnicianFactory(EmployeeFactory):
is_technician = True
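    # Usage sketch (illustrative, e.g. inside a test fixture): a `clients`
    # keyword is routed through the post_generation hook on EmployeeFactory,
    # so the many-to-many link is only populated for created (saved) instances:
    #   tech = TechnicianFactory(clients=[ClientFactory()])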
class DaemonFactory(factory.DjangoModelFactory):
class Meta:
model = models.Daemon
client = factory.SubFactory(ClientFactory)<|fim▁end|> | |
<|file_name|>lassplit.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
***************************************************************************
lassplit.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *<|fim▁hole|>* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from sextante.lidar.lastools.LasToolsUtils import LasToolsUtils
from sextante.lidar.lastools.LasToolsAlgorithm import LasToolsAlgorithm
from sextante.parameters.ParameterFile import ParameterFile
from sextante.outputs.OutputFile import OutputFile
from sextante.parameters.ParameterNumber import ParameterNumber
class lassplit(LasToolsAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
NUM_POINTS = "NUM_POINTS"
def defineCharacteristics(self):
self.name = "lassplit"
self.group = "Tools"
self.addParameter(ParameterFile(lassplit.INPUT, "Input las layer"))
        self.addParameter(ParameterNumber(lassplit.NUM_POINTS, "Points in each output file", 1, None, 1000000))
self.addOutput(OutputFile(lassplit.OUTPUT, "Output las file basename"))
self.addCommonParameters()
def processAlgorithm(self, progress):
commands = [os.path.join(LasToolsUtils.LasToolsPath(), "bin", "lassplit.exe")]
commands.append("-i")
commands.append(self.getParameterValue(lassplit.INPUT))
commands.append("-o")
commands.append(self.getOutputValue(lassplit.OUTPUT))
commands.append("-split")
commands.append(self.getParameterValue(lassplit.NUM_POINTS))
self.addCommonParameterValuesToCommand(commands)
LasToolsUtils.runLasTools(commands, progress)<|fim▁end|> | |
<|file_name|>fibonacci.py<|end_file_name|><|fim▁begin|>def _checkInput(index):
if index < 0:
raise ValueError("Indice negativo non supportato [{}]".format(index))
elif type(index) != int:
raise TypeError("Inserire un intero [tipo input {}]".format(type(index).__name__))<|fim▁hole|> serie = "0 1 1 2 3 5 8".replace(" ", "")
return int(serie[index])
def fib_from_list(index):
_checkInput(index)
serie = [0,1,1,2,3,5,8]
return serie[index]
def fib_from_algo(index):
_checkInput(index)
current_number = current_index = 0
base = 1
while current_index < index:
old_base = current_number
current_number = current_number + base
base = old_base
current_index += 1
pass
return current_number
def recursion(index):
if index <= 1:
return index
return recursion(index - 1) + recursion(index - 2)
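# Worked example: recursion(6) = recursion(5) + recursion(4) = 5 + 3 = 8,
# matching index 6 of the series 0 1 1 2 3 5 8 used by the lookup variants
# above. The naive form is exponential in `index`; a memoised sketch would be:
#   from functools import lru_cache
#   @lru_cache(maxsize=None)
#   def fib(n): return n if n <= 1 else fib(n - 1) + fib(n - 2)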
def fib_from_recursion_func(index):
_checkInput(index)
return recursion(index)
calculate = fib_from_recursion_func<|fim▁end|> |
def fib_from_string(index):
_checkInput(index) |
<|file_name|>decode.rs<|end_file_name|><|fim▁begin|>const GREEK_FROM_BETA: [char; 26] = [
'\u{03b1}', // A => alpha
'\u{03b2}', // B => beta
'\u{03be}', // C => xi
'\u{03b4}', // D => delta
'\u{03b5}', // E => epsilon
'\u{03c6}', // F => phi
'\u{03b3}', // G => gamma
'\u{03b7}', // H => eta
'\u{03b9}', // I => iota
'\u{03c2}', // J => final sigma
'\u{03ba}', // K => kappa
'\u{03bb}', // L => lambda
'\u{03bc}', // M => mu
'\u{03bd}', // N => nu
'\u{03bf}', // O => omicron
'\u{03c0}', // P => pi
'\u{03b8}', // Q => theta
'\u{03c1}', // R => rho
'\u{03c3}', // S => sigma (medial by default)
'\u{03c4}', // T => tau
'\u{03c5}', // U => upsilon
'\u{03dd}', // V => digamma (wau)
'\u{03c9}', // W => omega
'\u{03c7}', // X => chi
'\u{03c8}', // Y => psi
'\u{03b6}', // Z => zeta
];
const MEDIAL_SIGMA: char = '\u{03c3}';
const FINAL_SIGMA: char = '\u{03c2}';
const QUESTION_MARK: char = ';'; // normal semicolon is preferred
const APOSTROPHE: char = '\u{2019}'; // right single quotation mark
const HYPHEN: char = '\u{2010}'; // TLG says to use this instead of '-'
const DASH: char = '\u{2014}'; // em dash
const MIDDLE_DOT: char = '\u{00B7}';
const NUMERAL_SIGN: char = '\u{0374}';
const SMOOTH_BREATHING: char = '\u{0313}';
const ROUGH_BREATHING: char = '\u{0314}';
const ACUTE_ACCENT: char = '\u{0301}';
const CIRCUMFLEX_ACCENT: char = '\u{0342}';
const GRAVE_ACCENT: char = '\u{0300}';
const DIAERESIS: char = '\u{0308}';
const IOTA_SUBSCRIPT: char = '\u{0345}';
pub fn beta_decode(c: char) -> char {
match c {
'a' ... 'z' => {
const LITTLE_A: usize = 'a' as usize;
let index = (c as usize) - LITTLE_A;
GREEK_FROM_BETA[index]
},
'A' ... 'Z' => {
const BIG_A: usize = 'A' as usize;
let index = (c as usize) - BIG_A;
GREEK_FROM_BETA[index]
}
')' => SMOOTH_BREATHING,
'(' => ROUGH_BREATHING,
'/' => ACUTE_ACCENT,
'=' => CIRCUMFLEX_ACCENT,
'\\'=> GRAVE_ACCENT,
'+' => DIAERESIS,<|fim▁hole|> '-' => HYPHEN,
'_' => DASH,
':' => MIDDLE_DOT,
'#' => NUMERAL_SIGN,
_ => c,
}
}
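// Illustrative examples (not part of the original file): plain letters map
// straight onto the table above, while accents and breathings come back as
// Unicode combining characters that a renderer stacks on the preceding base:
//   beta_decode('a') == '\u{03b1}'  (alpha)
//   beta_decode('(') == '\u{0314}'  (combining rough breathing)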
pub struct BetaDecoding<I: Iterator<Item=char>> {
betacode: I,
lookahead: Option<char>,
breathing: Option<char>,
accent: Option<char>,
awaiting_uppercase: bool,
}
impl<I: Iterator<Item=char>> BetaDecoding<I> {
pub fn new(betacode: I) -> BetaDecoding<I> {
BetaDecoding {
betacode: betacode,
lookahead: None,
breathing: None,
accent: None,
awaiting_uppercase: false,
}
}
fn next_helper(&mut self) -> Option<char> {
// get_greek_char does most of the work, just have to determine correct sigma
let greek_char = if let Some(_) = self.lookahead {
self.lookahead
} else {
self.get_greek_char()
};
self.lookahead = None;
match greek_char {
Some(MEDIAL_SIGMA) => {
self.lookahead = self.get_greek_char();
match self.lookahead {
Some(c) if c.is_alphabetic() => Some(MEDIAL_SIGMA),
_ => Some(FINAL_SIGMA),
}
},
_ => greek_char,
}
}
fn get_greek_char(&mut self) -> Option<char> {
if let Some(breathing) = self.breathing {
self.breathing = None;
Some(breathing)
} else if let Some(accent) = self.accent {
self.accent = None;
Some(accent)
} else {
match self.betacode.next() {
Some('`') => {
// escape character
self.betacode.next()
},
Some('*') => {
self.awaiting_uppercase = true;
self.next()
},
Some(')') if self.awaiting_uppercase => {
let next = self.next();
self.breathing = Some(SMOOTH_BREATHING);
next
},
Some('(') if self.awaiting_uppercase => {
let next = self.next();
self.breathing = Some(ROUGH_BREATHING);
next
},
Some('/') if self.awaiting_uppercase => {
let next = self.next();
self.accent = Some(ACUTE_ACCENT);
next
},
Some('=') if self.awaiting_uppercase => {
let next = self.next();
self.accent = Some(CIRCUMFLEX_ACCENT);
next
},
Some('\\') if self.awaiting_uppercase => {
let next = self.next();
self.accent = Some(GRAVE_ACCENT);
next
},
Some(c) => {
let greek = beta_decode(c);
if self.awaiting_uppercase {
self.awaiting_uppercase = false;
greek.to_uppercase().next()
} else {
Some(greek)
}
},
None => None,
}
}
}
}
impl<I: Iterator<Item=char>> Iterator for BetaDecoding<I> {
type Item = char;
#[inline]
fn next (&mut self) -> Option<char> {
self.next_helper()
}
}<|fim▁end|> | '|' => IOTA_SUBSCRIPT,
';' => QUESTION_MARK,
'\''=> APOSTROPHE, |
<|file_name|>cms_form.py<|end_file_name|><|fim▁begin|>##############################################################################
#
# Copyright (C) 2019-2020 Compassion CH (http://www.compassion.ch)
# @author: Christopher Meier <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
"""
This file blocks all the routes defined automatically by cms_form.<|fim▁hole|>from odoo.addons.cms_form.controllers.main import (
CMSFormController,
CMSWizardFormController,
CMSSearchFormController,
)
class UnwantedCMSFormController(CMSFormController):
@http.route()
def cms_form(self, model, model_id=None, **kw):
return http.request.render("website.404")
class UnwantedCMSWizardFormController(CMSWizardFormController):
@http.route()
def cms_wiz(self, wiz_model, model_id=None, **kw):
return http.request.render("website.404")
class UnwantedCMSSearchFormController(CMSSearchFormController):
@http.route()
def cms_form(self, model, **kw):
return http.request.render("website.404")<|fim▁end|> | """
from odoo import http |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"fmt"
"os"
"path/filepath"
"github.com/karrick/godirwalk"
"github.com/pkg/errors"
)
func main() {
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "usage: %s dir1 [dir2 [dir3...]]\n", filepath.Base(os.Args[0]))
os.Exit(2)
}
scratchBuffer := make([]byte, 64*1024) // allocate once and re-use each time
var count, total int
var err error<|fim▁hole|> for _, arg := range os.Args[1:] {
count, err = pruneEmptyDirectories(arg, scratchBuffer)
total += count
if err != nil {
break
}
}
fmt.Fprintf(os.Stderr, "Removed %d empty directories\n", total)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
os.Exit(1)
}
}
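// pruneEmptyDirectories relies on godirwalk's PostChildrenCallback, which only
// fires after a directory's children have been visited, so the walk is
// effectively bottom-up and newly-emptied parents are removed too.
// Minimal usage sketch (the path is hypothetical):
//	n, err := pruneEmptyDirectories("/tmp/scratch", make([]byte, 64*1024))
//	// /tmp/scratch/a/b (empty) goes first, then /tmp/scratch/a if it became
//	// empty; the root /tmp/scratch itself is always kept.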
func pruneEmptyDirectories(osDirname string, scratchBuffer []byte) (int, error) {
var count int
err := godirwalk.Walk(osDirname, &godirwalk.Options{
Unsorted: true,
ScratchBuffer: scratchBuffer,
Callback: func(_ string, _ *godirwalk.Dirent) error {
// no-op while diving in; all the fun happens in PostChildrenCallback
return nil
},
PostChildrenCallback: func(osPathname string, _ *godirwalk.Dirent) error {
deChildren, err := godirwalk.ReadDirents(osPathname, scratchBuffer)
if err != nil {
return errors.Wrap(err, "cannot ReadDirents")
}
// NOTE: ReadDirents skips "." and ".."
if len(deChildren) > 0 {
return nil // this directory has children; no additional work here
}
if osPathname == osDirname {
return nil // do not remove provided root directory
}
err = os.Remove(osPathname)
if err == nil {
count++
}
return err
},
})
return count, err
}<|fim▁end|> | |
<|file_name|>check_badges.py<|end_file_name|><|fim▁begin|>import sys
import os
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand, CommandError
from workshops.models import Award, Badge, Person
SKIP_DIRS = ['class']
class Command(BaseCommand):
args = '/path/to/site'
help = 'Report inconsistencies in badges.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('Usage: check_badges /path/to/site')
path_to_site = args[0]
badge_dir = os.path.join(path_to_site, 'badges')
for entry in os.listdir(badge_dir):
entry_path = os.path.join(badge_dir, entry)
if os.path.isdir(entry_path) and entry not in SKIP_DIRS:
self.check_badges(entry, entry_path)
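        # Illustrative layout being walked (paths are hypothetical):
        #   /path/to/site/badges/instructor/*.json -> compared against Award rows
        #   /path/to/site/badges/class/            -> skipped via SKIP_DIRS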
def check_badges(self, badge_name, badge_path):
try:
badge = Badge.objects.get(name=badge_name)
db_awards = set([a.person.username for a in Award.objects.filter(badge=badge)])
path_awards = set([os.path.splitext(p)[0] for p in os.listdir(badge_path) if p.endswith('.json')])
self.report_missing('in database but not site', badge_name, db_awards - path_awards)
self.report_missing('in site but not database', badge_name, path_awards - db_awards)
except ObjectDoesNotExist:
print('badge "{0}" not known'.format(badge_name, file=sys.stderr))
def report_missing(self, title, badge_name, items):
if items:
print('{0} {1}'.format(badge_name, title))
for i in sorted(list(items)):
try:
p = Person.objects.get(username=i)
print(' {0}: {1}'.format(i, p))
except ObjectDoesNotExist:<|fim▁hole|> print(' {0}'.format(i))<|fim▁end|> | |
<|file_name|>wrong-abi.rs<|end_file_name|><|fim▁begin|>// compile-flags: --target thumbv8m.main-none-eabi --crate-type lib
// needs-llvm-components: arm
#![feature(cmse_nonsecure_entry, no_core, lang_items)]<|fim▁hole|>#[no_mangle]
#[cmse_nonsecure_entry]
//~^ ERROR `#[cmse_nonsecure_entry]` requires C ABI [E0776]
pub fn entry_function(_: u32, _: u32, _: u32, d: u32) -> u32 {
d
}<|fim▁end|> | #![no_core]
#[lang="sized"]
trait Sized { }
|
<|file_name|>log.py<|end_file_name|><|fim▁begin|><|fim▁hole|>-------------
Provides a central logging facility. It is used to record log info
and report both to a log file and stdout
"""
import sys
import logging
import traceback
CLINT_AVAILABLE = True
try:
from clint.textui import puts, colored
except:
# Clint is still not stable enough yet to just import with so much
# trust, but I really like colored output. So we'll give it a shot
# and if it doesn't work we will just do something else.
CLINT_AVAILABLE = False
def get_logging_level(level):
logging_level = None
if isinstance(level, (str, unicode)):
level = level.upper()
try:
logging_level = getattr(logging, level.upper())
except AttributeError:
raise AttributeError('Tried to grab logging level "%s"'
' but it does not exist' % level)
elif isinstance(level, int):
# Do nothing
logging_level = level
else:
raise TypeError('Invalid logging level. Must be string or int %s'
% str(level))
return logging_level
class VirtstrapLogger(object):
"""Custom logger for use with virtstrap
    It'll allow the logger to store logged data before a log file is set up. It
is meant to be used globally.
"""
def __init__(self):
self._handlers = []
self._log_lines = [] #storage before any handlers appear
def add_handler(self, handler):
self._handlers.append(handler)
log_lines = self._log_lines
for level, message in log_lines:
self.log(level, message, new_line=False)
self._log_lines = []
def debug(self, message, **kwargs):
self.log('debug', message, **kwargs)
def error(self, message, **kwargs):
self.log('error', message, **kwargs)
def info(self, message, **kwargs):
self.log('info', message, **kwargs)
def warning(self, message, **kwargs):
self.log('warning', message, **kwargs)
def critical(self, message, **kwargs):
self.log('critical', message, **kwargs)
def exception(self, message, **kwargs):
exception_str = self._get_exception_str()
self.log('error', '%s\n%s' % (message, exception_str))
def debug_exception(self, message, **kwargs):
"""Stores exception except using the debug level"""
exception_str = self._get_exception_str()
self.log('debug', '%s\n%s' % (message, exception_str))
def _get_exception_str(self):
exception_info = sys.exc_info()
exception_lines = traceback.format_exception(*exception_info)
exception_str = ''.join(exception_lines)
return exception_str
def log(self, level, message, new_line=True):
if new_line:
message = "%s\n" % message
handlers = self._handlers
if not handlers:
self._log_lines.append((level, message))
else:
for handler in handlers:
handler.log(level, message)
def close(self):
handlers = self._handlers
for handler in handlers:
close = getattr(handler, 'close')
if close:
close()
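    # Minimal usage sketch (handler and file name are hypothetical): messages
    # logged before any handler is attached are buffered in _log_lines and
    # replayed once add_handler() is called, so early start-up output is kept:
    #   logger = VirtstrapLogger()
    #   logger.debug('starting up')                           # buffered
    #   logger.add_handler(FileLogHandler('virtstrap.log'))   # replays buffer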
class VirtstrapLogHandler(object):
def __init__(self, level='debug'):
self._level = get_logging_level(level)
def set_level(self, level):
self._level = get_logging_level(level)
def log(self, level, message):
current_level = get_logging_level(level)
if current_level >= self._level:
self.emit(level, message)
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method')
def close(self):
pass
class ConsoleLogHandler(VirtstrapLogHandler):
def emit(self, level, message):
sys.stdout.write(message)
class ColoredConsoleLogHandler(VirtstrapLogHandler):
level_colors = {
"DEBUG": "green",
"INFO": "black",
"WARNING": "yellow",
"CRITICAL": "purple",
"ERROR": "red",
"EXCEPTION": "red",
}
def emit(self, level, output):
color = self.level_colors.get(level, "black")
colored_function = getattr(colored, color, lambda text: text)
colored_output = colored_function(output)
puts(colored_output)
class FileLogHandler(VirtstrapLogHandler):
"""File Log Handler that uses built in logging to log"""
def __init__(self, filename):
self._file = open(filename, 'a')
def emit(self, level, message):
if self._file:
self._file.write(message)
def close(self):
self._file.close()
self._file = None
class VirtstrapConsoleLogHandler(logging.Handler):
def __init__(self, outputter):
self._outputter = outputter
logging.Handler.__init__(self)
def emit(self, record):
outputter = self._outputter
output_string = self.format(record)
outputter.write(output_string, record.levelname)
class ConsoleLogOutputter(object):
def write(self, output, level):
print(output)
class ColoredConsoleLogOutputter(ConsoleLogOutputter):
level_colors = {
"DEBUG": "green",
"INFO": "black",
"WARNING": "yellow",
"CRITICAL": "purple",
"ERROR": "red",
"EXCEPTION": "red",
}
def write(self, output, level):
color = self.level_colors.get(level, "black")
colored_function = getattr(colored, color, lambda text: text)
colored_output = colored_function(output)
puts(colored_output)
logger = VirtstrapLogger()
VERBOSITY_LEVELS = {
0: None,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
def setup_logger(verbosity, no_colored_output=False, log_file=None):
"""Sets up the logger for the program. DO NOT USE DIRECTLY IN COMMANDS"""
verbosity_level = VERBOSITY_LEVELS.get(verbosity, logging.INFO)
if log_file:
file_handler = FileLogHandler(log_file)
# The file should log all things to be used for error reporting
file_handler.set_level(logging.DEBUG)
logger.add_handler(file_handler)
if not verbosity_level:
return
console_handler = ConsoleLogHandler()
if CLINT_AVAILABLE:
console_handler = ColoredConsoleLogHandler()
console_handler.set_level(verbosity_level)
logger.add_handler(console_handler)<|fim▁end|> | """
virtstrap.log |
<|file_name|>x86.py<|end_file_name|><|fim▁begin|>from capstone import *
from .architecture import Architecture
from avatar2.installer.config import GDB_X86, OPENOCD
class X86(Architecture):
get_gdb_executable = Architecture.resolve(GDB_X86)
get_oocd_executable = Architecture.resolve(OPENOCD)
qemu_name = 'i386'
gdb_name = 'i386'
registers = {'eax': 0,
'ecx': 1,
'edx': 2,
'ebx': 3,
'esp': 4,
'ebp': 5,
'esi': 6,
'edi': 7,
'eip': 8,
'pc': 8,
'eflags': 9,
'cs': 10,
'ss': 11,
'ds': 12,
'es': 13,
'fs': 14,
'gs': 15, }
special_registers = {
#SSE
'xmm0': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm0.v4_int32',
},
'xmm1': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm1.v4_int32',
},
'xmm2': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm2.v4_int32',
},
'xmm3': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm3.v4_int32',
},
'xmm4': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm4.v4_int32',
},
'xmm5': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm5.v4_int32',
},
'xmm6': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm6.v4_int32',
},
'xmm7': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm7.v4_int32',
},
'xmm8': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm8.v4_int32',
},
'xmm9': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm9.v4_int32',
},
'xmm10': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm10.v4_int32',
},
'xmm11': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm11.v4_int32',
},
'xmm12': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm12.v4_int32',
},
'xmm13': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm13.v4_int32',
},
'xmm14': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm14.v4_int32',
},
'xmm15': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm15.v4_int32',
},
#AVX
'ymm0': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm0.v8_int32',
},
'ymm1': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm1.v8_int32',
},
'ymm2': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm2.v8_int32',
},
'ymm3': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm3.v8_int32',
},
'ymm4': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm4.v8_int32',
},
'ymm5': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm5.v8_int32',
},
'ymm6': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm6.v8_int32',
},
'ymm7': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm7.v8_int32',
},
'ymm8': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm8.v8_int32',
},
'ymm9': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm9.v8_int32',
},
'ymm10': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm10.v8_int32',
},
'ymm11': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm11.v8_int32',
},
'ymm12': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm12.v8_int32',
},
'ymm13': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm13.v8_int32',
},
'ymm14': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm14.v8_int32',
},
'ymm15': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm15.v8_int32',
},
}
sr_name = 'eflags'
unemulated_instructions = []
capstone_arch = CS_ARCH_X86
capstone_mode = CS_MODE_32
word_size = 32
<|fim▁hole|> qemu_name = 'x86_64'
gdb_name = 'i386:x86-64'
registers = {'rax': 0,
'rbx': 1,
'rcx': 2,
'rdx': 3,
'rsi': 4,
'rdi': 5,
'rbp': 6,
'rsp': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'r13': 13,
'r14': 14,
'r15': 15,
'rip': 16,
'pc': 16,
'eflags': 17,
'cs': 18,
'ss': 19,
'ds': 20,
'es': 21,
'fs': 22,
'gs': 23,
}
    capstone_mode = CS_MODE_64
    unemulated_instructions = []
word_size = 64<|fim▁end|> |
class X86_64(X86): |
<|file_name|>ve.init.mw.TargetEvents.js<|end_file_name|><|fim▁begin|>/*!
* VisualEditor MediaWiki Initialization class.
*
* @copyright 2011-2015 VisualEditor Team and others; see AUTHORS.txt
* @license The MIT License (MIT); see LICENSE.txt
*/
/**
* Initialization MediaWiki Target Analytics.
*
* @class
*
* @constructor
* @param {ve.init.mw.Target} target Target class to log events for
*/
ve.init.mw.TargetEvents = function VeInitMwTargetEvents( target ) {
this.target = target;
this.timings = { saveRetries: 0 };
// Events
this.target.connect( this, {
saveWorkflowBegin: 'onSaveWorkflowBegin',
saveWorkflowEnd: 'onSaveWorkflowEnd',
saveInitiated: 'onSaveInitated',
save: 'onSaveComplete',
saveReview: 'onSaveReview',
saveErrorEmpty: [ 'trackSaveError', 'empty' ],
saveErrorSpamBlacklist: [ 'trackSaveError', 'spamblacklist' ],
saveErrorAbuseFilter: [ 'trackSaveError', 'abusefilter' ],
saveErrorBadToken: [ 'trackSaveError', 'badtoken' ],
saveErrorNewUser: [ 'trackSaveError', 'newuser' ],
saveErrorPageDeleted: [ 'trackSaveError', 'pagedeleted' ],
saveErrorTitleBlacklist: [ 'trackSaveError', 'titleblacklist' ],
saveErrorCaptcha: [ 'trackSaveError', 'captcha' ],
saveErrorUnknown: [ 'trackSaveError', 'unknown' ],
editConflict: [ 'trackSaveError', 'editconflict' ],
surfaceReady: 'onSurfaceReady',
showChanges: 'onShowChanges',
showChangesError: 'onShowChangesError',<|fim▁hole|> serializeError: 'onSerializeError'
} );
};
/**
* Target specific ve.track wrapper
*
* @param {string} topic Event name
* @param {Object} data Additional data describing the event, encoded as an object
*/
ve.init.mw.TargetEvents.prototype.track = function ( topic, data ) {
data.targetName = this.target.constructor.static.name;
ve.track( 'mwtiming.' + topic, data );
if ( topic.indexOf( 'performance.system.serializeforcache' ) === 0 ) {
// HACK: track serializeForCache duration here, because there's no event for that
this.timings.serializeForCache = data.duration;
}
};
/**
* Track when user begins the save workflow
*/
ve.init.mw.TargetEvents.prototype.onSaveWorkflowBegin = function () {
this.timings.saveWorkflowBegin = ve.now();
this.track( 'behavior.lastTransactionTillSaveDialogOpen', {
duration: this.timings.saveWorkflowBegin - this.timings.lastTransaction
} );
ve.track( 'mwedit.saveIntent' );
};
/**
* Track when user ends the save workflow
*/
ve.init.mw.TargetEvents.prototype.onSaveWorkflowEnd = function () {
this.track( 'behavior.saveDialogClose', { duration: ve.now() - this.timings.saveWorkflowBegin } );
this.timings.saveWorkflowBegin = null;
};
/**
* Track when document save is initiated
*/
ve.init.mw.TargetEvents.prototype.onSaveInitated = function () {
this.timings.saveInitiated = ve.now();
this.timings.saveRetries++;
this.track( 'behavior.saveDialogOpenTillSave', {
duration: this.timings.saveInitiated - this.timings.saveWorkflowBegin
} );
ve.track( 'mwedit.saveAttempt' );
};
/**
* Track when the save is complete
*
* @param {string} content
* @param {string} categoriesHtml
* @param {number} newRevId
*/
ve.init.mw.TargetEvents.prototype.onSaveComplete = function ( content, categoriesHtml, newRevId ) {
this.track( 'performance.user.saveComplete', { duration: ve.now() - this.timings.saveInitiated } );
this.timings.saveRetries = 0;
ve.track( 'mwedit.saveSuccess', {
timing: ve.now() - this.timings.saveInitiated + ( this.timings.serializeForCache || 0 ),
'page.revid': newRevId
} );
};
/**
* Track a save error by type
*
* @method
* @param {string} type Text for error type
*/
ve.init.mw.TargetEvents.prototype.trackSaveError = function ( type ) {
var key, data,
failureArguments = [],
// Maps mwtiming types to mwedit types
typeMap = {
badtoken: 'userBadToken',
newuser: 'userNewUser',
abusefilter: 'extensionAbuseFilter',
captcha: 'extensionCaptcha',
spamblacklist: 'extensionSpamBlacklist',
empty: 'responseEmpty',
unknown: 'responseUnknown',
pagedeleted: 'editPageDeleted',
titleblacklist: 'extensionTitleBlacklist',
editconflict: 'editConflict'
},
// Types that are logged as performance.user.saveError.{type}
// (for historical reasons; this sucks)
specialTypes = [ 'editconflict' ];
if ( arguments ) {
failureArguments = Array.prototype.slice.call( arguments, 1 );
}
key = 'performance.user.saveError';
if ( specialTypes.indexOf( type ) !== -1 ) {
key += '.' + type;
}
this.track( key, {
duration: ve.now() - this.timings.saveInitiated,
retries: this.timings.saveRetries,
type: type
} );
data = {
type: typeMap[ type ] || 'responseUnknown',
timing: ve.now() - this.timings.saveInitiated + ( this.timings.serializeForCache || 0 )
};
if ( type === 'unknown' && failureArguments[ 1 ].error && failureArguments[ 1 ].error.code ) {
data.message = failureArguments[ 1 ].error.code;
}
ve.track( 'mwedit.saveFailure', data );
};
/**
* Record activation having started.
*
* @param {number} [startTime] Timestamp activation started. Defaults to current time
*/
ve.init.mw.TargetEvents.prototype.trackActivationStart = function ( startTime ) {
this.timings.activationStart = startTime || ve.now();
};
/**
* Record activation being complete.
*/
ve.init.mw.TargetEvents.prototype.trackActivationComplete = function () {
this.track( 'performance.system.activation', { duration: ve.now() - this.timings.activationStart } );
};
/**
* Record the time of the last transaction in response to a 'transact' event on the document.
*/
ve.init.mw.TargetEvents.prototype.recordLastTransactionTime = function () {
this.timings.lastTransaction = ve.now();
};
/**
* Track time elapsed from beginning of save workflow to review
*/
ve.init.mw.TargetEvents.prototype.onSaveReview = function () {
this.timings.saveReview = ve.now();
this.track( 'behavior.saveDialogOpenTillReview', {
duration: this.timings.saveReview - this.timings.saveWorkflowBegin
} );
};
ve.init.mw.TargetEvents.prototype.onSurfaceReady = function () {
this.target.surface.getModel().getDocument().connect( this, {
transact: 'recordLastTransactionTime'
} );
};
/**
* Track when the user enters the review workflow
*/
ve.init.mw.TargetEvents.prototype.onShowChanges = function () {
this.track( 'performance.user.reviewComplete', { duration: ve.now() - this.timings.saveReview } );
};
/**
* Track when the diff request fails in the review workflow
*/
ve.init.mw.TargetEvents.prototype.onShowChangesError = function () {
this.track( 'performance.user.reviewError', { duration: ve.now() - this.timings.saveReview } );
};
/**
* Track when the diff request detects no changes
*/
ve.init.mw.TargetEvents.prototype.onNoChanges = function () {
this.track( 'performance.user.reviewComplete', { duration: ve.now() - this.timings.saveReview } );
};
/**
 * Track when serialization is complete in the review workflow
*/
ve.init.mw.TargetEvents.prototype.onSerializeComplete = function () {
this.track( 'performance.user.reviewComplete', { duration: ve.now() - this.timings.saveReview } );
};
/**
 * Track when there is a serialization error
*/
ve.init.mw.TargetEvents.prototype.onSerializeError = function () {
if ( this.timings.saveWorkflowBegin ) {
// This function can be called by the switch to wikitext button as well, so only log
// reviewError if we actually got here from the save workflow
this.track( 'performance.user.reviewError', { duration: ve.now() - this.timings.saveReview } );
}
};<|fim▁end|> | noChanges: 'onNoChanges',
serializeComplete: 'onSerializeComplete', |
<|file_name|>encoder.go<|end_file_name|><|fim▁begin|>// Package twooffive can create interleaved and standard "2 of 5" barcodes.
package twooffive
import (
"errors"
"fmt"
"github.com/boombuler/barcode"
"github.com/boombuler/barcode/utils"
)
const patternWidth = 5
type pattern [patternWidth]bool
type encodeInfo struct {
start []bool
end []bool
widths map[bool]int
}
var (
encodingTable = map[rune]pattern{
'0': pattern{false, false, true, true, false},
'1': pattern{true, false, false, false, true},
'2': pattern{false, true, false, false, true},
'3': pattern{true, true, false, false, false},
'4': pattern{false, false, true, false, true},
'5': pattern{true, false, true, false, false},<|fim▁hole|> '9': pattern{false, true, false, true, false},
}
modes = map[bool]encodeInfo{
false: encodeInfo{ // non-interleaved
start: []bool{true, true, false, true, true, false, true, false},
end: []bool{true, true, false, true, false, true, true},
widths: map[bool]int{
true: 3,
false: 1,
},
},
true: encodeInfo{ // interleaved
start: []bool{true, false, true, false},
end: []bool{true, true, false, true},
widths: map[bool]int{
true: 3,
false: 1,
},
},
}
nonInterleavedSpace = pattern{false, false, false, false, false}
)
// AddCheckSum calculates the correct check-digit and appends it to the given content.
func AddCheckSum(content string) (string, error) {
if content == "" {
return "", errors.New("content is empty")
}
even := len(content)%2 == 1
sum := 0
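	// weight the digits alternately 3 and 1; the starting parity is chosen so the digit adjacent to the check digit always gets weight 3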
for _, r := range content {
if _, ok := encodingTable[r]; ok {
value := utils.RuneToInt(r)
if even {
sum += value * 3
} else {
sum += value
}
even = !even
} else {
return "", fmt.Errorf("can not encode \"%s\"", content)
}
}
return content + string(utils.IntToRune(sum%10)), nil
}
// Encode creates a standard or interleaved "2 of 5" barcode for the given content
func Encode(content string, interleaved bool) (barcode.Barcode, error) {
if content == "" {
return nil, errors.New("content is empty")
}
if interleaved && len(content)%2 == 1 {
return nil, errors.New("can only encode even number of digits in interleaved mode")
}
mode := modes[interleaved]
resBits := new(utils.BitList)
resBits.AddBit(mode.start...)
var lastRune *rune
for _, r := range content {
var a, b pattern
if interleaved {
if lastRune == nil {
lastRune = new(rune)
*lastRune = r
continue
} else {
var o1, o2 bool
a, o1 = encodingTable[*lastRune]
b, o2 = encodingTable[r]
if !o1 || !o2 {
return nil, fmt.Errorf("can not encode \"%s\"", content)
}
lastRune = nil
}
} else {
var ok bool
a, ok = encodingTable[r]
if !ok {
return nil, fmt.Errorf("can not encode \"%s\"", content)
}
b = nonInterleavedSpace
}
for i := 0; i < patternWidth; i++ {
for x := 0; x < mode.widths[a[i]]; x++ {
resBits.AddBit(true)
}
for x := 0; x < mode.widths[b[i]]; x++ {
resBits.AddBit(false)
}
}
}
resBits.AddBit(mode.end...)
if interleaved {
return utils.New1DCode(barcode.Type2of5Interleaved, content, resBits), nil
} else {
return utils.New1DCode(barcode.Type2of5, content, resBits), nil
}
}<|fim▁end|> | '6': pattern{false, true, true, false, false},
'7': pattern{false, false, false, true, true},
'8': pattern{true, false, false, true, false}, |
<|file_name|>dao.py<|end_file_name|><|fim▁begin|>'''
Created on Aug 29, 2015
@author: kevinchien
'''
import datetime
# from bson import ObjectId
from tornado.gen import Task, Return
from tornado.gen import coroutine
from src.common.logutil import get_logger
# from src.core.mongoutil import get_instance
#
# @coroutine
# def update_auth(auth_info):
# new_auth_info = auth_info.copy()
# new_auth_info['updated_at'] = datetime.datetime.utcnow()
# <|fim▁hole|>#
# fields = {'$set': new_auth_info}
#
# result, error = yield Task(get_instance().auth_info.update, criteria, fields)
# if error is not None:
# raise error
#
# raise Return(result)<|fim▁end|> | # criteria = {"user_id": new_auth_info.get('user_id'),
# "access_token": new_auth_info.get('access_token'),
# "refresh_token": new_auth_info.get('refresh_token')} |
<|file_name|>character.py<|end_file_name|><|fim▁begin|>import AI.pad
import AI.state
import AI.state_manager
class Character:
def __init__(self, pad_path):
self.action_list = []
self.last_action = 0
self.pad = AI.pad.Pad(pad_path)
self.state = AI.state.State()
#Set False to enable character selection
self.test_mode = True
self.sm = AI.state_manager.StateManager(self.state, self.test_mode)
#test_mode = False, Selects character each run
def make_action(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.Characters:
mm.pick_fox(self.state, self.pad)
elif self.state.menu == AI.state.Menu.Stages:
self.pad.tilt_stick(AI.pad.Stick.C, 0.5, 0.5)
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#test_mode = True, AI starts fighting each run, saves time during testing
def make_action_test(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#implemented by each character to decide what to do
#includes some states where each character will respond the same
def logic(self):
if AI.state.is_spawning(self.state.players[2].action_state):
self.tilt_stick(60, 'DOWN')
self.tilt_stick(3, None)
#compare AI's current state
def compare_AI_state(self, test_state):
return self.state.players[2].action_state is test_state
#compare P1 current state
def compare_P1_state(self, test_state):
return self.state.players[0].action_state is test_state
#executes button presses defined in action_list, runs logic() once list is empty
def advance(self):
while self.action_list:
wait, func, args = self.action_list[0]
if self.state.frame - self.last_action < wait:
return
else:
self.action_list.pop(0)
if func is not None:
func(*args)
self.last_action = self.state.frame
else:
self.logic()
    '''Methods that simulate controller input; each appends the necessary tuple to action_list'''
def press_button(self, wait, button):
self.action_list.append((wait, self.pad.press_button, [button]))
def release_button(self, wait, button):
self.action_list.append((wait, self.pad.release_button, [button]))
def tilt_stick(self, wait, direction):
if direction is 'UP':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 1.0]))
elif direction is 'DOWN':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.0]))
elif direction is 'DOWN_LEFT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.25, 0.25]))
elif direction is 'DOWN_RIGHT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.75, 0.25]))
elif direction is 'RIGHT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 1.0, 0.5]))
elif direction is 'LEFT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.0, 0.5]))
elif direction is None:
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.5]))
def tilt_c_stick(self, wait, direction):
if direction is 'UP':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 1.0]))
elif direction is 'DOWN':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.0]))
elif direction is 'RIGHT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 1.0, 0.5]))
elif direction is 'LEFT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.0, 0.5]))
elif direction is None:
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.5]))
def press_trigger(self, wait, amount):
self.action_list.append((wait, self.pad.press_trigger, [AI.pad.Trigger.L, amount]))
def wait(self, wait):
self.action_list.append((wait, None, []))
'''Execute actions shared among all characters'''
def style(self, wait):
pass
def side_b(self, wait):
self.tilt_stick(wait, 'RIGHT')
self.press_button(1, AI.pad.Button.B)
self.release_button(2, AI.pad.Button.B)<|fim▁hole|> self.tilt_stick(2, None)
def shield(self, wait, length):
self.press_trigger(wait, 0.3)
self.press_trigger(length, 0.0)
def dashdance(self, wait, length):
self.wait(wait)
for _ in range(length):
self.tilt_stick(4, 'LEFT')
self.tilt_stick(4, 'RIGHT')
self.tilt_stick(1, None)
def shorthop(self, wait):
self.press_button(wait, AI.pad.Button.X)
self.release_button(1, AI.pad.Button.X)
    '''Execute similar actions that are dependent on character frame data'''
def wavedash(self, wait, direction, wait_airdodge):
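        # wavedash: tilt toward 'direction', short hop, then airdodge into the ground to slide along it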
self.tilt_stick(wait, direction)
self.shorthop(1)
self.press_button(wait_airdodge, AI.pad.Button.L)
self.release_button(2, AI.pad.Button.L)
self.tilt_stick(1, None)
def shorthop_nair(self, wait, wait_attack, wait_ff):
self.shorthop(wait)
self.press_button(wait_attack, AI.pad.Button.A)
self.release_button(1, AI.pad.Button.A)
self.tilt_stick(wait_ff, 'DOWN')
self.tilt_stick(3, None)
self.press_trigger(2, 0.5)
self.press_trigger(1, 0.0)<|fim▁end|> | |
<|file_name|>retrieve-map-item.6.x.py<|end_file_name|><|fim▁begin|># Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account<|fim▁hole|>auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
map_item = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_maps("Players") \
.sync_map_items("steph_curry") \
.fetch()
print(map_item.data)<|fim▁end|> | account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" |
<|file_name|>txt2json_parser.py<|end_file_name|><|fim▁begin|>import json
from sets import Set
from sys import maxint
import math
# tmp hacky functions for vec3
def norm2 (a):
return dot(a, a)
def dot ( a, b ):
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def area (a, b, c):
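    # triangle area = |u x v| / 2, using Lagrange's identity |u x v|^2 = |u|^2*|v|^2 - (u.v)^2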
u = [ b[0] - a[0], b[1] - a[1], b[2] - a[2] ]
v = [ c[0] - a[0], c[1] - a[1], c[2] - a[2] ]
dot_uv = dot(u, v)
cross2 = norm2(u) * norm2(v) - dot_uv * dot_uv
return math.sqrt(cross2) * 0.5
class DiagramJson:
def __init__(self):
self.json = {
'form': {
'vertices': {},
'vertices_2_force_faces': {}, # face array
'vertices_2_force_cells': {},
'vertices_external': None, # converted from set: vid: 1
'edges': {}
},
'force': {
'vertices': {},
'edges': {},
'faces_e': {},
'faces_v': {},
'cells': {}
},
'strength_scaler': {
'min': maxint,
'max': 0
},
'force_face_2_strength': {}
}
class Txt2JsonParser:
def __init__(self):
self.diagramJson = DiagramJson()
# # tmp data structures used only when parsing
# self.form_edge_2_vertex = {}
        self.force_face_2_form_edge = {} # inverse index, for calculating edge width i.e. area of faces (strength)
# self.form_vertex_external_count = {} # vid: count - 0, 1, 2
def readFormVertex(self, filename):
f = open(filename)
v = self.diagramJson.json['form']['vertices']
v2fa = self.diagramJson.json['form']['vertices_2_force_faces']
for line in f:
vertex = line.strip().split('\t')
# print vertex
v[vertex[0]] = map(float, vertex[1:])
# create array for form_vertices to force_face array (cells)
v2fa[vertex[0]] = []
# print self.diagramJson.json
f.close()
def readFormEdge(self, filename_edge_vertex, filename_edge_to_force_face, filename_edge_ex):
f_edge_vertex = open(filename_edge_vertex)
edges = self.diagramJson.json['form']['edges']
for line in f_edge_vertex:
edge = line.strip().split('\t')
e = edges[edge[0]] = {}
e['vertex'] = edge[1:]
# e['external'] = False
# print edge[0], e['vertex']
# print edges
f_edge_vertex.close()
v2fa = self.diagramJson.json['form']['vertices_2_force_faces']
f_edge_to_force_face = open(filename_edge_to_force_face)
for line in f_edge_to_force_face:
edge = line.strip().split('\t')
f = edge[1] if edge[1] != "Null" else None
edges[edge[0]]['force_face'] = f
edge_vertex = edges[edge[0]]['vertex']
for v in edge_vertex:
v2fa[v].append(f)
# force_face_2_form_edge (tmp structure) for compute strength
if f != None:
self.force_face_2_form_edge[f] = edge[0]
f_edge_to_force_face.close()
vertex_ex_set = Set()
f_edge_ex = open(filename_edge_ex)
for line in f_edge_ex:
edge = line.strip().split('\t')
for e in edge:
edges[e]['external'] = True
vertex_ex_set.add(edges[e]['vertex'][0])
vertex_ex_set.add(edges[e]['vertex'][1])
f_edge_ex.close()
<|fim▁hole|>
# label external force edge
for e in edges:
is_ex_vertex_0 = edges[e]['vertex'][0] in vertex_ex_set
is_ex_vertex_1 = edges[e]['vertex'][1] in vertex_ex_set
if is_ex_vertex_0 != is_ex_vertex_1:
# print edges[e]['vertex'][0], ':', is_ex_vertex_0, ' , ', edges[e]['vertex'][1], ':', is_ex_vertex_1
# force vector: from v0 to v1
edges[e]['ex_force'] = True
# print edges
# print self.diagramJson.json
def readForceVertex(self, filename):
f = open(filename)
v = self.diagramJson.json['force']['vertices']
for line in f:
vertex = line.strip().split('\t')
# print vertex
v[vertex[0]] = map(float, vertex[1:])
# print self.diagramJson.json
f.close()
def readForceEdge(self, filename_edge_vertex):
f_edge_vertex = open(filename_edge_vertex)
edges = self.diagramJson.json['force']['edges']
for line in f_edge_vertex:
edge = line.strip().split('\t')
edges[edge[0]] = edge[1:]
# print edges
f_edge_vertex.close()
# print self.diagramJson.json
def readForceFaceEdge(self, filename_face_edge):
f_face_edge = open(filename_face_edge)
edges = self.diagramJson.json['force']['edges']
faces_e = self.diagramJson.json['force']['faces_e']
# faces_v = self.diagramJson.json['force']['faces_v']
for line in f_face_edge:
face = line.strip().split('\t')
faces_e[face[0]] = face[1:]
# # convert face edge to face vertex
# cur_face_vertex = Set()
# for e in face[1:]:
# # extend vertex array
# # cur_face_vertex.extend(edges[e])
# for v in edges[e]:
# cur_face_vertex.add(v)
# faces_v[face[0]] = list(cur_face_vertex)
# print faces_v[face[0]]
f_face_edge.close()
# print self.diagramJson.json
def readForceFaceVertex(self, filename_face_vertex):
f_face_vertex = open(filename_face_vertex)
# fan shape order
faces_v = self.diagramJson.json['force']['faces_v']
strengthScaler = self.diagramJson.json['strength_scaler']
force_face_2_strength = self.diagramJson.json['force_face_2_strength']
v = self.diagramJson.json['force']['vertices']
e = self.diagramJson.json['form']['edges']
for line in f_face_vertex:
face = line.strip().split('\t')
faces_v[face[0]] = face[1:]
strength = 0
if len(face) == 4:
# tri
strength = area( v[face[1]], v[face[2]], v[face[3]] )
elif len(face) == 5:
# quad
strength = area( v[face[1]], v[face[2]], v[face[3]] ) + area( v[face[1]], v[face[3]], v[face[4]] )
else:
print 'Error: face ', face[0], ' is not tri or quad!!'
# if face[0] == '17f' or face[0] == '19f':
# print face[0], face[1:], map( lambda vid: v[vid], face[1:] ), area(v[face[1]], v[face[2]], v[face[3]]), strength
# e[ self.force_face_2_form_edge[face[0]] ]['strength'] = strength
force_face_2_strength[ face[0] ] = strength
curEdge = e[ self.force_face_2_form_edge[face[0]] ]
if 'external' not in curEdge and 'ex_force' not in curEdge:
strengthScaler['max'] = max(strength, strengthScaler['max'])
strengthScaler['min'] = min(strength, strengthScaler['min'])
f_face_vertex.close()
if __name__ == "__main__":
# foldername = "example_01"
# foldername = "example_02"
# foldername = "example_03"
foldername = "example_04"
parser = Txt2JsonParser()
parser.readFormVertex(foldername + "/form_v.txt")
parser.readFormEdge(foldername + "/form_e_v.txt", \
foldername + "/form_e_to_force_f.txt", \
foldername + "/form_e_ex.txt")
parser.readForceVertex(foldername + "/force_v.txt")
parser.readForceEdge(foldername + "/force_e_v.txt")
# parser.readForceFaceEdge(foldername + "/force_f_e.txt")
parser.readForceFaceVertex(foldername + "/force_f_v.txt")
with open(foldername + '/diagram.json', 'w') as out:
json.dump(parser.diagramJson.json, out)<|fim▁end|> | self.diagramJson.json['form']['vertices_external'] = dict.fromkeys(vertex_ex_set, 1)
|
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>use std::env;
<|fim▁hole|>use cargo::ops::CompileOptions;
use cargo::ops;
use cargo::util::important_paths::{find_root_manifest_for_cwd};
use cargo::util::{CliResult, CliError, Config};
#[derive(RustcDecodable)]
struct Options {
flag_package: Option<String>,
flag_jobs: Option<u32>,
flag_features: Vec<String>,
flag_no_default_features: bool,
flag_target: Option<String>,
flag_manifest_path: Option<String>,
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
flag_release: bool,
flag_lib: bool,
flag_bin: Vec<String>,
flag_example: Vec<String>,
flag_test: Vec<String>,
flag_bench: Vec<String>,
}
pub const USAGE: &'static str = "
Compile a local package and all of its dependencies
Usage:
cargo build [options]
Options:
-h, --help Print this message
-p SPEC, --package SPEC Package to build
-j N, --jobs N The number of jobs to run in parallel
--lib Build only this package's library
--bin NAME Build only the specified binary
--example NAME Build only the specified example
--test NAME Build only the specified test target
--bench NAME Build only the specified benchmark target
--release Build artifacts in release mode, with optimizations
--features FEATURES Space-separated list of features to also build
--no-default-features Do not build the `default` feature
--target TRIPLE Build for the target triple
--manifest-path PATH Path to the manifest to compile
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
If the --package argument is given, then SPEC is a package id specification
which indicates which package should be built. If it is not given, then the
current package is built. For more information on SPEC and its format, see the
`cargo help pkgid` command.
Compilation can be configured via the use of profiles which are configured in
the manifest. The default profile for this command is `dev`, but passing
the --release flag will use the `release` profile instead.
";
pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> {
debug!("executing; cmd=cargo-build; args={:?}",
env::args().collect::<Vec<_>>());
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path));
let opts = CompileOptions {
config: config,
jobs: options.flag_jobs,
target: options.flag_target.as_ref().map(|t| &t[..]),
features: &options.flag_features,
no_default_features: options.flag_no_default_features,
spec: options.flag_package.as_ref().map(|s| &s[..]),
exec_engine: None,
mode: ops::CompileMode::Build,
release: options.flag_release,
filter: ops::CompileFilter::new(options.flag_lib,
&options.flag_bin,
&options.flag_test,
&options.flag_example,
&options.flag_bench),
target_rustc_args: None,
};
ops::compile(&root, &opts).map(|_| None).map_err(|err| {
CliError::from_boxed(err, 101)
})
}<|fim▁end|> | |
<|file_name|>AddressType.java<|end_file_name|><|fim▁begin|>package com.neemre.bitplexus.backend.model;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;<|fim▁hole|>import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedStoredProcedureQueries;
import javax.persistence.NamedStoredProcedureQuery;
import javax.persistence.ParameterMode;
import javax.persistence.SequenceGenerator;
import javax.persistence.StoredProcedureParameter;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Past;
import javax.validation.constraints.Pattern;
import javax.validation.constraints.Size;
import org.hibernate.annotations.Generated;
import org.hibernate.annotations.GenerationTime;
import com.google.common.base.Function;
import com.google.common.collect.Ordering;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.ToString;
@Data
@NoArgsConstructor
@AllArgsConstructor
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = false)
@Entity
@Table(name = "address_type", schema = "public")
@SequenceGenerator(name = "seq_address_type_id", sequenceName = "seq_address_type_address_type_id",
allocationSize = 1)
@NamedStoredProcedureQueries(value = {
@NamedStoredProcedureQuery(name = "findIdByAddressAndChainCode",
procedureName = "f_get_address_type_id", parameters = {
@StoredProcedureParameter(mode = ParameterMode.IN, name = "in_address", type = String.class),
@StoredProcedureParameter(mode = ParameterMode.IN, name = "in_chain_code", type = String.class)})
})
public class AddressType extends BaseEntity {
public static final Ordering<AddressType> LEADING_SYMBOL_ORDERING = Ordering.natural().nullsLast()
.onResultOf(new LeadingSymbolExtractor());
public static final Ordering<AddressType> NAME_ORDERING = Ordering.natural().nullsLast()
.onResultOf(new NameExtractor());
public static final Ordering<AddressType> NATURAL_ORDERING = NAME_ORDERING
.compound(LEADING_SYMBOL_ORDERING);
private static final long serialVersionUID = 1L;
@NotNull
@Id
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "seq_address_type_id")
@Column(name = "address_type_id", insertable = false, updatable = false)
private Short addressTypeId;
@NotNull
@ManyToOne(fetch = FetchType.LAZY, optional = false)
@JoinColumn(name = "chain_id", updatable = false)
private Chain chain;
@NotNull
@Size(max = 30)
@Column(name = "code")
private String code;
@NotNull
@Size(max = 60)
@Column(name = "name")
private String name;
@NotNull
@Size(min = 1, max = 1)
@Pattern(regexp = "^[1-9a-km-zA-HJ-NP-Z]*$")
@Column(name = "leading_symbol", updatable = false)
private String leadingSymbol;
@Past
@Generated(GenerationTime.INSERT)
@Temporal(TemporalType.TIMESTAMP)
@Column(name = "created_at", insertable = false, updatable = false)
private Date createdAt;
@NotNull
@ManyToOne(fetch = FetchType.LAZY, optional = false)
@JoinColumn(name = "created_by", updatable = false)
private Employee createdBy;
@Past
@Generated(GenerationTime.ALWAYS)
@Temporal(TemporalType.TIMESTAMP)
@Column(name = "updated_at", insertable = false, updatable = false)
private Date updatedAt;
@ManyToOne(fetch = FetchType.LAZY, optional = true)
@JoinColumn(name = "updated_by", insertable = false)
private Employee updatedBy;
public void setChain(Chain chain) {
if (this.chain != chain) {
if (this.chain != null) {
this.chain.removeAddressType(this);
}
this.chain = chain;
if (chain != null) {
chain.addAddressType(this);
}
}
}
private static class LeadingSymbolExtractor implements Function<AddressType, String> {
@Override
public String apply(AddressType addressType) {
return addressType.getLeadingSymbol();
}
}
private static class NameExtractor implements Function<AddressType, String> {
@Override
public String apply(AddressType addressType) {
return addressType.getName();
}
}
}<|fim▁end|> | |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>fn main() {
// special case: sometimes the OpenCL library is squirreled away in C:\Windows\system32
if cfg!(windows) {
println!("cargo:rustc-link-search=C:\\Windows\\system32")
}
}<|fim▁end|> | |
<|file_name|>gpulearn_z_x.py<|end_file_name|><|fim▁begin|>'''
modified by Chongxuan Li ([email protected])
'''
import sys
sys.path.append('..')
sys.path.append('../../data/')
import os, numpy as np
import scipy.io as sio
import time
import anglepy as ap
import anglepy.paramgraphics as paramgraphics
import anglepy.ndict as ndict
import theano
import theano.tensor as T
from collections import OrderedDict
import preprocessing as pp
import color
def zca_dec(zca_mean, zca_winv, data):
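    # undo ZCA whitening: recover original-space data as zca_winv.dot(z) + zca_mean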
return zca_winv.dot(data) + zca_mean
def labelToMat(y):
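    # convert integer class labels into a one-hot matrix of shape (num_classes, num_samples)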
label = np.unique(y)
newy = np.zeros((len(y), len(label)))
for i in range(len(y)):
newy[i, y[i]] = 1
return newy.T
def main(n_z, n_hidden, dataset, seed, comment, gfx=True):
# Initialize logdir
import time
pre_dir = 'models/gpulearn_z_x_mnist_96-(500, 500)'
if os.environ.has_key('pretrain') and bool(int(os.environ['pretrain'])) == True:
comment+='_pre-train'
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
comment+='_prior'
pre_dir+='_prior'
if os.environ.has_key('cutoff'):
comment+=('_'+str(int(os.environ['cutoff'])))
if os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True:
comment+='_train-residual'
pre_dir+='_train-residual'
if os.environ.has_key('sigma_square'):
comment+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+='/'
logdir = 'results/gpulearn_z_x_'+dataset+'_'+str(n_z)+'-'+str(n_hidden)+comment+'_'+str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print 'logdir:', logdir
print 'gpulearn_z_x', n_z, n_hidden, dataset, seed
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'learn_z_x', n_z, n_hidden, dataset, seed
np.random.seed(seed)
gfx_freq = 1
weight_decay = 0
# Init data
if dataset == 'mnist':
import anglepy.data.mnist as mnist
# MNIST
size = 28
train_x, train_y, valid_x, valid_y, test_x, test_y = mnist.load_numpy(size)
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
mnist_prior = sio.loadmat('data/mnist_prior/mnist_prior.mat')
train_mean_prior = mnist_prior['z_train']
test_mean_prior = mnist_prior['z_test']
valid_mean_prior = mnist_prior['z_valid']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 50000
n_test = 10000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'higgs':
size = 28
f_enc, f_dec = pp.Identity()
inputfile = 'data/higgs/HIGGS.csv'
print 'loading file.'
x = np.loadtxt(inputfile, dtype='f4', delimiter=',')
print 'done.'
y = x[:,0].reshape((-1,1))
x = x[:,1:]
x = np.array(x, dtype='float32')
y = np.array(y, dtype='float32')
n_train = 10000000
n_valid = 500000
n_test = 500000
n_batch = 1000
derived_feat = 'all'
if os.environ.has_key('derived_feat'):
derived_feat = os.environ['derived_feat']
color.printBlue(derived_feat)
if derived_feat == 'high':
# Only the 7 high level features.
x = x[:, 21:28]
elif derived_feat == 'low':
# Only the 21 raw features.
x = x[:, 0:21]
else:
pass
train_x = x[0:n_train, :].T
y_train = y[0:n_train, :]
valid_x = x[n_train:n_train+n_valid, :].T
y_valid = y[n_train:n_train+n_valid, :]
test_x = x[n_train+n_valid:n_train+n_valid+n_test, :].T
y_test = y[n_train+n_valid:n_train+n_valid+n_test, :]
n_y = 2
n_x = train_x.shape[0]
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'tanh'
if os.environ.has_key('nonlinear'):
nonlinear = os.environ['nonlinear']
color.printBlue(nonlinear)
L_valid = 1
dim_input = (1,size)
type_px = 'gaussian'
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
n_y = 10
dim_input = (size,size)
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
if os.environ.has_key('type_px'):
type_px = os.environ['type_px']
color.printBlue('Generative type: '+type_px)
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
#weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10_zca':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
zca_mean, zca_w, zca_winv = cifar10.zca(train_x)
train_x = zca_w.dot(train_x-zca_mean)
test_x = zca_w.dot(test_x-zca_mean)
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
dim_input = (size,size)
n_y = 10
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
if os.environ.has_key('type_px'):
type_px = os.environ['type_px']
color.printBlue('Generative type: '+type_px)
nonlinear = 'softplus'
elif dataset == 'mnist_basic':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
#color.printRed(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'rectangle':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'rectangles_'
tmp = sio.loadmat(data_dir+'train.mat')
color.printRed(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,1000:]
valid_y = train_y[1000:]
train_x = train_x[:,:1000]
train_y = train_y[:1000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}<|fim▁hole|> n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 1000
n_valid = 200
n_test = 50000
n_batch = 500
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
#print '3', n_x
elif dataset == 'convex':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'convex_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,6000:]
valid_y = train_y[6000:]
train_x = train_x[:,:6000]
train_y = train_y[:6000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 6000
n_valid = 2000
n_test = 50000
n_batch = 120
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'rectangle_image':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'rectangles_im_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_rot':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_all_rotation_normalized_float_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_rand':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_background_random_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_image':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_background_images_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_image_rot':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_all_background_images_rotation_normalized_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_binarized':
#import anglepy.data.mnist_binarized as mnist_binarized
# MNIST
import anglepy.data.mnist as mnist
size = 28
data_dir = '/home/lichongxuan/regbayes2/data/mat_data/'+'binarized_mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
#train_y = tmp['t_train'].T.astype(np.int32)
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
tmp = sio.loadmat(data_dir+'valid.mat')
#print tmp.keys()
valid_x = tmp['x_valid'].T
#test_y = tmp['t_test'].T.astype(np.int32)
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
train_x = np.hstack((train_x, valid_x)).astype(np.float32)
train_mean_prior = np.hstack((train_mean_prior,valid_mean_prior)).astype(np.float32)
print train_mean_prior.shape
print train_x.shape
x = {'x': train_x.astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': test_x.astype(np.float32),'mean_prior':test_mean_prior.astype(np.float32)}
x_test = x_valid
L_valid = 1
dim_input = (28,28)
n_x = 28*28
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 60000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_binarized_own':
#import anglepy.data.mnist_binarized as mnist_binarized
# MNIST
import anglepy.data.mnist as mnist
size = 28
data_dir = 'data/mnist_binarized_own/'+'binarized_mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['train_x'].T
#train_y = tmp['t_train'].T.astype(np.int32)
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['test_x'].T
tmp = sio.loadmat(data_dir+'valid.mat')
#print tmp.keys()
valid_x = tmp['valid_x'].T
#test_y = tmp['t_test'].T.astype(np.int32)
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
train_x = np.hstack((train_x, valid_x)).astype(np.float32)
train_mean_prior = np.hstack((train_mean_prior,valid_mean_prior)).astype(np.float32)
print train_mean_prior.shape
print train_x.shape
x = {'x': train_x.astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': test_x.astype(np.float32),'mean_prior':test_mean_prior.astype(np.float32)}
x_test = x_valid
L_valid = 1
dim_input = (28,28)
n_x = 28*28
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 60000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'freyface':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy()
np.random.shuffle(train_x)
x = {'x': train_x.T[:,0:n_train]}
x_valid = {'x': train_x.T[:,n_train:]}
L_valid = 1
dim_input = (28,20)
n_x = 20*28
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'bounded01'
nonlinear = 'tanh' #tanh works better with freyface #'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'freyface_pca':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy().T
np.random.shuffle(train_x.T)
f_enc, f_dec, _ = pp.PCA(train_x, 0.99)
train_x = f_enc(train_x)
x = {'x': train_x[:,0:n_train].astype(np.float32)}
x_valid = {'x': train_x[:,n_train:].astype(np.float32)}
L_valid = 1
dim_input = (28,20)
n_x = train_x.shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
elif dataset == 'freyface_bernoulli':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy().T
np.random.shuffle(train_x.T)
x = {'x': train_x[:,0:n_train].astype(np.float32)}
x_valid = {'x': train_x[:,n_train:].astype(np.float32)}
L_valid = 1
dim_input = (28,20)
n_x = train_x.shape[0]
type_pz = 'gaussianmarg'
type_px = 'bernoulli'
nonlinear = 'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
elif dataset == 'norb_48_24300_pca':
size = 48
train_x, train_y, test_x, test_y = np.load('data/norb/norb_48_24300.npy')
_x = {'x': train_x, 'y': train_y}
#ndict.shuffleCols(_x)
#train_x = _x['x']
#train_y = _x['y']
#print _x['x'][:,:10000].shape
# Do PCA
print 'pca'
f_enc, f_dec, pca_params = pp.PCA(_x['x'][:,:10000], cutoff=500, toFloat=False)
ndict.savez(pca_params, logdir+'pca_params')
print 'done'
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': f_enc(train_x).astype(np.float32), 'mean_prior' : train_mean_prior.astype(np.float32)}
x_valid = {'x': f_enc(test_x).astype(np.float32), 'mean_prior' : test_mean_prior.astype(np.float32)}
x_test = {'x': f_enc(test_x).astype(np.float32), 'mean_prior' : test_mean_prior.astype(np.float32)}
x_train = x
print x['x'].shape
print x['mean_prior'].shape
L_valid = 1
n_y = 5
n_x = x['x'].shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #24300/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #24300/900 = 27
colorImg = False
#binarize = False
byteToFloat = False
bernoulli_x = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb_pca':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
f_enc, f_dec, _ = pp.PCA(train_x, 0.999)
#f_enc, f_dec, _ = pp.normalize_random(train_x)
train_x = f_enc(train_x)
test_x = f_enc(test_x)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #24300/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb_normalized':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
#f_enc, f_dec, _ = pp.PCA(train_x, 0.99)
#f_enc, f_dec, _ = pp.normalize_random(train_x)
f_enc, f_dec, _ = pp.normalize(train_x)
train_x = f_enc(train_x)
test_x = f_enc(test_x)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #24300/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'svhn':
# SVHN dataset
#import anglepy.data.svhn as svhn
size = 32
train_x, train_y, test_x, test_y = np.load('data/svhn/svhn.npy')
#extra_x, extra_y = svhn.load_numpy_extra(False, binarize_y=True)
#x = {'x': np.hstack((train_x, extra_x)), 'y':np.hstack((train_y, extra_y))}
#ndict.shuffleCols(x)
x = {'x' : train_x, 'y': train_y}
print 'Performing PCA, can take a few minutes... '
cutoff = 300
if os.environ.has_key('cutoff'):
cutoff = int(os.environ['cutoff'])
color.printBlue('cutoff: '+str(cutoff))
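# Fit PCA on the first 10,000 training columns only (for speed); f_enc/f_dec are the
# resulting encode/decode maps, and the fitted parameters are saved to the log directory.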
f_enc, f_dec, pca_params = pp.PCA(x['x'][:,:10000], cutoff=cutoff, toFloat=True)
ndict.savez(pca_params, logdir+'pca_params')
print 'Done.'
n_y = 10
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
train_mean_prior, train_y1, test_mean_prior, test_y1 = np.load('data/svhn/svhn_prior.npy')
print np.sum((train_y1 == train_y).astype(np.int32))
print np.sum((test_y1 == test_y).astype(np.int32))
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': f_enc(x['x']).astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': f_enc(test_x).astype(np.float32), 'mean_prior':test_mean_prior.astype(np.float32)}
x_valid = x_test
print x_train['x'].shape
print x_test['x'].shape
print train_y.shape
print test_y.shape
print x_train['mean_prior'].shape
print x_test['mean_prior'].shape
L_valid = 1
n_x = x['x'].shape[0]
dim_input = (size,size)
n_batch = 5000
n_train = 604388
n_valid = 26032
n_test = 26032
colorImg = True
bernoulli_x = False
byteToFloat = False
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
else:
print 'invalid data set'
exit()
#print '2', n_x
# Construct model
from anglepy.models import GPUVAE_Z_X
learning_rate1 = 3e-4
if os.environ.has_key('stepsize'):
learning_rate1 = float(os.environ['stepsize'])
color.printBlue(str(learning_rate1))
if os.environ.has_key('preoption'):
pre = int(os.environ['preoption'])
if pre == 1:
updates = get_adam_optimizer(learning_rate=3e-4, decay1=0.9, decay2=0.999, weight_decay=0)
elif pre == 2:
updates = get_adam_optimizer(learning_rate=3e-4, decay1=0.9, decay2=0.999, weight_decay=weight_decay)
else:
raise Exception('Preoption unknown')
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'preoption ' + str(pre)
else:
updates = get_adam_optimizer(learning_rate=learning_rate1, weight_decay=weight_decay)
#print '1', n_x
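# Encoder and decoder share the layer sizes in n_hidden, with the decoder using them in
# reverse order (n_hidden[::-1]); prior_sd and init_sd are forwarded to the model
# (prior scale and initial parameter scale, going by their names).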
model = GPUVAE_Z_X(updates, n_x, n_hidden, n_z, n_hidden[::-1], nonlinear, nonlinear, type_px, type_qz=type_qz, type_pz=type_pz, prior_sd=100, init_sd=1e-3)
if os.environ.has_key('pretrain') and bool(int(os.environ['pretrain'])) == True:
#dir = '/Users/dpkingma/results/learn_z_x_mnist_binarized_50-(500, 500)_mog_1412689061/'
#dir = '/Users/dpkingma/results/learn_z_x_svhn_bernoulli_300-(1000, 1000)_l1l2_sharing_and_1000HU_1412676966/'
#dir = '/Users/dpkingma/results/learn_z_x_svhn_bernoulli_300-(1000, 1000)_l1l2_sharing_and_1000HU_1412695481/'
#dir = '/Users/dpkingma/results/learn_z_x_mnist_binarized_50-(500, 500)_mog_1412695455/'
#dir = '/Users/dpkingma/results/gpulearn_z_x_svhn_pca_300-(500, 500)__1413904756/'
if len(n_hidden) == 1:
color.printBlue('pre-training-1-layer')
layer_str = '-500'
elif len(n_hidden) == 2:
color.printBlue('pre-training-2-layers')
layer_str = '-(500, 500)'
else:
raise Exception()
pre_str = 'models/gpulearn_z_x_'
if dataset == 'mnist':
#dir = pre_str + 'mnist_'+str(n_z)+layer_str+'_longrun/'
dir = 'models/mnist_z_x_50-500-500_longrun/'
elif dataset == 'mnist_rot':
dir = pre_str + 'mnist_rot_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_rand':
dir = pre_str + 'mnist_back_rand_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_image':
dir = pre_str + 'mnist_back_image_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_image_rot':
dir = pre_str + 'mnist_back_image_rot_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'rectangle':
dir = pre_str + 'rectangle_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'rectangle_image':
dir = pre_str + 'rectangle_image_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'convex':
dir = pre_str + 'convex_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_basic':
dir = pre_str + 'mnist_basic_'+str(n_z)+layer_str+'_longrun/'
if dataset == 'svhn':
if (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
print 'prior-------------------'
pre_dir = 'results/gpulearn_z_x_svhn_'+str(n_z)+'-500-500_prior_'+str(cutoff)+'_longrun/'
else:
pre_dir = 'results/gpulearn_z_x_svhn_'+str(n_z)+'-500-500_'+str(cutoff)+'_longrun/'
color.printBlue(pre_dir)
w = ndict.loadz(pre_dir+'w_best.ndict.tar.gz')
v = ndict.loadz(pre_dir+'v_best.ndict.tar.gz')
elif n_z == 50:
print 'n_z = 50', dir
w = ndict.loadz(dir+'w_best.ndict.tar.gz')
v = ndict.loadz(dir+'v_best.ndict.tar.gz')
else:
print 'n_z != 50'
w = ndict.loadz(pre_dir+'w_best.ndict.tar.gz')
v = ndict.loadz(pre_dir+'v_best.ndict.tar.gz')
ndict.set_value2(model.w, w)
ndict.set_value2(model.v, v)
# Some statistics for optimization
ll_valid_stats = [-1e99, 0]
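# ll_valid_stats[0] holds the best validation log-likelihood seen so far;
# ll_valid_stats[1] counts consecutive evaluations without an improvement.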
# Progress hook
def hook(epoch, t, ll):
if epoch%10 != 0: return
n_batch_n = n_batch
if n_batch_n > n_valid:
n_batch_n = n_valid
ll_valid, _ = model.est_loglik(x_valid, n_samples=L_valid, n_batch=n_batch_n, byteToFloat=byteToFloat)
ll_test = ll_valid
#if not dataset == 'mnist_binarized':
if not dataset == 'svhn':
ll_test, _ = model.est_loglik(x_test, n_samples=L_valid, n_batch=n_batch, byteToFloat=byteToFloat)
# Log
ndict.savez(ndict.get_value(model.v), logdir+'v')
ndict.savez(ndict.get_value(model.w), logdir+'w')
def infer(data, n_batch=1000):
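# Runs the recognition model over `data` in minibatches and collects, per example:
# all hidden-layer activations stacked (res), the posterior means (res1),
# the last hidden layer (res2) and the posterior log-variances (res3).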
#print '--', n_batch
size = data['x'].shape[1]
res = np.zeros((sum(n_hidden), size))
res1 = np.zeros((n_z,size))
res2 = np.zeros((n_hidden[-1],size))
res3 = np.zeros((n_z,size))
for i in range(0, size, n_batch):
idx_to = min(size, i+n_batch)
x_batch = ndict.getCols(data, i, idx_to)
# may have bugs
nn_batch = idx_to - i
_x, _z, _z_confab = model.gen_xz(x_batch, {}, nn_batch)
x_samples = _z_confab['x']
for (hi, hidden) in enumerate(_z_confab['hidden']):
res[sum(n_hidden[:hi]):sum(n_hidden[:hi+1]),i:i+nn_batch] = hidden
res1[:,i:i+nn_batch] = _z_confab['mean']
res2[:,i:i+nn_batch] = _z_confab['hidden'][-1]
res3[:,i:i+nn_batch] = _z_confab['logvar']
#print '--'
return res, res1, res2, res3
#print '..', n_batch
#if not dataset == 'mnist_binarized':
if not dataset == 'svhn':
z_test, z_test1, z_test2, vv_test = infer(x_test)
z_train, z_train1, z_train2, vv_train = infer(x_train)
if ll_valid > ll_valid_stats[0]:
ll_valid_stats[0] = ll_valid
ll_valid_stats[1] = 0
ndict.savez(ndict.get_value(model.v), logdir+'v_best')
ndict.savez(ndict.get_value(model.w), logdir+'w_best')
#if not dataset == 'mnist_binarized':
if dataset == 'svhn':
pass
#np.save(logdir+'full_latent', ('z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train))
#np.save(logdir+'last_latent', ('z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2))
else:
sio.savemat(logdir+'full_latent.mat', {'z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train})
sio.savemat(logdir+'mean_latent.mat', {'z_test': z_test1, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train1})
sio.savemat(logdir+'last_latent.mat', {'z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2})
else:
ll_valid_stats[1] += 1
# Stop when validation performance has not improved for 1000 consecutive evaluations
if ll_valid_stats[1] > 1000:
print "Finished"
with open(logdir+'hook.txt', 'a') as f:
print >>f, "Finished"
exit()
print epoch, t, ll, ll_valid, ll_test, ll_valid_stats
with open(logdir+'hook.txt', 'a') as f:
print >>f, epoch, t, ll, ll_valid, ll_test, ll_valid_stats
'''
if dataset != 'svhn':
l_t, px_t, pz_t, qz_t = model.test(x_train, n_samples=1, n_batch=n_batch, byteToFloat=byteToFloat)
print 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
#sigma_square = float(os.environ['sigma_square'])
print 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
print >>f, 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
'''
# Graphics
if gfx and epoch%gfx_freq == 0:
#tail = '.png'
tail = '-'+str(epoch)+'.png'
v = {i: model.v[i].get_value() for i in model.v}
w = {i: model.w[i].get_value() for i in model.w}
if 'pca' not in dataset and 'random' not in dataset and 'normalized' not in dataset and 'zca' not in dataset:
if 'w0' in v:
image = paramgraphics.mat_to_img(f_dec(v['w0'][:].T), dim_input, True, colorImg=colorImg)
image.save(logdir+'q_w0'+tail, 'PNG')
image = paramgraphics.mat_to_img(f_dec(w['out_w'][:]), dim_input, True, colorImg=colorImg)
image.save(logdir+'out_w'+tail, 'PNG')
if 'out_unif' in w:
image = paramgraphics.mat_to_img(f_dec(w['out_unif'].reshape((-1,1))), dim_input, True, colorImg=colorImg)
image.save(logdir+'out_unif'+tail, 'PNG')
if n_z == 2:
n_width = 10
import scipy.stats
z = {'z':np.zeros((2,n_width**2))}
for i in range(0,n_width):
for j in range(0,n_width):
z['z'][0,n_width*i+j] = scipy.stats.norm.ppf(float(i)/n_width+0.5/n_width)
z['z'][1,n_width*i+j] = scipy.stats.norm.ppf(float(j)/n_width+0.5/n_width)
x, _, _z = model.gen_xz({}, z, n_width**2)
if dataset == 'mnist':
x = 1 - _z['x']
image = paramgraphics.mat_to_img(f_dec(_z['x']), dim_input)
image.save(logdir+'2dmanifold'+tail, 'PNG')
else:
if 'norb' in dataset or dataset=='svhn':
nn_batch_nn = 64
else:
nn_batch_nn = 144
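# When a learned prior is supplied (and residual training is off), draw decoder samples
# conditioned on randomly selected per-example prior means, with variance taken from the
# sigma_square environment variable (default 1); otherwise fall back to unconditional samples.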
if not(os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True) and (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
mp_in = np.random.randint(0,x_train['mean_prior'].shape[1],nn_batch_nn)
m_p = x_train['mean_prior'][:,mp_in]
s_s = 1
if os.environ.has_key('sigma_square'):
s_s = float(os.environ['sigma_square'])
x_samples = model.gen_xz_prior({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples = x_samples['x']
m_p1 = (np.ones((n_z, nn_batch_nn)).T * np.mean(x_train['mean_prior'], axis = 1)).T
x_samples1 = model.gen_xz_prior({}, {}, m_p1.astype(np.float32), s_s, n_batch=nn_batch_nn)
image = paramgraphics.mat_to_img(f_dec(x_samples1['x']), dim_input, colorImg=colorImg)
image.save(logdir+'mean_samples-prior'+tail, 'PNG')
x_samples11 = model.gen_xz_prior11({}, {}, m_p, s_s, n_batch=nn_batch_nn)
image = paramgraphics.mat_to_img(f_dec(x_samples11['x']), dim_input, colorImg=colorImg)
image.save(logdir+'prior-image'+tail, 'PNG')
else:
_x, _, _z_confab = model.gen_xz({}, {}, n_batch=nn_batch_nn)
x_samples = _z_confab['x']
image = paramgraphics.mat_to_img(f_dec(x_samples), dim_input, colorImg=colorImg)
image.save(logdir+'samples-prior'+tail, 'PNG')
#x_samples = _x['x']
#image = paramgraphics.mat_to_img(x_samples, dim_input, colorImg=colorImg)
#image.save(logdir+'samples2'+tail, 'PNG')
else:
# Model with preprocessing
if 'w0' in v:
tmp = f_dec(v['w0'][:].T)
#print dim_input
#print tmp.shape
if 'zca' in dataset or dataset=='svhn':
tmp = zca_dec(zca_mean, zca_winv, tmp)
image = paramgraphics.mat_to_img(tmp, dim_input, True, colorImg=colorImg)
image.save(logdir+'q_w0'+tail, 'PNG')
tmp = f_dec(w['out_w'][:])
if 'zca' in dataset:
tmp = zca_dec(zca_mean, zca_winv, tmp)
image = paramgraphics.mat_to_img(tmp, dim_input, True, colorImg=colorImg)
image.save(logdir+'out_w'+tail, 'PNG')
if dataset == 'svhn':
nn_batch_nn = 64
else:
nn_batch_nn = 144
if not(os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True) and (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
mp_in = np.random.randint(0,x_train['mean_prior'].shape[1],nn_batch_nn)
m_p = x_train['mean_prior'][:,mp_in]
s_s = 1
if os.environ.has_key('sigma_square'):
s_s = float(os.environ['sigma_square'])
x_samples = model.gen_xz_prior({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples = zca_dec(zca_mean, zca_winv,x_samples['x'])
x_samples = np.minimum(np.maximum(x_samples, 0), 1)
x_samples11 = model.gen_xz_prior11({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples11 = zca_dec(zca_mean,zca_winv,x_samples11['x'])
x_samples11 = np.minimum(np.maximum(x_samples11, 0), 1)
image = paramgraphics.mat_to_img(x_samples11, dim_input, colorImg=colorImg)
image.save(logdir+'prior-image'+tail, 'PNG')
else:
_x, _z, _z_confab = model.gen_xz({}, {}, n_batch=nn_batch_nn)
x_samples = f_dec(_z_confab['x'])
x_samples = np.minimum(np.maximum(x_samples, 0), 1)
image = paramgraphics.mat_to_img(x_samples, dim_input, colorImg=colorImg)
image.save(logdir+'samples'+tail, 'PNG')
'''
def infer(data, n_batch=1000):
#print '--', n_batch
size = data['x'].shape[1]
res = np.zeros((sum(n_hidden), size))
res1 = np.zeros((n_z,size))
res2 = np.zeros((n_hidden[-1],size))
res3 = np.zeros((n_z,size))
for i in range(0, size, n_batch):
idx_to = min(size, i+n_batch)
x_batch = ndict.getCols(data, i, idx_to)
# may have bugs
nn_batch = idx_to - i
_x, _z, _z_confab = model.gen_xz(x_batch, {}, nn_batch)
x_samples = _z_confab['x']
for (hi, hidden) in enumerate(_z_confab['hidden']):
res[sum(n_hidden[:hi]):sum(n_hidden[:hi+1]),i:i+nn_batch] = hidden
res1[:,i:i+nn_batch] = _z_confab['mean']
res2[:,i:i+nn_batch] = _z_confab['hidden'][-1]
res3[:,i:i+nn_batch] = _z_confab['logvar']
#
return res, res1, res2, res3
#print n_batch
#if not dataset == 'mnist_binarized':
z_test, z_test1, z_test2, vv_test = infer(x_test)
z_train, z_train1, z_train2, vv_train = infer(x_train)
l_t, px_t, pz_t, qz_t = model.test(x_train, n_samples=1, n_batch=n_batch, byteToFloat=byteToFloat)
print 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
#sigma_square = float(os.environ['sigma_square'])
print 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
print >>f, 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
#if not dataset == 'mnist_binarized':
sio.savemat(logdir+'full_latent.mat', {'z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train})
sio.savemat(logdir+'mean_latent.mat', {'z_test': z_test1, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train1})
sio.savemat(logdir+'last_latent.mat', {'z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2})
'''
# Optimize
#SFO
dostep = epoch_vae_adam(model, x, n_batch=n_batch, bernoulli_x=bernoulli_x, byteToFloat=byteToFloat)
loop_va(dostep, hook)
pass
# Training loop for variational autoencoder
def loop_va(doEpoch, hook, n_epochs=1201):
t0 = time.time()
for t in xrange(1, n_epochs):
L = doEpoch()
hook(t, time.time() - t0, L)
print 'Optimization loop finished'
# Learning step for variational auto-encoder
def epoch_vae_adam(model, x, n_batch=100, convertImgs=False, bernoulli_x=False, byteToFloat=False):
print 'Variational Auto-Encoder', n_batch
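# One epoch: walk over the data in column minibatches, optionally rescaling bytes to [0,1)
# and resampling Bernoulli-distributed inputs, then accumulate the variational lower bound
# returned by model.evalAndUpdate and report its per-example average.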
def doEpoch():
from collections import OrderedDict
n_tot = x.itervalues().next().shape[1]
idx_from = 0
L = 0
while idx_from < n_tot:
idx_to = min(n_tot, idx_from+n_batch)
x_minibatch = ndict.getCols(x, idx_from, idx_to)
idx_from += n_batch
if byteToFloat: x_minibatch['x'] = x_minibatch['x'].astype(np.float32)/256.
if bernoulli_x: x_minibatch['x'] = np.random.binomial(n=1, p=x_minibatch['x']).astype(np.float32)
# Do gradient ascent step
L += model.evalAndUpdate(x_minibatch, {}).sum()
#model.profmode.print_summary()
L /= n_tot
return L
return doEpoch
def get_adam_optimizer(learning_rate=0.001, decay1=0.1, decay2=0.001, weight_decay=0.0):
print 'AdaM', learning_rate, decay1, decay2, weight_decay
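# AdaM-style updates: mom1/mom2 track biased first- and second-moment estimates, fix1/fix2
# correct the bias via the shared iteration counter, and an optional weight_decay term is
# folded into the gradient before the moment updates.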
def shared32(x, name=None, borrow=False):
return theano.shared(np.asarray(x, dtype='float32'), name=name, borrow=borrow)
def get_optimizer(w, g):
updates = OrderedDict()
it = shared32(0.)
updates[it] = it + 1.
fix1 = 1.-(1.-decay1)**(it+1.) # To make estimates unbiased
fix2 = 1.-(1.-decay2)**(it+1.) # To make estimates unbiased
lr_t = learning_rate * T.sqrt(fix2) / fix1
for i in w:
gi = g[i]
if weight_decay > 0:
gi -= weight_decay * w[i] #T.tanh(w[i])
# mean_squared_grad := E[g^2]_{t-1}
mom1 = shared32(w[i].get_value() * 0.)
mom2 = shared32(w[i].get_value() * 0.)
# Update moments
mom1_new = mom1 + decay1 * (gi - mom1)
mom2_new = mom2 + decay2 * (T.sqr(gi) - mom2)
# Compute the effective gradient and effective learning rate
effgrad = mom1_new / (T.sqrt(mom2_new) + 1e-10)
effstep_new = lr_t * effgrad
# Do update
w_new = w[i] + effstep_new
# Apply update
updates[w[i]] = w_new
updates[mom1] = mom1_new
updates[mom2] = mom2_new
return updates
return get_optimizer<|fim▁end|> | x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size |
<|file_name|>s_3225.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | search_result['3225']=["topic_00000000000007B9.html","ApplicantDetailRequestDto.Notes Property",""]; |
<|file_name|>sass.js<|end_file_name|><|fim▁begin|>'use strict';
module.exports = {
compile: {
options: {
style: 'expanded',<|fim▁hole|> src : 'src/css/style.scss',
dest : 'dist/css/style.css',
},
};<|fim▁end|> | }, |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>module.exports = [
require('./deepSouth'),
require('./fallsRiverMusic')<|fim▁hole|><|fim▁end|> | // require('./gizmoBrewWorks')
]; |
<|file_name|>htmlinputelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use caseless::compatibility_caseless_match_str;
use dom::activation::{Activatable, ActivationSource, synthetic_click_activation};
use dom::attr::{Attr, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::HTMLInputElementBinding;
use dom::bindings::codegen::Bindings::HTMLInputElementBinding::HTMLInputElementMethods;
use dom::bindings::codegen::Bindings::KeyboardEventBinding::KeyboardEventMethods;
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::global::GlobalRef;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, LayoutJS, Root, RootedReference};
use dom::bindings::refcounted::Trusted;
use dom::document::Document;
use dom::element::{AttributeMutation, Element, RawLayoutElementHelpers, LayoutElementHelpers};
use dom::event::{Event, EventBubbles, EventCancelable};
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlfieldsetelement::HTMLFieldSetElement;
use dom::htmlformelement::{FormControl, FormDatum, FormSubmitter, HTMLFormElement};
use dom::htmlformelement::{ResetFrom, SubmittedFrom};
use dom::keyboardevent::KeyboardEvent;
use dom::node::{Node, NodeDamage, UnbindContext};
use dom::node::{document_from_node, window_from_node};
use dom::nodelist::NodeList;
use dom::validation::Validatable;
use dom::virtualmethods::VirtualMethods;
use msg::constellation_msg::ConstellationChan;
use range::Range;
use script_thread::ScriptThreadEventCategory::InputEvent;
use script_thread::{CommonScriptMsg, Runnable};
use script_traits::ScriptMsg as ConstellationMsg;
use std::borrow::ToOwned;
use std::cell::Cell;
use string_cache::Atom;
use style::element_state::*;
use textinput::KeyReaction::{DispatchInput, Nothing, RedrawSelection, TriggerDefaultAction};
use textinput::Lines::Single;
use textinput::TextInput;
use util::str::{DOMString, search_index};
const DEFAULT_SUBMIT_VALUE: &'static str = "Submit";
const DEFAULT_RESET_VALUE: &'static str = "Reset";
#[derive(JSTraceable, PartialEq, Copy, Clone)]
#[allow(dead_code)]
#[derive(HeapSizeOf)]
enum InputType {
InputSubmit,
InputReset,
InputButton,
InputText,
InputFile,
InputImage,
InputCheckbox,
InputRadio,
InputPassword
}
#[derive(Debug, PartialEq)]
enum ValueMode {
Value,
Default,
DefaultOn,
Filename,
}
#[derive(JSTraceable, PartialEq, Copy, Clone)]
#[derive(HeapSizeOf)]
enum SelectionDirection {
Forward,
Backward,
None
}
#[dom_struct]
pub struct HTMLInputElement {
htmlelement: HTMLElement,
input_type: Cell<InputType>,
checked_changed: Cell<bool>,
placeholder: DOMRefCell<DOMString>,
value_changed: Cell<bool>,
size: Cell<u32>,
maxlength: Cell<i32>,
#[ignore_heap_size_of = "#7193"]
textinput: DOMRefCell<TextInput<ConstellationChan<ConstellationMsg>>>,
activation_state: DOMRefCell<InputActivationState>,
// https://html.spec.whatwg.org/multipage/#concept-input-value-dirty-flag
value_dirty: Cell<bool>,
selection_direction: Cell<SelectionDirection>,
// TODO: selected files for file input
}
#[derive(JSTraceable)]
#[must_root]
#[derive(HeapSizeOf)]
struct InputActivationState {
indeterminate: bool,
checked: bool,
checked_changed: bool,
checked_radio: Option<JS<HTMLInputElement>>,
// In case mutability changed
was_mutable: bool,
// In case the type changed
old_type: InputType,
}
impl InputActivationState {
fn new() -> InputActivationState {
InputActivationState {
indeterminate: false,
checked: false,
checked_changed: false,
checked_radio: None,
was_mutable: false,
old_type: InputType::InputText
}
}
}
static DEFAULT_INPUT_SIZE: u32 = 20;
static DEFAULT_MAX_LENGTH: i32 = -1;
impl HTMLInputElement {
fn new_inherited(localName: Atom, prefix: Option<DOMString>, document: &Document) -> HTMLInputElement {
let chan = document.window().constellation_chan();
HTMLInputElement {
htmlelement:
HTMLElement::new_inherited_with_state(IN_ENABLED_STATE,
localName, prefix, document),
input_type: Cell::new(InputType::InputText),
placeholder: DOMRefCell::new(DOMString::new()),
checked_changed: Cell::new(false),
value_changed: Cell::new(false),
maxlength: Cell::new(DEFAULT_MAX_LENGTH),
size: Cell::new(DEFAULT_INPUT_SIZE),
textinput: DOMRefCell::new(TextInput::new(Single, DOMString::new(), chan, None)),
activation_state: DOMRefCell::new(InputActivationState::new()),
value_dirty: Cell::new(false),
selection_direction: Cell::new(SelectionDirection::None)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLInputElement> {
let element = HTMLInputElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLInputElementBinding::Wrap)
}
pub fn type_(&self) -> Atom {
self.upcast::<Element>()
.get_attribute(&ns!(), &atom!("type"))
.map_or_else(|| atom!(""), |a| a.value().as_atom().to_owned())
}
// https://html.spec.whatwg.org/multipage/#input-type-attr-summary
fn get_value_mode(&self) -> ValueMode {
match self.input_type.get() {
InputType::InputSubmit |
InputType::InputReset |
InputType::InputButton |
InputType::InputImage => ValueMode::Default,
InputType::InputCheckbox |
InputType::InputRadio => ValueMode::DefaultOn,
InputType::InputPassword |
InputType::InputText => ValueMode::Value,
InputType::InputFile => ValueMode::Filename,
}
}
// this method exists so that the functions SetSelectionStart() and SetSelectionEnd()
// don't needlessly allocate strings
fn set_selection_range(&self, start: u32, end: u32, direction: &SelectionDirection) {
let mut text_input = self.textinput.borrow_mut();
let mut start = start as usize;
let mut end = end as usize;
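// Clamp both offsets to the current text length and collapse the range (start = end)
// when start would exceed end, then convert the absolute offsets into text points
// on the underlying TextInput.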
let text_end = text_input.get_content().len();
if start > text_end {
start = text_end;
}
if end > text_end {
end = text_end;
}
if start >= end {
start = end;
}
text_input.selection_begin = Some(text_input.get_text_point_for_absolute_point(start));
text_input.edit_point = text_input.get_text_point_for_absolute_point(end);
self.selection_direction.set(*direction);
}
}
pub trait LayoutHTMLInputElementHelpers {
#[allow(unsafe_code)]
unsafe fn get_value_for_layout(self) -> String;
#[allow(unsafe_code)]
unsafe fn get_size_for_layout(self) -> u32;
#[allow(unsafe_code)]
unsafe fn get_selection_for_layout(self) -> Option<Range<isize>>;
#[allow(unsafe_code)]
unsafe fn get_checked_state_for_layout(self) -> bool;
#[allow(unsafe_code)]
unsafe fn get_indeterminate_state_for_layout(self) -> bool;
}
#[allow(unsafe_code)]
unsafe fn get_raw_textinput_value(input: LayoutJS<HTMLInputElement>) -> DOMString {
(*input.unsafe_get()).textinput.borrow_for_layout().get_content()
}
impl LayoutHTMLInputElementHelpers for LayoutJS<HTMLInputElement> {
#[allow(unsafe_code)]
unsafe fn get_value_for_layout(self) -> String {
#[allow(unsafe_code)]
unsafe fn get_raw_attr_value(input: LayoutJS<HTMLInputElement>, default: &str) -> String {
let elem = input.upcast::<Element>();
let value = (*elem.unsafe_get())
.get_attr_val_for_layout(&ns!(), &atom!("value"))
.unwrap_or(default);
String::from(value)
}
match (*self.unsafe_get()).input_type.get() {
InputType::InputCheckbox | InputType::InputRadio => String::new(),
InputType::InputFile | InputType::InputImage => String::new(),
InputType::InputButton => get_raw_attr_value(self, ""),
InputType::InputSubmit => get_raw_attr_value(self, DEFAULT_SUBMIT_VALUE),
InputType::InputReset => get_raw_attr_value(self, DEFAULT_RESET_VALUE),
InputType::InputPassword => {
let text = get_raw_textinput_value(self);
if !text.is_empty() {
// The implementation of get_selection_for_layout expects a 1:1 mapping of chars.
text.chars().map(|_| '●').collect()
} else {
String::from((*self.unsafe_get()).placeholder.borrow_for_layout().clone())
}
},
_ => {
let text = get_raw_textinput_value(self);
if !text.is_empty() {
// The implementation of get_selection_for_layout expects a 1:1 mapping of chars.
String::from(text)
} else {
String::from((*self.unsafe_get()).placeholder.borrow_for_layout().clone())
}
},<|fim▁hole|> #[allow(unsafe_code)]
unsafe fn get_size_for_layout(self) -> u32 {
(*self.unsafe_get()).size.get()
}
#[allow(unrooted_must_root)]
#[allow(unsafe_code)]
unsafe fn get_selection_for_layout(self) -> Option<Range<isize>> {
if !(*self.unsafe_get()).upcast::<Element>().get_focus_state() {
return None;
}
// Use the raw textinput to get the index as long as we use a 1:1 char mapping
// in get_value_for_layout.
let raw = match (*self.unsafe_get()).input_type.get() {
InputType::InputText |
InputType::InputPassword => get_raw_textinput_value(self),
_ => return None
};
let textinput = (*self.unsafe_get()).textinput.borrow_for_layout();
let selection = textinput.get_absolute_selection_range();
let begin_byte = selection.begin();
let begin = search_index(begin_byte, raw.char_indices());
let length = search_index(selection.length(), raw[begin_byte..].char_indices());
Some(Range::new(begin, length))
}
#[allow(unrooted_must_root)]
#[allow(unsafe_code)]
unsafe fn get_checked_state_for_layout(self) -> bool {
self.upcast::<Element>().get_state_for_layout().contains(IN_CHECKED_STATE)
}
#[allow(unrooted_must_root)]
#[allow(unsafe_code)]
unsafe fn get_indeterminate_state_for_layout(self) -> bool {
self.upcast::<Element>().get_state_for_layout().contains(IN_INDETERMINATE_STATE)
}
}
impl HTMLInputElementMethods for HTMLInputElement {
// https://html.spec.whatwg.org/multipage/#dom-fe-disabled
make_bool_getter!(Disabled, "disabled");
// https://html.spec.whatwg.org/multipage/#dom-fe-disabled
make_bool_setter!(SetDisabled, "disabled");
// https://html.spec.whatwg.org/multipage/#dom-fae-form
fn GetForm(&self) -> Option<Root<HTMLFormElement>> {
self.form_owner()
}
// https://html.spec.whatwg.org/multipage/#dom-input-defaultchecked
make_bool_getter!(DefaultChecked, "checked");
// https://html.spec.whatwg.org/multipage/#dom-input-defaultchecked
make_bool_setter!(SetDefaultChecked, "checked");
// https://html.spec.whatwg.org/multipage/#dom-input-checked
fn Checked(&self) -> bool {
self.upcast::<Element>().get_state().contains(IN_CHECKED_STATE)
}
// https://html.spec.whatwg.org/multipage/#dom-input-checked
fn SetChecked(&self, checked: bool) {
self.update_checked_state(checked, true);
}
// https://html.spec.whatwg.org/multipage/#dom-input-readonly
make_bool_getter!(ReadOnly, "readonly");
// https://html.spec.whatwg.org/multipage/#dom-input-readonly
make_bool_setter!(SetReadOnly, "readonly");
// https://html.spec.whatwg.org/multipage/#dom-input-size
make_uint_getter!(Size, "size", DEFAULT_INPUT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-input-size
make_limited_uint_setter!(SetSize, "size", DEFAULT_INPUT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-input-type
make_enumerated_getter!(Type,
"type",
"text",
("hidden") | ("search") | ("tel") |
("url") | ("email") | ("password") |
("datetime") | ("date") | ("month") |
("week") | ("time") | ("datetime-local") |
("number") | ("range") | ("color") |
("checkbox") | ("radio") | ("file") |
("submit") | ("image") | ("reset") | ("button"));
// https://html.spec.whatwg.org/multipage/#dom-input-type
make_atomic_setter!(SetType, "type");
// https://html.spec.whatwg.org/multipage/#dom-input-value
fn Value(&self) -> DOMString {
match self.get_value_mode() {
ValueMode::Value => self.textinput.borrow().get_content(),
ValueMode::Default => {
self.upcast::<Element>()
.get_attribute(&ns!(), &atom!("value"))
.map_or(DOMString::from(""),
|a| DOMString::from(a.summarize().value))
}
ValueMode::DefaultOn => {
self.upcast::<Element>()
.get_attribute(&ns!(), &atom!("value"))
.map_or(DOMString::from("on"),
|a| DOMString::from(a.summarize().value))
}
ValueMode::Filename => {
// TODO: return C:\fakepath\<first of selected files> when a file is selected
DOMString::from("")
}
}
}
// https://html.spec.whatwg.org/multipage/#dom-input-value
fn SetValue(&self, value: DOMString) -> ErrorResult {
match self.get_value_mode() {
ValueMode::Value => {
self.textinput.borrow_mut().set_content(value);
self.value_dirty.set(true);
}
ValueMode::Default |
ValueMode::DefaultOn => {
self.upcast::<Element>().set_string_attribute(&atom!("value"), value);
}
ValueMode::Filename => {
if value.is_empty() {
// TODO: empty list of selected files
} else {
return Err(Error::InvalidState);
}
}
}
self.value_changed.set(true);
self.force_relayout();
Ok(())
}
// https://html.spec.whatwg.org/multipage/#dom-input-defaultvalue
make_getter!(DefaultValue, "value");
// https://html.spec.whatwg.org/multipage/#dom-input-defaultvalue
make_setter!(SetDefaultValue, "value");
// https://html.spec.whatwg.org/multipage/#attr-fe-name
make_getter!(Name, "name");
// https://html.spec.whatwg.org/multipage/#attr-fe-name
make_atomic_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#attr-input-placeholder
make_getter!(Placeholder, "placeholder");
// https://html.spec.whatwg.org/multipage/#attr-input-placeholder
make_setter!(SetPlaceholder, "placeholder");
// https://html.spec.whatwg.org/multipage/#dom-input-formaction
make_url_or_base_getter!(FormAction, "formaction");
// https://html.spec.whatwg.org/multipage/#dom-input-formaction
make_setter!(SetFormAction, "formaction");
// https://html.spec.whatwg.org/multipage/#dom-input-formenctype
make_enumerated_getter!(FormEnctype,
"formenctype",
"application/x-www-form-urlencoded",
("text/plain") | ("multipart/form-data"));
// https://html.spec.whatwg.org/multipage/#dom-input-formenctype
make_setter!(SetFormEnctype, "formenctype");
// https://html.spec.whatwg.org/multipage/#dom-input-formmethod
make_enumerated_getter!(FormMethod, "formmethod", "get", ("post") | ("dialog"));
// https://html.spec.whatwg.org/multipage/#dom-input-formmethod
make_setter!(SetFormMethod, "formmethod");
// https://html.spec.whatwg.org/multipage/#dom-input-formtarget
make_getter!(FormTarget, "formtarget");
// https://html.spec.whatwg.org/multipage/#dom-input-formtarget
make_setter!(SetFormTarget, "formtarget");
// https://html.spec.whatwg.org/multipage/#attr-fs-formnovalidate
make_bool_getter!(FormNoValidate, "formnovalidate");
// https://html.spec.whatwg.org/multipage/#attr-fs-formnovalidate
make_bool_setter!(SetFormNoValidate, "formnovalidate");
// https://html.spec.whatwg.org/multipage/#dom-input-maxlength
make_int_getter!(MaxLength, "maxlength", DEFAULT_MAX_LENGTH);
// https://html.spec.whatwg.org/multipage/#dom-input-maxlength
make_limited_int_setter!(SetMaxLength, "maxlength", DEFAULT_MAX_LENGTH);
// https://html.spec.whatwg.org/multipage/#dom-input-indeterminate
fn Indeterminate(&self) -> bool {
self.upcast::<Element>().get_state().contains(IN_INDETERMINATE_STATE)
}
// https://html.spec.whatwg.org/multipage/#dom-input-indeterminate
fn SetIndeterminate(&self, val: bool) {
self.upcast::<Element>().set_state(IN_INDETERMINATE_STATE, val)
}
// https://html.spec.whatwg.org/multipage/#dom-lfe-labels
fn Labels(&self) -> Root<NodeList> {
if self.type_() == atom!("hidden") {
let window = window_from_node(self);
NodeList::empty(&window)
} else {
self.upcast::<HTMLElement>().labels()
}
}
// https://html.spec.whatwg.org/multipage/#dom-input-selectionstart
fn SelectionStart(&self) -> u32 {
let text_input = self.textinput.borrow();
let selection_start = match text_input.selection_begin {
Some(selection_begin_point) => {
text_input.get_absolute_point_for_text_point(&selection_begin_point)
},
None => text_input.get_absolute_insertion_point()
};
selection_start as u32
}
// https://html.spec.whatwg.org/multipage/#dom-textarea/input-selectionstart
fn SetSelectionStart(&self, start: u32) {
self.set_selection_range(start, self.SelectionEnd(), &self.selection_direction.get());
}
// https://html.spec.whatwg.org/multipage/#dom-textarea/input-selectionend
fn SelectionEnd(&self) -> u32 {
let text_input = self.textinput.borrow();
text_input.get_absolute_insertion_point() as u32
}
// https://html.spec.whatwg.org/multipage/#dom-textarea/input-selectionend
fn SetSelectionEnd(&self, end: u32) {
self.set_selection_range(self.SelectionStart(), end, &self.selection_direction.get());
}
// https://html.spec.whatwg.org/multipage/#dom-textarea/input-selectiondirection
fn SelectionDirection(&self) -> DOMString {
match self.selection_direction.get() {
SelectionDirection::Forward => DOMString::from("forward"),
SelectionDirection::Backward => DOMString::from("backward"),
SelectionDirection::None => DOMString::from("none"),
}
}
// https://html.spec.whatwg.org/multipage/#dom-textarea/input-selectiondirection
fn SetSelectionDirection(&self, direction: DOMString) {
self.SetSelectionRange(self.SelectionStart(), self.SelectionEnd(), Some(direction));
}
// https://html.spec.whatwg.org/multipage/#dom-textarea/input-setselectionrange
fn SetSelectionRange(&self, start: u32, end: u32, direction: Option<DOMString>) {
let selection_direction = match direction {
Some(selection_direction) => {
match &*selection_direction {
"forward" => SelectionDirection::Forward,
"backward" => SelectionDirection::Backward,
_ => SelectionDirection::None,
}
},
None => SelectionDirection::None,
};
self.set_selection_range(start, end, &selection_direction);
}
}
#[allow(unsafe_code)]
fn broadcast_radio_checked(broadcaster: &HTMLInputElement, group: Option<&Atom>) {
match group {
None | Some(&atom!("")) => {
// Radio input elements with a missing or empty name are alone in their
// own group.
return;
},
_ => {},
}
//TODO: if not in document, use root ancestor instead of document
let owner = broadcaster.form_owner();
let doc = document_from_node(broadcaster);
// This function is a workaround for lifetime constraint difficulties.
fn do_broadcast(doc_node: &Node, broadcaster: &HTMLInputElement,
owner: Option<&HTMLFormElement>, group: Option<&Atom>) {
let iter = doc_node.query_selector_iter(DOMString::from("input[type=radio]")).unwrap()
.filter_map(Root::downcast::<HTMLInputElement>)
.filter(|r| in_same_group(r.r(), owner, group) && broadcaster != r.r());
for ref r in iter {
if r.Checked() {
r.SetChecked(false);
}
}
}
do_broadcast(doc.upcast(), broadcaster, owner.r(), group)
}
// https://html.spec.whatwg.org/multipage/#radio-button-group
fn in_same_group(other: &HTMLInputElement, owner: Option<&HTMLFormElement>,
group: Option<&Atom>) -> bool {
other.input_type.get() == InputType::InputRadio &&
// TODO Both a and b are in the same home subtree.
other.form_owner().r() == owner &&
match (other.get_radio_group_name(), group) {
(Some(ref s1), Some(s2)) => compatibility_caseless_match_str(s1, s2) && s2 != &atom!(""),
_ => false
}
}
impl HTMLInputElement {
fn force_relayout(&self) {
let doc = document_from_node(self);
doc.content_changed(self.upcast(), NodeDamage::OtherNodeDamage)
}
fn radio_group_updated(&self, group: Option<&Atom>) {
if self.Checked() {
broadcast_radio_checked(self, group);
}
}
/// https://html.spec.whatwg.org/multipage/#constructing-the-form-data-set
/// Steps range from 3.1 to 3.7 which related to the HTMLInputElement
pub fn get_form_datum(&self, submitter: Option<FormSubmitter>) -> Option<FormDatum> {
// Step 3.2
let ty = self.type_();
// Step 3.4
let name = self.Name();
let is_submitter = match submitter {
Some(FormSubmitter::InputElement(s)) => {
self == s
},
_ => false
};
match ty {
// Step 3.1: it's a button but it is not submitter.
atom!("submit") | atom!("button") | atom!("reset") if !is_submitter => return None,
// Step 3.1: it's the "Checkbox" or "Radio Button" and whose checkedness is false.
atom!("radio") | atom!("checkbox") => if !self.Checked() || name.is_empty() {
return None;
},
atom!("image") | atom!("file") => return None, // Unimplemented
// Step 3.1: it's not the "Image Button" and doesn't have a name attribute.
_ => if name.is_empty() {
return None;
}
}
// Step 3.6
Some(FormDatum {
ty: DOMString::from(&*ty), // FIXME(ajeffrey): Convert directly from Atoms to DOMStrings
name: name,
value: self.Value()
})
}
// https://html.spec.whatwg.org/multipage/#radio-button-group
fn get_radio_group_name(&self) -> Option<Atom> {
//TODO: determine form owner
self.upcast::<Element>()
.get_attribute(&ns!(), &atom!("name"))
.map(|name| name.value().as_atom().clone())
}
fn update_checked_state(&self, checked: bool, dirty: bool) {
self.upcast::<Element>().set_state(IN_CHECKED_STATE, checked);
if dirty {
self.checked_changed.set(true);
}
if self.input_type.get() == InputType::InputRadio && checked {
broadcast_radio_checked(self,
self.get_radio_group_name().as_ref());
}
self.force_relayout();
//TODO: dispatch change event
}
pub fn get_indeterminate_state(&self) -> bool {
self.Indeterminate()
}
// https://html.spec.whatwg.org/multipage/#concept-fe-mutable
fn mutable(&self) -> bool {
// https://html.spec.whatwg.org/multipage/#the-input-element:concept-fe-mutable
// https://html.spec.whatwg.org/multipage/#the-readonly-attribute:concept-fe-mutable
!(self.upcast::<Element>().get_disabled_state() || self.ReadOnly())
}
// https://html.spec.whatwg.org/multipage/#the-input-element:concept-form-reset-control
pub fn reset(&self) {
match self.input_type.get() {
InputType::InputRadio | InputType::InputCheckbox => {
self.update_checked_state(self.DefaultChecked(), false);
self.checked_changed.set(false);
},
InputType::InputImage => (),
_ => ()
}
self.SetValue(self.DefaultValue())
.expect("Failed to reset input value to default.");
self.value_dirty.set(false);
self.value_changed.set(false);
self.force_relayout();
}
}
impl VirtualMethods for HTMLInputElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!("disabled") => {
let disabled_state = match mutation {
AttributeMutation::Set(None) => true,
AttributeMutation::Set(Some(_)) => {
// Input was already disabled before.
return;
},
AttributeMutation::Removed => false,
};
let el = self.upcast::<Element>();
el.set_disabled_state(disabled_state);
el.set_enabled_state(!disabled_state);
el.check_ancestors_disabled_state_for_form_control();
},
&atom!("checked") if !self.checked_changed.get() => {
let checked_state = match mutation {
AttributeMutation::Set(None) => true,
AttributeMutation::Set(Some(_)) => {
// Input was already checked before.
return;
},
AttributeMutation::Removed => false,
};
self.update_checked_state(checked_state, false);
},
&atom!("size") => {
let size = mutation.new_value(attr).map(|value| {
value.as_uint()
});
self.size.set(size.unwrap_or(DEFAULT_INPUT_SIZE));
}
&atom!("type") => {
match mutation {
AttributeMutation::Set(_) => {
let new_type = match attr.value().as_atom() {
&atom!("button") => InputType::InputButton,
&atom!("submit") => InputType::InputSubmit,
&atom!("reset") => InputType::InputReset,
&atom!("file") => InputType::InputFile,
&atom!("radio") => InputType::InputRadio,
&atom!("checkbox") => InputType::InputCheckbox,
&atom!("password") => InputType::InputPassword,
_ => InputType::InputText,
};
// https://html.spec.whatwg.org/multipage/#input-type-change
let (old_value_mode, old_idl_value) = (self.get_value_mode(), self.Value());
self.input_type.set(new_type);
let new_value_mode = self.get_value_mode();
match (&old_value_mode, old_idl_value.is_empty(), new_value_mode) {
// Step 1
(&ValueMode::Value, false, ValueMode::Default) |
(&ValueMode::Value, false, ValueMode::DefaultOn) => {
self.SetValue(old_idl_value)
.expect("Failed to set input value on type change to a default ValueMode.");
}
// Step 2
(_, _, ValueMode::Value) if old_value_mode != ValueMode::Value => {
self.SetValue(self.upcast::<Element>()
.get_attribute(&ns!(), &atom!("value"))
.map_or(DOMString::from(""),
|a| DOMString::from(a.summarize().value)))
.expect("Failed to set input value on type change to ValueMode::Value.");
self.value_dirty.set(false);
}
// Step 3
(_, _, ValueMode::Filename) if old_value_mode != ValueMode::Filename => {
self.SetValue(DOMString::from(""))
.expect("Failed to set input value on type change to ValueMode::Filename.");
}
_ => {}
}
// Step 5
if new_type == InputType::InputRadio {
self.radio_group_updated(
self.get_radio_group_name().as_ref());
}
// TODO: Step 6 - value sanitization
},
AttributeMutation::Removed => {
if self.input_type.get() == InputType::InputRadio {
broadcast_radio_checked(
self,
self.get_radio_group_name().as_ref());
}
self.input_type.set(InputType::InputText);
}
}
},
&atom!("value") if !self.value_changed.get() => {
let value = mutation.new_value(attr).map(|value| (**value).to_owned());
self.textinput.borrow_mut().set_content(
value.map_or(DOMString::new(), DOMString::from));
},
&atom!("name") if self.input_type.get() == InputType::InputRadio => {
self.radio_group_updated(
mutation.new_value(attr).as_ref().map(|name| name.as_atom()));
},
&atom!("maxlength") => {
match *attr.value() {
AttrValue::Int(_, value) => {
if value < 0 {
self.textinput.borrow_mut().max_length = None
} else {
self.textinput.borrow_mut().max_length = Some(value as usize)
}
},
_ => panic!("Expected an AttrValue::Int"),
}
}
&atom!("placeholder") => {
// FIXME(ajeffrey): Should we do in-place mutation of the placeholder?
let mut placeholder = self.placeholder.borrow_mut();
placeholder.clear();
if let AttributeMutation::Set(_) = mutation {
placeholder.extend(
attr.value().chars().filter(|&c| c != '\n' && c != '\r'));
}
},
_ => {},
}
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("name") => AttrValue::from_atomic(value),
&atom!("size") => AttrValue::from_limited_u32(value, DEFAULT_INPUT_SIZE),
&atom!("type") => AttrValue::from_atomic(value),
&atom!("maxlength") => AttrValue::from_limited_i32(value, DEFAULT_MAX_LENGTH),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
self.upcast::<Element>().check_ancestors_disabled_state_for_form_control();
}
fn unbind_from_tree(&self, context: &UnbindContext) {
self.super_type().unwrap().unbind_from_tree(context);
let node = self.upcast::<Node>();
let el = self.upcast::<Element>();
if node.ancestors().any(|ancestor| ancestor.is::<HTMLFieldSetElement>()) {
el.check_ancestors_disabled_state_for_form_control();
} else {
el.check_disabled_attribute();
}
}
fn handle_event(&self, event: &Event) {
if let Some(s) = self.super_type() {
s.handle_event(event);
}
if event.type_() == atom!("click") && !event.DefaultPrevented() {
// TODO: Dispatch events for non activatable inputs
// https://html.spec.whatwg.org/multipage/#common-input-element-events
//TODO: set the editing position for text inputs
document_from_node(self).request_focus(self.upcast());
} else if event.type_() == atom!("keydown") && !event.DefaultPrevented() &&
(self.input_type.get() == InputType::InputText ||
self.input_type.get() == InputType::InputPassword) {
if let Some(keyevent) = event.downcast::<KeyboardEvent>() {
// This can't be inlined, as holding on to textinput.borrow_mut()
// during self.implicit_submission will cause a panic.
let action = self.textinput.borrow_mut().handle_keydown(keyevent);
match action {
TriggerDefaultAction => {
self.implicit_submission(keyevent.CtrlKey(),
keyevent.ShiftKey(),
keyevent.AltKey(),
keyevent.MetaKey());
},
DispatchInput => {
self.value_changed.set(true);
if event.IsTrusted() {
ChangeEventRunnable::send(self.upcast::<Node>());
}
self.force_relayout();
event.PreventDefault();
}
RedrawSelection => {
self.force_relayout();
event.PreventDefault();
}
Nothing => (),
}
}
}
}
}
impl FormControl for HTMLInputElement {}
impl Validatable for HTMLInputElement {}
impl Activatable for HTMLInputElement {
fn as_element(&self) -> &Element {
self.upcast()
}
fn is_instance_activatable(&self) -> bool {
match self.input_type.get() {
// https://html.spec.whatwg.org/multipage/#submit-button-state-%28type=submit%29:activation-behaviour-2
// https://html.spec.whatwg.org/multipage/#reset-button-state-%28type=reset%29:activation-behaviour-2
// https://html.spec.whatwg.org/multipage/#checkbox-state-%28type=checkbox%29:activation-behaviour-2
// https://html.spec.whatwg.org/multipage/#radio-button-state-%28type=radio%29:activation-behaviour-2
InputType::InputSubmit | InputType::InputReset
| InputType::InputCheckbox | InputType::InputRadio => self.mutable(),
_ => false
}
}
// https://html.spec.whatwg.org/multipage/#run-pre-click-activation-steps
#[allow(unsafe_code)]
fn pre_click_activation(&self) {
let mut cache = self.activation_state.borrow_mut();
let ty = self.input_type.get();
cache.old_type = ty;
cache.was_mutable = self.mutable();
if cache.was_mutable {
match ty {
// https://html.spec.whatwg.org/multipage/#submit-button-state-(type=submit):activation-behavior
// InputType::InputSubmit => (), // No behavior defined
// https://html.spec.whatwg.org/multipage/#reset-button-state-(type=reset):activation-behavior
// InputType::InputSubmit => (), // No behavior defined
InputType::InputCheckbox => {
/*
https://html.spec.whatwg.org/multipage/#checkbox-state-(type=checkbox):pre-click-activation-steps
cache current values of `checked` and `indeterminate`
we may need to restore them later
*/
cache.indeterminate = self.Indeterminate();
cache.checked = self.Checked();
cache.checked_changed = self.checked_changed.get();
self.SetIndeterminate(false);
self.SetChecked(!cache.checked);
},
// https://html.spec.whatwg.org/multipage/#radio-button-state-(type=radio):pre-click-activation-steps
InputType::InputRadio => {
//TODO: if not in document, use root ancestor instead of document
let owner = self.form_owner();
let doc = document_from_node(self);
let doc_node = doc.upcast::<Node>();
let group = self.get_radio_group_name();
// Safe since we only manipulate the DOM tree after finding an element
let checked_member = doc_node.query_selector_iter(DOMString::from("input[type=radio]"))
.unwrap()
.filter_map(Root::downcast::<HTMLInputElement>)
.find(|r| {
in_same_group(r.r(), owner.r(), group.as_ref()) &&
r.Checked()
});
cache.checked_radio = checked_member.r().map(JS::from_ref);
cache.checked_changed = self.checked_changed.get();
self.SetChecked(true);
}
_ => ()
}
}
}
// https://html.spec.whatwg.org/multipage/#run-canceled-activation-steps
fn canceled_activation(&self) {
let cache = self.activation_state.borrow();
let ty = self.input_type.get();
if cache.old_type != ty {
// Type changed, abandon ship
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=27414
return;
}
match ty {
// https://html.spec.whatwg.org/multipage/#submit-button-state-(type=submit):activation-behavior
// InputType::InputSubmit => (), // No behavior defined
// https://html.spec.whatwg.org/multipage/#reset-button-state-(type=reset):activation-behavior
// InputType::InputReset => (), // No behavior defined
// https://html.spec.whatwg.org/multipage/#checkbox-state-(type=checkbox):canceled-activation-steps
InputType::InputCheckbox => {
// We want to restore state only if the element had been changed in the first place
if cache.was_mutable {
self.SetIndeterminate(cache.indeterminate);
self.SetChecked(cache.checked);
self.checked_changed.set(cache.checked_changed);
}
},
// https://html.spec.whatwg.org/multipage/#radio-button-state-(type=radio):canceled-activation-steps
InputType::InputRadio => {
// We want to restore state only if the element had been changed in the first place
if cache.was_mutable {
let name = self.get_radio_group_name();
match cache.checked_radio.r() {
Some(o) => {
// Avoiding iterating through the whole tree here, instead
// we can check if the conditions for radio group siblings apply
if name == o.get_radio_group_name() && // TODO should be compatibility caseless
self.form_owner() == o.form_owner() &&
// TODO Both a and b are in the same home subtree
o.input_type.get() == InputType::InputRadio {
o.SetChecked(true);
} else {
self.SetChecked(false);
}
},
None => self.SetChecked(false)
};
self.checked_changed.set(cache.checked_changed);
}
}
_ => ()
}
}
// https://html.spec.whatwg.org/multipage/#run-post-click-activation-steps
fn activation_behavior(&self, _event: &Event, _target: &EventTarget) {
let ty = self.input_type.get();
if self.activation_state.borrow().old_type != ty {
// Type changed, abandon ship
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=27414
return;
}
match ty {
InputType::InputSubmit => {
// https://html.spec.whatwg.org/multipage/#submit-button-state-(type=submit):activation-behavior
// FIXME (Manishearth): support document owners (needs ability to get parent browsing context)
if self.mutable() /* and document owner is fully active */ {
self.form_owner().map(|o| {
o.submit(SubmittedFrom::NotFromFormSubmitMethod,
FormSubmitter::InputElement(self.clone()))
});
}
},
InputType::InputReset => {
// https://html.spec.whatwg.org/multipage/#reset-button-state-(type=reset):activation-behavior
// FIXME (Manishearth): support document owners (needs ability to get parent browsing context)
if self.mutable() /* and document owner is fully active */ {
self.form_owner().map(|o| {
o.reset(ResetFrom::NotFromFormResetMethod)
});
}
},
InputType::InputCheckbox | InputType::InputRadio => {
// https://html.spec.whatwg.org/multipage/#checkbox-state-(type=checkbox):activation-behavior
// https://html.spec.whatwg.org/multipage/#radio-button-state-(type=radio):activation-behavior
if self.mutable() {
let target = self.upcast::<EventTarget>();
target.fire_event("input",
EventBubbles::Bubbles,
EventCancelable::NotCancelable);
target.fire_event("change",
EventBubbles::Bubbles,
EventCancelable::NotCancelable);
}
},
_ => ()
}
}
// https://html.spec.whatwg.org/multipage/#implicit-submission
#[allow(unsafe_code)]
fn implicit_submission(&self, ctrlKey: bool, shiftKey: bool, altKey: bool, metaKey: bool) {
let doc = document_from_node(self);
let node = doc.upcast::<Node>();
let owner = self.form_owner();
let form = match owner {
None => return,
Some(ref f) => f
};
if self.upcast::<Element>().click_in_progress() {
return;
}
let submit_button;
submit_button = node.query_selector_iter(DOMString::from("input[type=submit]")).unwrap()
.filter_map(Root::downcast::<HTMLInputElement>)
.find(|r| r.form_owner() == owner);
match submit_button {
Some(ref button) => {
if button.is_instance_activatable() {
synthetic_click_activation(button.as_element(),
ctrlKey,
shiftKey,
altKey,
metaKey,
ActivationSource::NotFromClick)
}
}
None => {
let inputs = node.query_selector_iter(DOMString::from("input")).unwrap()
.filter_map(Root::downcast::<HTMLInputElement>)
.filter(|input| {
input.form_owner() == owner && match input.type_() {
atom!("text") | atom!("search") | atom!("url") | atom!("tel") |
atom!("email") | atom!("password") | atom!("datetime") |
atom!("date") | atom!("month") | atom!("week") | atom!("time") |
atom!("datetime-local") | atom!("number")
=> true,
_ => false
}
});
if inputs.skip(1).next().is_some() {
// lazily test for > 1 submission-blocking inputs
return;
}
form.submit(SubmittedFrom::NotFromFormSubmitMethod,
FormSubmitter::FormElement(form.r()));
}
}
}
}
pub struct ChangeEventRunnable {
element: Trusted<Node>,
}
impl ChangeEventRunnable {
pub fn send(node: &Node) {
let window = window_from_node(node);
let window = window.r();
let chan = window.user_interaction_task_source();
let handler = Trusted::new(node, chan.clone());
let dispatcher = ChangeEventRunnable {
element: handler,
};
let _ = chan.send(CommonScriptMsg::RunnableMsg(InputEvent, box dispatcher));
}
}
impl Runnable for ChangeEventRunnable {
fn handler(self: Box<ChangeEventRunnable>) {
let target = self.element.root();
let window = window_from_node(target.r());
let window = window.r();
let event = Event::new(GlobalRef::Window(window),
atom!("input"),
EventBubbles::Bubbles,
EventCancelable::NotCancelable);
target.upcast::<EventTarget>().dispatch_event(&event);
}
}<|fim▁end|> | }
}
#[allow(unrooted_must_root)] |
<|file_name|>checks_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package preflight
import (
"bytes"
"fmt"
"io/ioutil"
"strings"
"testing"
"github.com/lithammer/dedent"
"github.com/pkg/errors"
"net/http"
"os"
"k8s.io/apimachinery/pkg/util/sets"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
var (
externalEtcdRootCAFileContent = dedent.Dedent(`
-----BEGIN CERTIFICATE-----
MIIFrjCCA5agAwIBAgIUJAM5bQz/Ann8qye8T7Uyl+cAt3wwDQYJKoZIhvcNAQEN
BQAwbzEOMAwGA1UEBhMFQ2hpbmExDzANBgNVBAgTBkhhaW5hbjEOMAwGA1UEBxMF
U2FueWExDTALBgNVBAoTBGV0Y2QxFjAUBgNVBAsTDWV0Y2Qgc2VjdXJpdHkxFTAT
BgNVBAMTDGV0Y2Qtcm9vdC1jYTAeFw0xNzAyMjIwNzEyMDBaFw0yMjAyMjEwNzEy
MDBaMG8xDjAMBgNVBAYTBUNoaW5hMQ8wDQYDVQQIEwZIYWluYW4xDjAMBgNVBAcT
BVNhbnlhMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIHNlY3VyaXR5MRUw
EwYDVQQDEwxldGNkLXJvb3QtY2EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQDD16VNTwvEvy1yd/vt8Eq2NwTw51mKHGYlZwsDqdqMEnEiWoJ7Iv9HZ+cl
jX0FnahKnaV76j3xPO73L5WOvRYxnZ8MvU/aBdDO+Tct4ht3m7TJaav6s55otjDy
dQNmlpBt4fFEB/nDozQaocfu2mqr5nyKJOjJpe+57Uw4h0LshreDOlzHEs8CkP6W
/B9yGFARVyz84YgVtemUX8WTB3cVU49KEYMCuhqXY8s97xSTGT/4Tq/MruKb2V+w
uUPjvyO5eIUcWetjBhgEGsS37NrsSFhoUNMp/PtIkth0LQoWb9sjnG069KIQqm61
1PKxH7jgLYLf4q455iAuTFr0lF1OcmICTeJB+GiS+3ubOb1TH3AYICXvQUniNWJx
sDz3qUUu4GLHk9wHtdNmX2FXYB8kHMZAidDM4Zw3IhZZap6n6BlGVVBV5h8sNM3t
SB+pDLuAaZLx3/ah2ds6AwkfaMdYDsE/MWcWQqzBfhOp758Mx3dF16IY+6IQp0RS
8qGKxgLDnTF9LgyHVOait2N/pT54faf8//ShSqTqzTK1wzHCkYwL6/B259zXWxeX
z4gOpQOk4rO4pgm/65QW9aKzHoQnpQ7lFQL2cdsKJv2tyC7pDfVrFy2uHWaUibbP
7pDw3OD8MQwR1TuhflK1AIicpMQe/kTAuRwH4fneeaGVdddBQQIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUtoqcReNJ
p8z8Hz1/Q7XMK2fgi74wDQYJKoZIhvcNAQENBQADggIBADbh4HB//Gb0TUUEPoSw
VMJSUK1pb6KVTqAITSCKPwGT8KfCvVpUxEjh9J3dm1L8wbdr48yffdjhdl96cx2F
aGWdUIxRBIcpt5xvauBoj0OwfNcD5B9q1aKuh5XPNu4BndNeGw51vdJ8bJbtrZa8
wKWF/PHciCo/wlzE/YgsemHeY5bYeXawXVP/+ocoLH82Fb8Aq0Af3ZABiA6fmawz
FiZlnIrZnHVJYSap4yDhC/AQECXKY5gj7kjSnDebsIYds5OrW0D3LeRzs+q5nQXE
xR35qg834kxUULS8AywqmR3+zjfeymm2FtsjT/PuzEImA80y29qpLZIpPg0meKHF
pCMJkEHaRh4/JAinLaKCGLpnchqBy7CR6yvVnGkx93J0louIbVyUfn63R6mxCvd7
kL16a2xBMKgV4RDFcu+VYjbJTFdWOTGFrxPBmd/rLdwD3XNiwPtI0vXGM7I35DDP
SWwKVvR97F3uEnIQ1u8vHa1pNfQ1qSf/+hUJx2D9ypr7LTQ0LpLh1vUeTeUAVHmT
EEpcqzDg6lsqXw6KHJ55kd3QR/hRXd/Vr6EWUawDEnGjxyFVV2dTBbunsbSobNI4
eKV+60oCk3NMwrZoLw4Fv5qs2saS62dgJNfxbKqBX9ljSQxGzHjRwh+hVByCnG8m
Z9JkQayesM6D7uwbQJXd5rgy
-----END CERTIFICATE-----
`)
externalEtcdCertFileContent = dedent.Dedent(`
-----BEGIN CERTIFICATE-----
MIIGEjCCA/qgAwIBAgIURHJFslbPveA1WwQ4FaPJg1x6B8YwDQYJKoZIhvcNAQEN
BQAwbzEOMAwGA1UEBhMFQ2hpbmExDzANBgNVBAgTBkhhaW5hbjEOMAwGA1UEBxMF
U2FueWExDTALBgNVBAoTBGV0Y2QxFjAUBgNVBAsTDWV0Y2Qgc2VjdXJpdHkxFTAT
BgNVBAMTDGV0Y2Qtcm9vdC1jYTAeFw0xNzAyMjIwNzE0MDBaFw0yNzAyMjAwNzE0
MDBaMGwxDjAMBgNVBAYTBUNoaW5hMQ8wDQYDVQQIEwZIYWluYW4xDjAMBgNVBAcT
BVNhbnlhMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIHNlY3VyaXR5MRIw
EAYDVQQDEwlteS1ldGNkLTEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQCmCR4OSRrUCES90sUbj5tvjF24lPCMj7qP9MBUxcVvWfaJM12o4AxqBr8OThgd
lpNvlbKmRpfvbraXiDnuGty1vPa3z7RmKbwFgENfgKHz4fUw/MQ7CALOQ5PAvgf1
rQ6Ii4cr49nWctpQmBXHtZRjvquBYnw70KrWfQ121DwPYy7cb/StuHLsTgqsgzhl
ECILWCj9GNqcGQr5+ZvwUxa2yam2CS1M+PLbB6HxX/4RBBTWKAt8+kjt6TxxMaSE
bNDHNDLWzQSpxg5qTLOQtrubFD4O3JT2E8DEj+LvXJKH7pJd1Z+r0m3ymQvBAIXr
6OJs+sHbaaxKWS35k9m88NRojR+r5KPoEcBgxhtBtXUfMS5v5dTtcNsHl/mHmTC+
gWiqpzA+tF55uUEWhRoA+pN7Ie2PviRhG43t99l7bsHVnrxZQqWsWlvCxMN1c2+7
PRwhsYZFITyKcMSvd19Nb5HGc5hT7btZlWc2xKS2YNnDXbD8C5SdxZek5Cb/xRxL
T8taf2c1bHs8sZrzIK2DCGvaN3471WEnmaCuRWr2fqyJeCPwsvvWeNDVmgPP6v7g
ncyy+4QyyfNrdURTZFyw81ZbCiznPc070u7vtIYt3Sa0NXd0oEG1ybAZwBIYhMOY
5ctepJLf7QxHXR70RdI0ksHEmZGZ1igk7gzhmHEgQM87pQIDAQABo4GoMIGlMA4G
A1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYD
VR0TAQH/BAIwADAdBgNVHQ4EFgQU0U/Zn4mc95UXm+LVO67wqJpL9gIwHwYDVR0j
BBgwFoAUtoqcReNJp8z8Hz1/Q7XMK2fgi74wJgYDVR0RBB8wHYIJbG9jYWxob3N0
hwR/AAABhwQKcjPGhwQKcgwwMA0GCSqGSIb3DQEBDQUAA4ICAQCikW5SNpndBxEz
qblER72KkfSEXMFhQry3RZJeAw6rQiOl+PMJqMnylcepOAUrNi20emS270dQDh3z
Hw/JBgKftZ1JrjbF9NF4oFUZcFUKmTgyWYnhLH0BskgwJf2u+DpugFa4U8niQf15
ciZGoUfWCGOJbgVP7esdnyhH/P/DpOEObWf8vOfvfQ49r7MzATyzMESyJjdtAH/F
c5JKACxpJhaYfTZ78F43jSw0vswBdLQ7fJWqg/sJBlTG0GBFJcEJzFVpwzYUxwZ4
rUpAn4A02M2V9XDNlptrWvcQz/5Vs/aCmehz7GOiMJB6SLWcMSpJRLMqoJjaFVfO
OPm7bWMMaVOUPedzvcBKRXmEAg7HQnm3ibkVNjTW8Hr66n34Yk/dO9WXD+6IXnOQ
bMY+Mf9vpIsscSpGTO15sAKqiXCzHR9RWqNd4U3jvo3JtewkNMhIKzPThgYNfsO3
7HSrlfffeEQKc59rDUaC3Y9YSc5ERJRMC+mdOqXNMy2iedZnNEsmgYlaVDg6xfG8
65w9UkMOe+DTJtMHnMxP4rT6WE4cKysQeSYxkyo/jh+8rKEy9+AyuEntJAknABUc
N5mizdYu8nrtiSu9jdLKhwO41gC2IlXPUHizylo6g24RFVBjHLlzYAAsVMMMSQW1
XRMVQjawUTknbAgHuE7/rEX8c27WUA==
-----END CERTIFICATE-----
`)
externalEtcdKeyFileContent = dedent.Dedent(`
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEApgkeDkka1AhEvdLFG4+bb4xduJTwjI+6j/TAVMXFb1n2iTNd
qOAMaga/Dk4YHZaTb5WypkaX7262l4g57hrctbz2t8+0Zim8BYBDX4Ch8+H1MPzE
OwgCzkOTwL4H9a0OiIuHK+PZ1nLaUJgVx7WUY76rgWJ8O9Cq1n0NdtQ8D2Mu3G/0
rbhy7E4KrIM4ZRAiC1go/RjanBkK+fmb8FMWtsmptgktTPjy2weh8V/+EQQU1igL
fPpI7ek8cTGkhGzQxzQy1s0EqcYOakyzkLa7mxQ+DtyU9hPAxI/i71ySh+6SXdWf
q9Jt8pkLwQCF6+jibPrB22msSlkt+ZPZvPDUaI0fq+Sj6BHAYMYbQbV1HzEub+XU
7XDbB5f5h5kwvoFoqqcwPrReeblBFoUaAPqTeyHtj74kYRuN7ffZe27B1Z68WUKl
rFpbwsTDdXNvuz0cIbGGRSE8inDEr3dfTW+RxnOYU+27WZVnNsSktmDZw12w/AuU
ncWXpOQm/8UcS0/LWn9nNWx7PLGa8yCtgwhr2jd+O9VhJ5mgrkVq9n6siXgj8LL7
1njQ1ZoDz+r+4J3MsvuEMsnza3VEU2RcsPNWWwos5z3NO9Lu77SGLd0mtDV3dKBB
tcmwGcASGITDmOXLXqSS3+0MR10e9EXSNJLBxJmRmdYoJO4M4ZhxIEDPO6UCAwEA
AQKCAgEAmr3OlDPP3CLkpiFEcJ5TmA+y3S96TRY7IqVRhvBXRKMMoOwNczF0gHBP
Ka7gzNqkCA/1UwBh49VEOU/N5bqFTp+RNNhQYhKtWFck82H4Dkrd8EzzOa0KqF/U
2YKB+pbR/7JCRUZypGmgTBKh4eG6LYfrYYd/D2Q3g/VCUigU3aZrayiwWiOYf+Fw
Ez2slowFnpsIgHHkdCzmzPi0O7PEbJDgKXa+EInIFRg09renGwa5wKnLoyvEQm7o
VPqWQJEFt1JPu1+R5ARhNPLNO6cCi9K+z60G65yXQNp0/u5A5o0TPn609DcHH11B
1ht9tNL0C+tcNvhyiUw6C+uet3egDVu1TqptzAfb2Y3MQK6UV/by7KJxcFxBAzWl
UQ4zDaQzCcU81T92zI+XeRSJuCPuOL61mH7zEiPZZPOLV8MbxBX/7lj+IJTBL+vJ
Idq7Nn/+LRtuSy5PH2MzZ5DzIMmjkjQ/ScpzAr9Zpkm3dpTcGTpFV0uqHseE77Re
55tz9uB7pxV1n6Gz4uMNnsioEYsFIRfzst4QWDdaQqcYJQuKvW9pXNmgRgSCIlft
54DxQ98a1PVFmS40TT9mjUg0P66m+8bk5vEb58iAjoYJRcoriZhlT6cOcuPW6hos
3PfA2gMXuWu61mAjzdP0zbzNBXCn5nRppqLNmWMVZCI0nLjmyZUCggEBAMEpCQu9
cRWc/GjvmnfXHewvqQHu3A3J1HCLR0VqJo8rcIIvhSe7dPRAMtUFxV1R2eOfMvSZ
Y4y69tMHZPVTgnp2t5TSavjpMqSQLvXyBkgL8FnGEl5l6HEQTm8y0C13Cm+CUB5a
uxQnQflkX539SjWX0XdOmYuLORmrKGxgcDOd9652fDJcFSXYa0mx6KN2JZHh9psA
9ldHhUIq1ngoVnrctlK53MptckPrFwMFdXRCKiMfkvpUkXTeXu4D7Z1VNh2V/3gF
lmRNioXaxp7W8omBSQlwaHY5btPj5jktiC9/so4ORqJjHvbCURrIvdkPPaXi/YJy
HdoOgHYFnn3p6M8CggEBANwNDtdbHWwwVC7Op6TNc8qK+SWAId5RqPOmM70XBVvg
u9nxT7a5vmRTs81fcVoxtE0t+KWIfOXquxqTbk0ONqIsl2CLTiTFaNHoHlvwgFBT
aYukORiGILIzOJr82RPugAw1+j8jmw3OsCOXnf2odGs+oC/V9vEd9NyZpDHPohtK
a8Bk8p326mQam23ArUesIqnw31fG22KRpoLXuk/9nNcAAAZd1Qd9hGWf0HHxunXB
wj6e3VTm0G4NPTli5vmVavYRPMFUUJpU5lwTHhlrHTSmANHTjZGnn0mEOfIrfodF
ODwJjwoyq4rPls0fqOvyAyBCnhop4fC8yOd4cQcLSUsCggEAbv9Br3lhLmZTtYla
XltDWqHYoL+9vD6q0TF39y+UkNkJggYEolxaTLFHhJoYXBPY/bBR+7TZO9mEVKf/
H+qpI+5seByiU/7NlzszgSle6q/RogTsMUqmU7JnIAc3EalCWemsWIUS0/XrN4Cy
YXtX1Yw0VjbYjROn8FQmmoCgeUjhN2Pm4pl/nYvLu0F8ydHurPIIX/IhnO4AaZFs
RQgJCfki3E7pzXkvHFBPnPDaGcCbritKrodCPsI6EtQ3Cx4YRtAXScUMMv9MBrc9
Q7GJFfMxITdzD9zZDvH7Lgg4JfNfi7owZMhI1su7B4UrczwK1PSncPpapR+IOkno
VbrAiQKCAQB2xGV6PqdGuV72VHuPK4SPkSqf3uRoxdJWjyHlsQMnb8hz/RZ1HRNx
uuuUsSrQ73rNHT7SuTQQM/0AfwpNdJpwNXkOlqF6n0HP6WRZYxkeQab5w409e0cy
ZwrqPAY+B7/81zVV1rXdYe0XiMGxIraTG54Bs44w3WZHmnVQnSx1Zll54gJA1//y
P5ocRp4/zNx4tJUXHzFRpiMlA6J/gfag5FMfHI3aGRjYcMVken+VBxr8CWqUZG+i
tmqRCpx3oPm2Dd+oyQUoByK+F2NrfLCqtd5DYddLAhmq6D8OQgNspyOO4+ncKzUD
Gr/dvnTBxEGDq/EBVhGoiXw10n/OuXy5AoIBAAUAoTyt4gQjjC0ddtMLN7+R1Ymp
eNULpq2XTvidj7jaysIW9Q52ncvN6h2Vds/Z3Ujvdne2jMq7Q/C96fKcrhgMH9ca
ADGLWtD+VkP4NgFjj7R2jabF8d9IQdJDXAgvR/kokojF0RsJuvD2hawN6lQkkj6S
fNNGMBk4sGyt7gzAn3iO4Zoy+QjtALNnZcaH6s7oIg3UKf6OwskiBB60Q5P1U3/E
RPtTxhex3jFuySNJ413JgyGkvcP+qjuzi6eyVDxkfiyNohQYGuZ8rieFX7QfQFAY
TIXptchVUTxmGKWzcpLC3AfkwFvV2IPoMk8YnDSp270D30cqWiI9puSEcxQ=
-----END RSA PRIVATE KEY-----
`)
)
type preflightCheckTest struct {
msg string
}
func (pfct preflightCheckTest) Name() string {
return "preflightCheckTest"
}
func (pfct preflightCheckTest) Check() (warning, errorList []error) {
if pfct.msg == "warning" {
return []error{errors.New("warning")}, nil
}
if pfct.msg != "" {
return nil, []error{errors.New("fake error")}
}
return
}
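// alwaysWarn is an illustrative checker, not part of the original kubeadm test
// suite: any type with this Name()/Check() pair satisfies the Checker contract
// exercised by RunChecks, where the first returned slice carries warnings and
// the second carries hard errors.
type alwaysWarn struct{}

func (alwaysWarn) Name() string { return "alwaysWarn" }

func (alwaysWarn) Check() (warnings, errorList []error) {
	return []error{errors.New("just a warning")}, nil
}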
func TestRunInitNodeChecks(t *testing.T) {
var tests = []struct {
name string
cfg *kubeadmapi.InitConfiguration
expected bool
isSecondaryControlPlane bool
downloadCerts bool
}{
{name: "Test valid advertised address",
cfg: &kubeadmapi.InitConfiguration{
LocalAPIEndpoint: kubeadmapi.APIEndpoint{AdvertiseAddress: "foo"},
},
expected: false,
},
{
name: "Test CA file exists if specified",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CAFile: "/foo"}},
},
},
expected: false,
},
{
name: "Skip test CA file exists if specified/download certs",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CAFile: "/foo"}},
},
},
expected: true,
isSecondaryControlPlane: true,
downloadCerts: true,
},
{
name: "Test Cert file exists if specified",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CertFile: "/foo"}},
},
},
expected: false,
},
{
name: "Test Key file exists if specified",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CertFile: "/foo"}},
},
},
expected: false,
},
{
cfg: &kubeadmapi.InitConfiguration{
LocalAPIEndpoint: kubeadmapi.APIEndpoint{AdvertiseAddress: "2001:1234::1:15"},
},
expected: false,
},
}
for _, rt := range tests {
// TODO: Make RunInitNodeChecks accept a ClusterConfiguration object instead of InitConfiguration
actual := RunInitNodeChecks(exec.New(), rt.cfg, sets.NewString(), rt.isSecondaryControlPlane, rt.downloadCerts)
if (actual == nil) != rt.expected {
t.Errorf(
"failed RunInitNodeChecks:\n\texpected: %t\n\t actual: %t\n\t error: %v",
rt.expected,
(actual == nil),
actual,
)
}
}
}
func TestRunJoinNodeChecks(t *testing.T) {
var tests = []struct {
cfg *kubeadmapi.JoinConfiguration
expected bool
}{
{
cfg: &kubeadmapi.JoinConfiguration{},
expected: false,
},
{
cfg: &kubeadmapi.JoinConfiguration{
Discovery: kubeadmapi.Discovery{
BootstrapToken: &kubeadmapi.BootstrapTokenDiscovery{
APIServerEndpoint: "192.168.1.15",
},
},
},
expected: false,
},
{
cfg: &kubeadmapi.JoinConfiguration{
Discovery: kubeadmapi.Discovery{
BootstrapToken: &kubeadmapi.BootstrapTokenDiscovery{
APIServerEndpoint: "2001:1234::1:15",
},
},
},
expected: false,
},
}
for _, rt := range tests {
actual := RunJoinNodeChecks(exec.New(), rt.cfg, sets.NewString())
if (actual == nil) != rt.expected {
t.Errorf(
"failed RunJoinNodeChecks:\n\texpected: %t\n\t actual: %t",
rt.expected,
				(actual == nil),
)
}
}
}
func TestRunChecks(t *testing.T) {
var tokenTest = []struct {
p []Checker
expected bool
output string
}{
{[]Checker{}, true, ""},
{[]Checker{preflightCheckTest{"warning"}}, true, "\t[WARNING preflightCheckTest]: warning\n"}, // should just print warning
{[]Checker{preflightCheckTest{"error"}}, false, ""},
{[]Checker{preflightCheckTest{"test"}}, false, ""},
{[]Checker{DirAvailableCheck{Path: "/does/not/exist"}}, true, ""},
{[]Checker{DirAvailableCheck{Path: "/"}}, false, ""},
{[]Checker{FileAvailableCheck{Path: "/does/not/exist"}}, true, ""},
{[]Checker{FileContentCheck{Path: "/does/not/exist"}}, false, ""},
{[]Checker{FileContentCheck{Path: "/"}}, true, ""},
{[]Checker{FileContentCheck{Path: "/", Content: []byte("does not exist")}}, false, ""},
{[]Checker{InPathCheck{executable: "foobarbaz", exec: exec.New()}}, true, "\t[WARNING FileExisting-foobarbaz]: foobarbaz not found in system path\n"},
{[]Checker{InPathCheck{executable: "foobarbaz", mandatory: true, exec: exec.New()}}, false, ""},
{[]Checker{InPathCheck{executable: "foobar", mandatory: false, exec: exec.New(), suggestion: "install foobar"}}, true, "\t[WARNING FileExisting-foobar]: foobar not found in system path\nSuggestion: install foobar\n"},
}
for _, rt := range tokenTest {
buf := new(bytes.Buffer)
actual := RunChecks(rt.p, buf, sets.NewString())
if (actual == nil) != rt.expected {
t.Errorf(
"failed RunChecks:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
if buf.String() != rt.output {
t.Errorf(
"failed RunChecks:\n\texpected: %s\n\t actual: %s",
rt.output,
buf.String(),
)
}
}
}
func TestConfigRootCAs(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "kubeadm-external-etcd-test-cafile")
if err != nil {
t.Errorf("failed configRootCAs:\n\texpected: succeed creating temp CA file\n\tactual:%v", err)
}
defer os.Remove(f.Name())
if err := ioutil.WriteFile(f.Name(), []byte(externalEtcdRootCAFileContent), 0644); err != nil {
t.Errorf("failed configRootCAs:\n\texpected: succeed writing contents to temp CA file %s\n\tactual:%v", f.Name(), err)
}
c := ExternalEtcdVersionCheck{Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CAFile: f.Name()}}}
config, err := c.configRootCAs(nil)
if err != nil {
t.Errorf(
"failed configRootCAs:\n\texpected: has no error\n\tactual:%v",
err,
)
}
if config.RootCAs == nil {
t.Errorf(
"failed configRootCAs:\n\texpected: RootCAs not equal to nil\n\tactual:%v",
config.RootCAs,
)
}
}
func TestConfigCertAndKey(t *testing.T) {
certFile, err := ioutil.TempFile(os.TempDir(), "kubeadm-external-etcd-test-certfile")
if err != nil {
t.Errorf(
"failed configCertAndKey:\n\texpected: succeed creating temp CertFile file\n\tactual:%v",
err,
)
}
defer os.Remove(certFile.Name())
if err := ioutil.WriteFile(certFile.Name(), []byte(externalEtcdCertFileContent), 0644); err != nil {
t.Errorf(
"failed configCertAndKey:\n\texpected: succeed writing contents to temp CertFile file %s\n\tactual:%v",
certFile.Name(),
err,
)
}
keyFile, err := ioutil.TempFile(os.TempDir(), "kubeadm-external-etcd-test-keyfile")
if err != nil {
t.Errorf(
"failed configCertAndKey:\n\texpected: succeed creating temp KeyFile file\n\tactual:%v",
err,
)
}
defer os.Remove(keyFile.Name())
if err := ioutil.WriteFile(keyFile.Name(), []byte(externalEtcdKeyFileContent), 0644); err != nil {
t.Errorf(
"failed configCertAndKey:\n\texpected: succeed writing contents to temp KeyFile file %s\n\tactual:%v",
keyFile.Name(),
err,
)
}
c := ExternalEtcdVersionCheck{
Etcd: kubeadmapi.Etcd{
External: &kubeadmapi.ExternalEtcd{
CertFile: certFile.Name(),
KeyFile: keyFile.Name(),
},
},
}
config, err := c.configCertAndKey(nil)
if err != nil {
t.Errorf(
"failed configCertAndKey:\n\texpected: has no error\n\tactual:%v",
err,
)
}
if config.Certificates == nil {
t.Errorf(
"failed configCertAndKey:\n\texpected: Certificates not equal to nil\n\tactual:%v",
config.Certificates,
)
}
}
func TestKubernetesVersionCheck(t *testing.T) {
var tests = []struct {
check KubernetesVersionCheck
expectWarnings bool
}{
{
check: KubernetesVersionCheck{
KubeadmVersion: "v1.6.6", //Same version
KubernetesVersion: "v1.6.6",
},
expectWarnings: false,
},
{
check: KubernetesVersionCheck{
KubeadmVersion: "v1.6.6", //KubernetesVersion version older than KubeadmVersion
KubernetesVersion: "v1.5.5",
},
expectWarnings: false,
},
{
check: KubernetesVersionCheck{
KubeadmVersion: "v1.6.6", //KubernetesVersion newer than KubeadmVersion, within the same minor release (new patch)
KubernetesVersion: "v1.6.7",
},
expectWarnings: false,
},
{
check: KubernetesVersionCheck{
KubeadmVersion: "v1.6.6", //KubernetesVersion newer than KubeadmVersion, in a different minor/in pre-release
KubernetesVersion: "v1.7.0-alpha.0",
},
expectWarnings: true,
},
{
check: KubernetesVersionCheck{
KubeadmVersion: "v1.6.6", //KubernetesVersion newer than KubeadmVersion, in a different minor/stable
KubernetesVersion: "v1.7.0",
},
expectWarnings: true,
},
{
check: KubernetesVersionCheck{
KubeadmVersion: "v0.0.0", //"super-custom" builds - Skip this check
KubernetesVersion: "v1.7.0",
},
expectWarnings: false,
},
}
for _, rt := range tests {
warning, _ := rt.check.Check()
if (warning != nil) != rt.expectWarnings {
t.Errorf(
"failed KubernetesVersionCheck:\n\texpected: %t\n\t actual: %t (KubeadmVersion:%s, KubernetesVersion: %s)",
rt.expectWarnings,
(warning != nil),
rt.check.KubeadmVersion,
rt.check.KubernetesVersion,
)
}
}
}
func TestHTTPProxyCIDRCheck(t *testing.T) {
var tests = []struct {
check HTTPProxyCIDRCheck
expectWarnings bool
}{
{
check: HTTPProxyCIDRCheck{
Proto: "https",
CIDR: "127.0.0.0/8",
}, // Loopback addresses never should produce proxy warnings
expectWarnings: false,
},
{
check: HTTPProxyCIDRCheck{
Proto: "https",
CIDR: "10.96.0.0/12",
}, // Expected to be accessed directly, we set NO_PROXY to 10.0.0.0/8
expectWarnings: false,
},
{
check: HTTPProxyCIDRCheck{
Proto: "https",
CIDR: "192.168.0.0/16",
}, // Expected to go via proxy as this range is not listed in NO_PROXY
expectWarnings: true,
},
{
check: HTTPProxyCIDRCheck{
Proto: "https",
CIDR: "2001:db8::/56",
}, // Expected to be accessed directly, part of 2001:db8::/48 in NO_PROXY
expectWarnings: false,
},
{
check: HTTPProxyCIDRCheck{
Proto: "https",
CIDR: "2001:db8:1::/56",
}, // Expected to go via proxy, range is not in 2001:db8::/48
expectWarnings: true,
},
}
// Save current content of *_proxy and *_PROXY variables.
savedEnv := resetProxyEnv(t)
defer restoreEnv(savedEnv)
for _, rt := range tests {
warning, _ := rt.check.Check()
if (warning != nil) != rt.expectWarnings {
t.Errorf(
"failed HTTPProxyCIDRCheck:\n\texpected: %t\n\t actual: %t (CIDR:%s). Warnings: %v",
rt.expectWarnings,
(warning != nil),
rt.check.CIDR,
warning,
)
}
}
}
func TestHTTPProxyCheck(t *testing.T) {
var tests = []struct {
name string
check HTTPProxyCheck
expectWarnings bool
}{
{
name: "Loopback address",
check: HTTPProxyCheck{
Proto: "https",
Host: "127.0.0.1",
}, // Loopback addresses never should produce proxy warnings
expectWarnings: false,
},
{
name: "IPv4 direct access",
check: HTTPProxyCheck{
Proto: "https",
Host: "10.96.0.1",
}, // Expected to be accessed directly, we set NO_PROXY to 10.0.0.0/8
expectWarnings: false,
},
{
name: "IPv4 via proxy",
check: HTTPProxyCheck{
Proto: "https",
Host: "192.168.0.1",
}, // Expected to go via proxy as this range is not listed in NO_PROXY
expectWarnings: true,
},
{
name: "IPv6 direct access",
check: HTTPProxyCheck{
Proto: "https",
Host: "[2001:db8::1:15]",
}, // Expected to be accessed directly, part of 2001:db8::/48 in NO_PROXY
expectWarnings: false,
},
{
name: "IPv6 via proxy",
check: HTTPProxyCheck{
Proto: "https",
Host: "[2001:db8:1::1:15]",
}, // Expected to go via proxy, range is not in 2001:db8::/48
expectWarnings: true,
},
{
name: "IPv6 direct access, no brackets",
check: HTTPProxyCheck{
Proto: "https",
Host: "2001:db8::1:15",
}, // Expected to be accessed directly, part of 2001:db8::/48 in NO_PROXY
expectWarnings: false,
},
{
name: "IPv6 via proxy, no brackets",
check: HTTPProxyCheck{
Proto: "https",
Host: "2001:db8:1::1:15",
}, // Expected to go via proxy, range is not in 2001:db8::/48
expectWarnings: true,
},
}
// Save current content of *_proxy and *_PROXY variables.
savedEnv := resetProxyEnv(t)
defer restoreEnv(savedEnv)
for _, rt := range tests {
warning, _ := rt.check.Check()
if (warning != nil) != rt.expectWarnings {
t.Errorf(
"%s failed HTTPProxyCheck:\n\texpected: %t\n\t actual: %t (Host:%s). Warnings: %v",
rt.name,
rt.expectWarnings,
(warning != nil),
rt.check.Host,
warning,
)
}
}
}
// resetProxyEnv is a helper function that unsets all *_proxy variables
// and returns the previously set values as a map. This can be used to
// restore the original state of the environment.
func resetProxyEnv(t *testing.T) map[string]string {
savedEnv := make(map[string]string)
for _, e := range os.Environ() {
		pair := strings.SplitN(e, "=", 2) // split on the first "=" only, so values containing "=" survive
if strings.HasSuffix(strings.ToLower(pair[0]), "_proxy") {
savedEnv[pair[0]] = pair[1]
os.Unsetenv(pair[0])
}
}
t.Log("Saved environment: ", savedEnv)
os.Setenv("HTTP_PROXY", "http://proxy.example.com:3128")
os.Setenv("NO_PROXY", "example.com,10.0.0.0/8,2001:db8::/48")
// Check if we can reliably execute tests:
// ProxyFromEnvironment caches the *_proxy environment variables and
// if ProxyFromEnvironment already executed before our test with empty
// HTTP_PROXY it will make these tests return false positive failures
req, err := http.NewRequest("GET", "http://host.fake.tld/", nil)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
proxy, err := http.ProxyFromEnvironment(req)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if proxy == nil {
t.Skip("test skipped as ProxyFromEnvironment already initialized in environment without defined HTTP proxy")
}
t.Log("http.ProxyFromEnvironment is usable, continue executing test")
return savedEnv
}
// restoreEnv is a helper function that restores the values
// of environment variables from the saved state in the map
func restoreEnv(e map[string]string) {
for k, v := range e {
os.Setenv(k, v)
}
}
func TestKubeletVersionCheck(t *testing.T) {
// TODO: Re-enable this test
// fakeexec.FakeCmd supports only combined output.
// Hence .Output() returns a "not supported" error and we cannot use it for the test ATM.
t.Skip()
cases := []struct {
kubeletVersion string
k8sVersion string
expectErrors bool
expectWarnings bool
}{
{"v" + constants.CurrentKubernetesVersion.WithPatch(2).String(), "", false, false}, // check minimally supported version when there is no information about control plane
{"v1.11.3", "v1.11.8", true, false}, // too old kubelet (older than kubeadmconstants.MinimumKubeletVersion), should fail.
{"v" + constants.MinimumKubeletVersion.String(), constants.MinimumControlPlaneVersion.WithPatch(5).String(), false, false}, // kubelet within same major.minor as control plane
{"v" + constants.MinimumKubeletVersion.WithPatch(5).String(), constants.MinimumControlPlaneVersion.WithPatch(1).String(), false, false}, // kubelet is newer, but still within same major.minor as control plane
{"v" + constants.MinimumKubeletVersion.String(), constants.CurrentKubernetesVersion.WithPatch(1).String(), false, false}, // kubelet is lower than control plane, but newer than minimally supported
{"v" + constants.CurrentKubernetesVersion.WithPreRelease("alpha.1").String(), constants.MinimumControlPlaneVersion.WithPatch(1).String(), true, false}, // kubelet is newer (development build) than control plane, should fail.
{"v" + constants.CurrentKubernetesVersion.String(), constants.MinimumControlPlaneVersion.WithPatch(5).String(), true, false}, // kubelet is newer (release) than control plane, should fail.
}
for _, tc := range cases {
t.Run(tc.kubeletVersion, func(t *testing.T) {
fcmd := fakeexec.FakeCmd{<|fim▁hole|> fexec := &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
check := KubeletVersionCheck{KubernetesVersion: tc.k8sVersion, exec: fexec}
warnings, errors := check.Check()
switch {
case warnings != nil && !tc.expectWarnings:
t.Errorf("KubeletVersionCheck: unexpected warnings for kubelet version %q and Kubernetes version %q. Warnings: %v", tc.kubeletVersion, tc.k8sVersion, warnings)
case warnings == nil && tc.expectWarnings:
t.Errorf("KubeletVersionCheck: expected warnings for kubelet version %q and Kubernetes version %q but got nothing", tc.kubeletVersion, tc.k8sVersion)
case errors != nil && !tc.expectErrors:
t.Errorf("KubeletVersionCheck: unexpected errors for kubelet version %q and Kubernetes version %q. errors: %v", tc.kubeletVersion, tc.k8sVersion, errors)
case errors == nil && tc.expectErrors:
t.Errorf("KubeletVersionCheck: expected errors for kubelet version %q and Kubernetes version %q but got nothing", tc.kubeletVersion, tc.k8sVersion)
}
})
}
}
func TestSetHasItemOrAll(t *testing.T) {
var tests = []struct {
ignoreSet sets.String
testString string
expectedResult bool
}{
{sets.NewString(), "foo", false},
{sets.NewString("all"), "foo", true},
{sets.NewString("all", "bar"), "foo", true},
{sets.NewString("bar"), "foo", false},
{sets.NewString("baz", "foo", "bar"), "foo", true},
{sets.NewString("baz", "bar", "foo"), "Foo", true},
}
for _, rt := range tests {
t.Run(rt.testString, func(t *testing.T) {
result := setHasItemOrAll(rt.ignoreSet, rt.testString)
if result != rt.expectedResult {
t.Errorf(
"setHasItemOrAll: expected: %v actual: %v (arguments: %q, %q)",
rt.expectedResult, result,
rt.ignoreSet,
rt.testString,
)
}
})
}
}
func TestImagePullCheck(t *testing.T) {
fcmd := fakeexec.FakeCmd{
RunScript: []fakeexec.FakeRunAction{
// Test case 1: img1 and img2 exist, img3 doesn't exist
func() ([]byte, []byte, error) { return nil, nil, nil },
func() ([]byte, []byte, error) { return nil, nil, nil },
func() ([]byte, []byte, error) { return nil, nil, &fakeexec.FakeExitError{Status: 1} },
// Test case 2: images don't exist
func() ([]byte, []byte, error) { return nil, nil, &fakeexec.FakeExitError{Status: 1} },
func() ([]byte, []byte, error) { return nil, nil, &fakeexec.FakeExitError{Status: 1} },
func() ([]byte, []byte, error) { return nil, nil, &fakeexec.FakeExitError{Status: 1} },
},
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
			// Test case 1: pull only img3
func() ([]byte, error) { return nil, nil },
// Test case 2: fail to pull image2 and image3
func() ([]byte, error) { return nil, nil },
func() ([]byte, error) { return []byte("error"), &fakeexec.FakeExitError{Status: 1} },
func() ([]byte, error) { return []byte("error"), &fakeexec.FakeExitError{Status: 1} },
},
}
fexec := fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) },
},
LookPathFunc: func(cmd string) (string, error) { return "/usr/bin/docker", nil },
}
containerRuntime, err := utilruntime.NewContainerRuntime(&fexec, constants.DefaultDockerCRISocket)
if err != nil {
t.Errorf("unexpected NewContainerRuntime error: %v", err)
}
check := ImagePullCheck{
runtime: containerRuntime,
imageList: []string{"img1", "img2", "img3"},
}
warnings, errors := check.Check()
if len(warnings) != 0 {
t.Fatalf("did not expect any warnings but got %q", warnings)
}
if len(errors) != 0 {
t.Fatalf("expected 1 errors but got %d: %q", len(errors), errors)
}
warnings, errors = check.Check()
if len(warnings) != 0 {
t.Fatalf("did not expect any warnings but got %q", warnings)
}
if len(errors) != 2 {
t.Fatalf("expected 2 errors but got %d: %q", len(errors), errors)
}
}
func TestNumCPUCheck(t *testing.T) {
var tests = []struct {
numCPU int
numErrors int
numWarnings int
}{
{0, 0, 0},
{999999999, 1, 0},
}
for _, rt := range tests {
t.Run(fmt.Sprintf("number of CPUs: %d", rt.numCPU), func(t *testing.T) {
warnings, errors := NumCPUCheck{NumCPU: rt.numCPU}.Check()
if len(warnings) != rt.numWarnings {
t.Errorf("expected %d warning(s) but got %d: %q", rt.numWarnings, len(warnings), warnings)
}
if len(errors) != rt.numErrors {
t.Errorf("expected %d warning(s) but got %d: %q", rt.numErrors, len(errors), errors)
}
})
}
}<|fim▁end|> | CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) { return []byte("Kubernetes " + tc.kubeletVersion), nil },
},
} |
<|file_name|>os_sys_calls_impl_hot_restart.cc<|end_file_name|><|fim▁begin|>#include "common/api/os_sys_calls_impl_hot_restart.h"
#include <cerrno>
namespace Envoy {
namespace Api {
SysCallIntResult HotRestartOsSysCallsImpl::shmOpen(const char* name, int oflag, mode_t mode) {
const int rc = ::shm_open(name, oflag, mode);
return {rc, errno};
}
SysCallIntResult HotRestartOsSysCallsImpl::shmUnlink(const char* name) {
const int rc = ::shm_unlink(name);
return {rc, errno};
}<|fim▁hole|>
} // namespace Api
} // namespace Envoy<|fim▁end|> | |
<|file_name|>createVariable.py<|end_file_name|><|fim▁begin|># encoding: latin1
"""createVariable
"""
__author__ = "Juan C. Duque, Alejandro Betancourt, Juan Sebastian Marín"
__credits__ = "Copyright (c) 2010-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "[email protected]"
__all__ = ['fieldOperation']
import re
def fieldOperation(function, Y, fieldnames):
"""
    This method receives a string containing a function or formula written by
    the user. The function describes operations between the variables of Y (a
    data dictionary) whose names are contained in fieldnames (a list); it is
    applied to the corresponding values in each element of Y. The return value
    is a list containing the results of the function.
:param function: function defined by the user, written like a python operation
:type function: string
:rtype: list (Y dictionary with the results)
"""
variables = []
positions = []
auxiliar1 = []
count = 0
results = []<|fim▁hole|> newfunc = ''
for i in fieldnames[0:]:
if re.search(i,function):
if not (function[function.index(i) - 2: function.index(i)].isalpha()):
variables.append(i)
positions.append(fieldnames.index(i))
for j in Y:
auxiliar1 = []
count = 0
newfunc = function
for k in positions:
auxiliar1.append(Y[j][k])
for l in variables:
if len(re.findall(l,newfunc)) == 1:
newfunc = re.compile(l).sub(str(auxiliar1[variables.index(l)]), newfunc)
else:
if newfunc.index(re.findall(l, newfunc)[0]) != newfunc.index(re.findall('(\D)' + l, newfunc)[1]):
newfunc = re.compile('(\W)-[+,-]' + l).sub(str(auxiliar1[variables.index(l)]), newfunc)
for l in variables:
newfunc = re.compile(l).sub(str(auxiliar1[variables.index(l)]), newfunc)
try:
n = eval(newfunc)
except ZeroDivisionError:
raise ZeroDivisionError("Division by zero was detected")
results.append(n)
return results<|fim▁end|> | |
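# Illustrative usage sketch (not part of the original module): given a data
# dictionary Y keyed by area id and a fieldnames list naming its columns, a
# derived attribute such as a density can be computed from a formula string:
#
#   Y = {0: [150.0, 10.0], 1: [300.0, 20.0]}
#   fieldnames = ['POP', 'AREA']
#   fieldOperation("POP / AREA", Y, fieldnames)   # -> [15.0, 15.0]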
<|file_name|>test_stringutils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <[email protected]>
##
__tests__ = 'stoqlib.lib.stringutils'
import unittest
from stoqlib.lib.stringutils import next_value_for, max_value_for
class TestStringUtils(unittest.TestCase):
def test_next_value_for(self):
# Trivial cases
self.assertEqual(next_value_for(u''), u'1')
self.assertEqual(next_value_for(u'1'), u'2')
self.assertEqual(next_value_for(u'999'), u'1000')
# Ending with digit cases
self.assertEqual(next_value_for(u'A999'), u'A1000')
self.assertEqual(next_value_for(u'A8'), u'A9')
self.assertEqual(next_value_for(u'A9'), u'A10')
self.assertEqual(next_value_for(u'A99'), u'A100')
self.assertEqual(next_value_for(u'A199'), u'A200')
self.assertEqual(next_value_for(u'999A1'), u'999A2')
self.assertEqual(next_value_for(u'A009'), u'A010')
self.assertEqual(next_value_for(u'AB0099'), u'AB0100')
# Ending with alphanumeric cases
self.assertEqual(next_value_for(u'999A'), u'999B')
self.assertEqual(next_value_for(u'A999A'), u'A999B')
self.assertEqual(next_value_for(u'A99AZ'), u'A99B0')
self.assertEqual(next_value_for(u'A999Z'), u'A10000')
self.assertEqual(next_value_for(u'A999-A'), u'A999-B')
self.assertEqual(next_value_for(u'A999-Z'), u'A999-00')
# Not handled cases
self.assertEqual(next_value_for(u'A999-'), u'A999-0')
def test_max_value_for(self):
self.assertEqual(max_value_for([u'']), u'')<|fim▁hole|> self.assertEqual(max_value_for([u'1']), u'1')
self.assertEqual(max_value_for([u'1', u'2']), u'2')
self.assertEqual(max_value_for([u'9', u'10']), u'10')
self.assertEqual(max_value_for([u'009', u'10']), u'010')
self.assertEqual(max_value_for([u'a09', u'999']), u'a09')<|fim▁end|> | |
<|file_name|>math.rs<|end_file_name|><|fim▁begin|>use std::mem;
pub use vecmath::{
Vector3,
Matrix4,
vec3_add,
vec3_sub,
vec3_scale,
row_mat4_mul,
row_mat4_transform,
mat4_transposed,
mat4_inv,
mat4_id,
};
pub use quaternion::id as quaternion_id;
pub use quaternion::mul as quaternion_mul;
pub use quaternion::conj as quaternion_conj;
pub use quaternion::{self, Quaternion};
pub use dual_quaternion::{self, DualQuaternion};
pub fn lerp_quaternion(q1: &Quaternion<f32>, q2: &Quaternion<f32>, blend_factor: &f32) -> Quaternion<f32> {
let dot = q1.0 * q2.0 + q1.1[0] * q2.1[0] + q1.1[1] * q2.1[1] + q1.1[2] * q2.1[2];
let s = 1.0 - blend_factor;
let t: f32 = if dot > 0.0 { *blend_factor } else { -blend_factor };
let w = s * q1.0 + t * q2.0;
let x = s * q1.1[0] + t * q2.1[0];
let y = s * q1.1[1] + t * q2.1[1];
let z = s * q1.1[2] + t * q2.1[2];
let inv_sqrt_len = inv_sqrt(w * w + x * x + y * y + z * z);
(w * inv_sqrt_len, [x * inv_sqrt_len, y * inv_sqrt_len, z * inv_sqrt_len])
}
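// Illustrative sketch, not part of the original module: blending the identity
// orientation with a quarter turn about Z using `lerp_quaternion`. A factor of
// 0.5 picks the halfway orientation; the inverse-sqrt step inside the function
// re-normalises the result.
#[allow(dead_code)]
fn example_blend_orientations() -> Quaternion<f32> {
    let rest: Quaternion<f32> = (1.0, [0.0, 0.0, 0.0]);
    let quarter_turn_z: Quaternion<f32> = (0.70710678, [0.0, 0.0, 0.70710678]);
    lerp_quaternion(&rest, &quarter_turn_z, &0.5)
}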
/// Dual-quaternion linear blending. See http://dcgi.felk.cvut.cz/home/zara/papers/TCD-CS-2006-46.pdf
pub fn lerp_dual_quaternion(q1: DualQuaternion<f32>, q2: DualQuaternion<f32>, blend_factor: f32) -> DualQuaternion<f32> {
let dot = dual_quaternion::dot(q1, q2);
let s = 1.0 - blend_factor;
let t: f32 = if dot > 0.0 { blend_factor } else { -blend_factor };
let blended_sum = dual_quaternion::add(dual_quaternion::scale(q1, s), dual_quaternion::scale(q2, t));
dual_quaternion::normalize(blended_sum)
}
/// rotation matrix for `a` radians about z
pub fn mat4_rotate_z(a: f32) -> Matrix4<f32> {
[
[a.cos(), -a.sin(), 0.0, 0.0],
[a.sin(), a.cos(), 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
}
pub fn matrix_to_quaternion(m: &Matrix4<f32>) -> Quaternion<f32> {<|fim▁hole|>
let trace = m[0][0] + m[1][1] + m[2][2];
if trace > 0.0 {
let t = trace + 1.0;
let s = inv_sqrt(t) * 0.5;
q[3] = s * t;
q[0] = (m[1][2] - m[2][1]) * s;
q[1] = (m[2][0] - m[0][2]) * s;
q[2] = (m[0][1] - m[1][0]) * s;
} else {
let mut i = 0;
if m[1][1] > m[0][0] {
i = 1;
}
if m[2][2] > m[i][i] {
i = 2;
}
let j = next[i];
let k = next[j];
let t = (m[i][i] - (m[j][j] + m[k][k])) + 1.0;
let s = inv_sqrt(t) * 0.5;
q[i] = s * t;
q[3] = (m[j][k] - m[k][j]) * s;
q[j] = (m[i][j] + m[j][i]) * s;
q[k] = (m[i][k] + m[k][i]) * s;
}
(q[3], [q[0], q[1], q[2]])
}
///
/// See http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
///
pub fn quaternion_to_matrix(q: Quaternion<f32>) -> Matrix4<f32> {
let w = q.0;
let x = q.1[0];
let y = q.1[1];
let z = q.1[2];
let x2 = x + x;
let y2 = y + y;
let z2 = z + z;
let xx2 = x2 * x;
let xy2 = x2 * y;
let xz2 = x2 * z;
let yy2 = y2 * y;
let yz2 = y2 * z;
let zz2 = z2 * z;
let wy2 = y2 * w;
let wz2 = z2 * w;
let wx2 = x2 * w;
[
[1.0 - yy2 - zz2, xy2 + wz2, xz2 - wy2, 0.0],
[xy2 - wz2, 1.0 - xx2 - zz2, yz2 + wx2, 0.0],
[xz2 + wy2, yz2 - wx2, 1.0 - xx2 - yy2, 0.0],
[0.0, 0.0, 0.0, 1.0]
]
}
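// Illustrative sketch, not part of the original module: converting a quarter
// turn about Z into a matrix with `quaternion_to_matrix` and applying it to a
// homogeneous point via `row_mat4_transform`; with this module's row-major
// convention the X axis should land on (0.0, -1.0, 0.0).
#[allow(dead_code)]
fn example_rotate_point() -> [f32; 4] {
    let quarter_turn_z: Quaternion<f32> = (0.70710678, [0.0, 0.0, 0.70710678]);
    let m = quaternion_to_matrix(quarter_turn_z);
    row_mat4_transform(m, [1.0, 0.0, 0.0, 1.0])
}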
pub fn inv_sqrt(x: f32) -> f32 {
let x2: f32 = x * 0.5;
let mut y: f32 = x;
let mut i: i32 = unsafe { mem::transmute(y) };
i = 0x5f3759df - (i >> 1);
y = unsafe { mem::transmute(i) };
y = y * (1.5 - (x2 * y * y));
y
}<|fim▁end|> |
let mut q = [0.0, 0.0, 0.0, 0.0];
let next = [1, 2, 0]; |
<|file_name|>a_sort.py<|end_file_name|><|fim▁begin|>import fileinput
def str_to_int(s):
return([ int(x) for x in s.split() ])
# args = [ 'line 1', 'line 2', ... ]
def proc_input(args):
return str_to_int(args[1])
def find(ints, offset):
min = float('inf')
min_index = -1
for k, v in enumerate(ints[offset:]):
if v < min:
min = v
min_index = k + offset
return(min_index)
def swap(l, a, b):
t = l[a]
l[a] = l[b]
l[b] = t
def solve(args, verbose=False, debug=False):
ints = proc_input(args)
if debug:
from copy import deepcopy
d = deepcopy(ints)
results = []
for i in xrange(len(ints)):
min_index = find(ints, i)
if min_index != i:
results.append((min_index, i))
swap(ints, min_index, i)
if debug:
d.sort()
assert(ints == d)
assert(len(results) <= len(ints))
if verbose:
print len(results)
for (src, tgt) in results:
print src, tgt
return (len(results), results)
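# Illustrative note (not part of the original solution): the expected stdin is
# the element count on line one and the values on line two, e.g.
#
#   5
#   5 2 5 1 4
#
# for which solve(..., verbose=True) prints the number of swaps followed by
# one "source target" pair per line: "2", "3 0", "4 2".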
def test():
assert(str_to_int('1 2 3') == [ 1, 2, 3 ])
assert(solve([ '5', '5 2 5 1 4' ], debug=True) == ( 2, [ (3, 0), (4, 2) ] ))
assert(solve([ '6', '10 20 20 40 60 60' ], debug=True) == (0, []))
assert(solve([ '2', '101 100' ], debug=True) == (1, [ (1, 0) ]))
if __name__ == '__main__':
from sys import argv
if argv.pop() == 'test':
test()<|fim▁hole|><|fim▁end|> | else:
solve(list(fileinput.input()), verbose=True) |
<|file_name|>externs.js<|end_file_name|><|fim▁begin|>var GBYViewController = {};
GBYViewController.performSegueWithIdentifier = function() {};
GBYViewController.dismissViewControllerAnimated = function() {};
GBYViewController.interfaceOrientation = function() {};
GBYViewController.animateWithDurationAnimationsCompletion = function() {};
var GBYButton = {};
GBYButton.enabled = function() {};
GBYButton.hidden = function() {};
GBYButton.setTouchUpInsideCallback = function() {};
GBYButton.setTouchDownCallback = function() {};
GBYButton.setTitleForState = function() {};
var GBYBarButtonItem = {};
GBYBarButtonItem.enabled = function() {};
GBYBarButtonItem.setActionCallback = function() {};
GBYBarButtonItem.title = function() {};
var GBYImageView = {};
GBYImageView.clearImage = function() {};
GBYImageView.hidden = function() {};
var GBYSegmentedControl = {};
GBYSegmentedControl.enabled = function() {};
GBYSegmentedControl.setValueChangedCallback = function() {};
var GBYStepper = {};
GBYStepper.enabled = function() {};
GBYStepper.value = function() {};
GBYStepper.setValueChangedCallback = function() {};
var GBYSwitch = {};
GBYSwitch.enabled = function() {};
GBYSwitch.on = function() {};
GBYSwitch.setValueChangedCallback = function() {};
<|fim▁hole|>var GBYLabel = {};
GBYLabel.text = function() {};
GBYLabel.textColor = function() {};
GBYLabel.enabled = function() {};
GBYLabel.hidden = function() {};
GBYLabel.frame = function() {};
var GBYNavigationItem = {};
GBYNavigationItem.title = function() {};
var GBYTableView = {};
GBYTableView.hidden = function() {};
GBYTableView.frame = function() {};
GBYTableView.reloadData = function() {};
GBYTableView.reloadRowInSectionWithAnimation = function() {};
GBYTableView.scrollToRowInSectionAtScrollPositionAnimated = function() {};
GBYTableView.dataSource = function() {};
GBYTableView.delegate = function() {};
GBYTableView.beginUpdates = function() {};
GBYTableView.endUpdates = function() {};
GBYTableView.insertSectionWithRowAnimation = function() {};
GBYTableView.deleteSectionWithRowAnimation = function() {};
GBYTableView.insertRowInSectionWithRowAnimation = function() {};
GBYTableView.deleteRowInSectionWithRowAnimation = function() {};
GBYTableView.indexPathForSelectedRow = function() {};
GBYTableView.indexPathsForSelectedRows = function() {};
GBYTableView.allowsMultipleSelectionDuringEditing = function() {};
GBYTableView.setEditingAnimated = function() {};
GBYTableView.selectRowInSectionAnimatedScrollPosition = function() {};
var GBYTextField = {};
GBYTextField.enabled = function() {};
GBYTextField.hidden = function() {};
GBYTextField.text = function() {};
GBYTextField.textColor = function() {};
GBYTextField.setEditingDidBeginCallback = function() {};
GBYTextField.setEditingChangedCallback = function() {};
GBYTextField.setEditingDidEndCallback = function() {};
GBYTextField.setEditingDidEndOnExitCallback = function() {};
GBYTextField.becomeFirstResponder = function() {};
var GBYTableViewCell = {};
GBYTableViewCell.textLabel = function() {};
GBYTableViewCell.detailTextLabel = function() {};
var GBYColor = {};
GBYColor.colorWithRedGreenBlueAlpha = function() {};
GBYColor.blackColor = function() {};
GBYColor.darkGrayColor = function() {};
GBYColor.lightGrayColor = function() {};
GBYColor.whiteColor = function() {};
GBYColor.grayColor = function() {};
GBYColor.redColor = function() {};
GBYColor.greenColor = function() {};
GBYColor.blueColor = function() {};
GBYColor.cyanColor = function() {};
GBYColor.yellowColor = function() {};
GBYColor.magentaColor = function() {};
GBYColor.orangeColor = function() {};
GBYColor.purpleColor = function() {};
GBYColor.brownColor = function() {};
GBYColor.clearColor = function() {};
var GBYFetchedResultsController = {};
GBYFetchedResultsController.cljsDelegate = function() {};
GBYFetchedResultsController.performFetch = function() {};
GBYFetchedResultsController.objectAtSectionRow = function() {};
GBYFetchedResultsController.sectionCount = function() {};
GBYFetchedResultsController.numberOfObjectsInSection = function() {};
GBYFetchedResultsController.sectionIndexTitles = function() {};
var GBYSoundUtils = {};
GBYSoundUtils.playSoundWithNameAndExtension = function() {};
var GBYAlertUtils = {};
GBYAlertUtils.alertWithMessage = function() {};<|fim▁end|> | |
<|file_name|>expatreader.py<|end_file_name|><|fim▁begin|>"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref, _weakref
class _ClosedParser:
pass
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
"""Locator for use with the ExpatParser class.
This uses a weak reference to the parser object to avoid creating
a circular reference between the parser and the content handler.
"""
def __init__(self, parser):
self._ref = _mkproxy(parser)
def getColumnNumber(self):
parser = self._ref
if parser._parser is None:
return None
return parser._parser.ErrorColumnNumber
def getLineNumber(self):
parser = self._ref
if parser._parser is None:
return 1
return parser._parser.ErrorLineNumber
def getPublicId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getPublicId()
def getSystemId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
def __init__(self, namespaceHandling=0, bufsize=2**16-20):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
self._external_ges = 1
self._interning = None
# XMLReader methods
def parse(self, source):
"Parse an XML document from a URL or an InputSource."
source = saxutils.prepare_input_source(source)
self._source = source
try:
self.reset()
self._cont_handler.setDocumentLocator(ExpatLocator(self))
xmlreader.IncrementalParser.parse(self, source)
except:
# bpo-30264: Close the source on error to not leak resources:
# xml.sax.parse() doesn't give access to the underlying parser
# to the caller
self._close_source()
raise
def prepareParser(self, source):
if source.getSystemId() is not None:
self._parser.SetBase(source.getSystemId())
# Redefined setContentHandler to allow changing handlers during parsing
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
elif name == feature_string_interning:
return self._interning is not None
elif name in (feature_validation, feature_external_pes,
feature_namespace_prefixes):
return 0
elif name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException(
"expat does not support validation")<|fim▁hole|> if state:
raise SAXNotSupportedException(
"expat does not read external parameter entities")
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException(
"expat does not report namespace prefixes")
else:
raise SAXNotRecognizedException(
"Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
elif name == property_interning_dict:
return self._interning
elif name == property_xml_string:
if self._parser:
if hasattr(self._parser, "GetInputContext"):
return self._parser.GetInputContext()
else:
raise SAXNotRecognizedException(
"This version of expat does not support getting"
" the XML string")
else:
raise SAXNotSupportedException(
"XML string cannot be returned when not parsing")
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" %
name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" %
name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error as e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def _close_source(self):
source = self._source
try:
file = source.getCharacterStream()
if file is not None:
file.close()
finally:
file = source.getByteStream()
if file is not None:
file.close()
def close(self):
if (self._entity_stack or self._parser is None or
isinstance(self._parser, _ClosedParser)):
# If we are completing an external entity, do nothing here
return
try:
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
finally:
self._parsing = 0
if self._parser is not None:
# Keep ErrorColumnNumber and ErrorLineNumber after closing.
parser = _ClosedParser()
parser.ErrorColumnNumber = self._parser.ErrorColumnNumber
parser.ErrorLineNumber = self._parser.ErrorLineNumber
self._parser = parser
self._close_source()
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(),
intern = self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
# This pyexpat does not support SkippedEntity
pass
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
# no namespace
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
# default namespace
pair = tuple(pair)
newattrs = {}
qnames = {}
for (aname, value) in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
# no namespace
qname = aname
apair = (None, aname)
elif length == 3:
qname = "%s:%s" % (parts[2], parts[1])
apair = parts[0], parts[1]
else:
# default namespace
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
if not self._external_ges:
return 1
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
def skipped_entity_handler(self, name, is_pe):
if is_pe:
# The SAX spec requires to report skipped PEs with a '%'
name = '%'+name
self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
return ExpatParser(*args, **kwargs)
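# Illustrative sketch (not part of the original driver): because ExpatParser
# implements the IncrementalParser interface, a document can also be fed in
# chunks instead of handing a complete source to parse(), e.g.:
#
#   p = create_parser()
#   p.setContentHandler(handler.ContentHandler())
#   for chunk in ("<root>", "<child/>", "</root>"):
#       p.feed(chunk)
#   p.close()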
# ---
if __name__ == "__main__":
import xml.sax.saxutils
p = create_parser()
p.setContentHandler(xml.sax.saxutils.XMLGenerator())
p.setErrorHandler(xml.sax.ErrorHandler())
p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml")<|fim▁end|> | elif name == feature_external_pes: |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from matplotlib.numerix import which
if which[0] == "numarray":
from numarray.linear_algebra.mlab import *
elif which[0] == "numeric":
from MLab import *
elif which[0] == "numpy":
try:
from numpy.oldnumeric.mlab import *
except ImportError:
from numpy.lib.mlab import *<|fim▁hole|>amax = max<|fim▁end|> | else:
raise RuntimeError("invalid numerix selector")
amin = min |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import sys, os, re
from functools import partial
from PyQt5.Qt import (
QGridLayout, QToolButton, QIcon, QRadioButton, QMenu, QApplication, Qt,
QSize, QWidget, QLabel, QStackedLayout, QPainter, QRect, QVBoxLayout,
QCursor, QEventLoop, QKeySequence, pyqtSignal, QTimer, QHBoxLayout)
from calibre.ebooks.oeb.polish.container import Container
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.gui2 import info_dialog
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.tweak_book.editor import syntax_from_mime
from calibre.gui2.tweak_book.diff.view import DiffView
from calibre.gui2.tweak_book.widgets import Dialog
from calibre.gui2.widgets2 import HistoryLineEdit2
from calibre.utils.filenames import samefile
from calibre.utils.icu import numeric_sort_key
class BusyWidget(QWidget): # {{{
def __init__(self, parent):
QWidget.__init__(self, parent)
l = QVBoxLayout()
self.setLayout(l)
l.addStretch(10)
self.pi = ProgressIndicator(self, 128)
l.addWidget(self.pi, alignment=Qt.AlignHCenter)
self.dummy = QLabel('<h2>\xa0')
l.addSpacing(10)
l.addWidget(self.dummy, alignment=Qt.AlignHCenter)
l.addStretch(10)
self.text = _('Calculating differences, please wait...')
def paintEvent(self, ev):
br = ev.region().boundingRect()
QWidget.paintEvent(self, ev)
p = QPainter(self)
p.setClipRect(br)
f = p.font()
f.setBold(True)
f.setPointSize(20)
p.setFont(f)
p.setPen(Qt.SolidLine)
r = QRect(0, self.dummy.geometry().top() + 10, self.geometry().width(), 150)
p.drawText(r, Qt.AlignHCenter | Qt.AlignTop | Qt.TextSingleLine, self.text)
p.end()
# }}}
class Cache(object):
def __init__(self):
self._left, self._right = {}, {}
self.left, self.right = self._left.get, self._right.get
self.set_left, self.set_right = self._left.__setitem__, self._right.__setitem__
def changed_files(list_of_names1, list_of_names2, get_data1, get_data2):
list_of_names1, list_of_names2 = frozenset(list_of_names1), frozenset(list_of_names2)
changed_names = set()
cache = Cache()
common_names = list_of_names1.intersection(list_of_names2)
for name in common_names:
left, right = get_data1(name), get_data2(name)
if len(left) == len(right) and left == right:
continue
cache.set_left(name, left), cache.set_right(name, right)
changed_names.add(name)
removals = list_of_names1 - common_names
adds = set(list_of_names2 - common_names)
adata, rdata = {a:get_data2(a) for a in adds}, {r:get_data1(r) for r in removals}
ahash = {a:hash(d) for a, d in adata.iteritems()}
rhash = {r:hash(d) for r, d in rdata.iteritems()}
renamed_names, removed_names, added_names = {}, set(), set()
for name, rh in rhash.iteritems():
for n, ah in ahash.iteritems():
if ah == rh:
renamed_names[name] = n
adds.discard(n)
break
else:
cache.set_left(name, rdata[name])
removed_names.add(name)
for name in adds:
cache.set_right(name, adata[name])
added_names.add(name)
return cache, changed_names, renamed_names, removed_names, added_names
def get_decoded_raw(name):
from calibre.ebooks.chardet import xml_to_unicode, force_encoding
with open(name, 'rb') as f:
raw = f.read()
syntax = syntax_from_mime(name, guess_type(name))
if syntax is None:
try:
raw = raw.decode('utf-8')
except ValueError:
pass
elif syntax != 'raster_image':
if syntax in {'html', 'xml'}:
raw = xml_to_unicode(raw, verbose=True)[0]
else:
m = re.search(br"coding[:=]\s*([-\w.]+)", raw[:1024], flags=re.I)
if m is not None and m.group(1) != '8bit':
enc = m.group(1)
if enc == b'unicode':
enc = 'utf-8'
else:
enc = force_encoding(raw, verbose=True)
try:
raw = raw.decode(enc)
except (LookupError, ValueError):
pass
return raw, syntax
def file_diff(left, right):
(raw1, syntax1), (raw2, syntax2) = map(get_decoded_raw, (left, right))
if type(raw1) is not type(raw2):
raw1, raw2 = open(left, 'rb').read(), open(right, 'rb').read()
cache = Cache()
cache.set_left(left, raw1), cache.set_right(right, raw2)
changed_names = {} if raw1 == raw2 else {left:right}
return cache, {left:syntax1, right:syntax2}, changed_names, {}, set(), set()
def dir_diff(left, right):
ldata, rdata, lsmap, rsmap = {}, {}, {}, {}
for base, data, smap in ((left, ldata, lsmap), (right, rdata, rsmap)):
for dirpath, dirnames, filenames in os.walk(base):
for filename in filenames:
path = os.path.join(dirpath, filename)
name = os.path.relpath(path, base)
data[name], smap[name] = get_decoded_raw(path)
cache, changed_names, renamed_names, removed_names, added_names = changed_files(
ldata, rdata, ldata.get, rdata.get)
syntax_map = {name:lsmap[name] for name in changed_names}
syntax_map.update({name:lsmap[name] for name in renamed_names})
syntax_map.update({name:rsmap[name] for name in added_names})
syntax_map.update({name:lsmap[name] for name in removed_names})
return cache, syntax_map, changed_names, renamed_names, removed_names, added_names
def container_diff(left, right):
left_names, right_names = set(left.name_path_map), set(right.name_path_map)
if left.cloned or right.cloned:
# Since containers are often clones of each other, as a performance
# optimization, discard identical names that point to the same physical
# file, without needing to read the file's contents.
# First commit dirtied names
for c in (left, right):
Container.commit(c, keep_parsed=True)
samefile_names = {name for name in left_names & right_names if samefile(
left.name_path_map[name], right.name_path_map[name])}
left_names -= samefile_names
right_names -= samefile_names
cache, changed_names, renamed_names, removed_names, added_names = changed_files(
left_names, right_names, left.raw_data, right.raw_data)
def syntax(container, name):
mt = container.mime_map[name]
return syntax_from_mime(name, mt)
syntax_map = {name:syntax(left, name) for name in changed_names}
syntax_map.update({name:syntax(left, name) for name in renamed_names})
syntax_map.update({name:syntax(right, name) for name in added_names})
syntax_map.update({name:syntax(left, name) for name in removed_names})
return cache, syntax_map, changed_names, renamed_names, removed_names, added_names
def ebook_diff(path1, path2):
from calibre.ebooks.oeb.polish.container import get_container
left = get_container(path1, tweak_mode=True)
right = get_container(path2, tweak_mode=True)
return container_diff(left, right)
class Diff(Dialog):
revert_requested = pyqtSignal()
line_activated = pyqtSignal(object, object, object)
def __init__(self, revert_button_msg=None, parent=None, show_open_in_editor=False, show_as_window=False):
self.context = 3
self.beautify = False
self.apply_diff_calls = []
self.show_open_in_editor = show_open_in_editor
self.revert_button_msg = revert_button_msg
Dialog.__init__(self, _('Differences between books'), 'diff-dialog', parent=parent)
self.setWindowFlags(self.windowFlags() | Qt.WindowMinMaxButtonsHint)
if show_as_window:
self.setWindowFlags(Qt.Window)
self.view.line_activated.connect(self.line_activated)
def sizeHint(self):
geom = QApplication.instance().desktop().availableGeometry(self)
return QSize(int(0.9 * geom.width()), int(0.8 * geom.height()))
def setup_ui(self):
self.setWindowIcon(QIcon(I('diff.png')))
self.stacks = st = QStackedLayout(self)
self.busy = BusyWidget(self)
self.w = QWidget(self)
st.addWidget(self.busy), st.addWidget(self.w)
self.setLayout(st)
self.l = l = QGridLayout()
self.w.setLayout(l)
self.view = v = DiffView(self, show_open_in_editor=self.show_open_in_editor)
l.addWidget(v, l.rowCount(), 0, 1, -1)
r = l.rowCount()
self.bp = b = QToolButton(self)
b.setIcon(QIcon(I('back.png')))
b.clicked.connect(partial(self.view.next_change, -1))
b.setToolTip(_('Go to previous change') + ' [p]')
b.setText(_('&Previous change')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 0)
self.bn = b = QToolButton(self)
b.setIcon(QIcon(I('forward.png')))
b.clicked.connect(partial(self.view.next_change, 1))
b.setToolTip(_('Go to next change') + ' [n]')
b.setText(_('&Next change')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 1)
self.search = s = HistoryLineEdit2(self)
s.initialize('diff_search_history')
l.addWidget(s, r, 2)
s.setPlaceholderText(_('Search for text'))
s.returnPressed.connect(partial(self.do_search, False))
self.sbn = b = QToolButton(self)
b.setIcon(QIcon(I('arrow-down.png')))
b.clicked.connect(partial(self.do_search, False))
b.setToolTip(_('Find next match'))
b.setText(_('Next &match')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 3)
self.sbp = b = QToolButton(self)
b.setIcon(QIcon(I('arrow-up.png')))
b.clicked.connect(partial(self.do_search, True))
b.setToolTip(_('Find previous match'))
b.setText(_('P&revious match')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 4)
self.lb = b = QRadioButton(_('Left panel'), self)
b.setToolTip(_('Perform search in the left panel'))
l.addWidget(b, r, 5)
self.rb = b = QRadioButton(_('Right panel'), self)
b.setToolTip(_('Perform search in the right panel'))
l.addWidget(b, r, 6)
b.setChecked(True)
self.pb = b = QToolButton(self)
b.setIcon(QIcon(I('config.png')))
b.setText(_('&Options')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
b.setToolTip(_('Change how the differences are displayed'))
b.setPopupMode(b.InstantPopup)
m = QMenu(b)
b.setMenu(m)
cm = self.cm = QMenu(_('Lines of context around each change'))
for i in (3, 5, 10, 50):
cm.addAction(_('Show %d lines of context') % i, partial(self.change_context, i))
cm.addAction(_('Show all text'), partial(self.change_context, None))
self.beautify_action = m.addAction('', self.toggle_beautify)
self.set_beautify_action_text()
m.addMenu(cm)
l.addWidget(b, r, 7)
self.hl = QHBoxLayout()
l.addLayout(self.hl, l.rowCount(), 0, 1, -1)
self.names = QLabel('')
self.hl.addWidget(self.names, r)
self.bb.setStandardButtons(self.bb.Close)<|fim▁hole|> b.clicked.connect(self.reject)
self.bb.button(self.bb.Close).setDefault(True)
self.hl.addWidget(self.bb, r)
self.view.setFocus(Qt.OtherFocusReason)
def break_cycles(self):
self.view = None
for x in ('revert_requested', 'line_activated'):
try:
getattr(self, x).disconnect()
except:
pass
def do_search(self, reverse):
text = unicode(self.search.text())
if not text.strip():
return
v = self.view.view.left if self.lb.isChecked() else self.view.view.right
v.search(text, reverse=reverse)
def change_context(self, context):
if context == self.context:
return
self.context = context
self.refresh()
def refresh(self):
with self:
self.view.clear()
for args, kwargs in self.apply_diff_calls:
kwargs['context'] = self.context
kwargs['beautify'] = self.beautify
self.view.add_diff(*args, **kwargs)
self.view.finalize()
def toggle_beautify(self):
self.beautify = not self.beautify
self.set_beautify_action_text()
self.refresh()
def set_beautify_action_text(self):
self.beautify_action.setText(
_('Beautify files before comparing them') if not self.beautify else
_('Do not beautify files before comparing'))
def __enter__(self):
self.stacks.setCurrentIndex(0)
self.busy.pi.startAnimation()
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
def __exit__(self, *args):
self.busy.pi.stopAnimation()
self.stacks.setCurrentIndex(1)
QApplication.restoreOverrideCursor()
def set_names(self, names):
if isinstance(names, tuple):
self.names.setText('%s <--> %s' % names)
else:
self.names.setText('')
def ebook_diff(self, path1, path2, names=None):
self.set_names(names)
with self:
identical = self.apply_diff(_('The books are identical'), *ebook_diff(path1, path2))
self.view.finalize()
if identical:
self.reject()
def container_diff(self, left, right, identical_msg=None, names=None):
self.set_names(names)
with self:
identical = self.apply_diff(identical_msg or _('No changes found'), *container_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def file_diff(self, left, right, identical_msg=None):
with self:
identical = self.apply_diff(identical_msg or _('The files are identical'), *file_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def dir_diff(self, left, right, identical_msg=None):
with self:
identical = self.apply_diff(identical_msg or _('The directories are identical'), *dir_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def apply_diff(self, identical_msg, cache, syntax_map, changed_names, renamed_names, removed_names, added_names):
self.view.clear()
self.apply_diff_calls = calls = []
def add(args, kwargs):
self.view.add_diff(*args, **kwargs)
calls.append((args, kwargs))
if len(changed_names) + len(renamed_names) + len(removed_names) + len(added_names) < 1:
info_dialog(self, _('No changes found'), identical_msg, show=True)
return True
kwargs = lambda name: {'context':self.context, 'beautify':self.beautify, 'syntax':syntax_map.get(name, None)}
if isinstance(changed_names, dict):
for name, other_name in sorted(changed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
args = (name, other_name, cache.left(name), cache.right(other_name))
add(args, kwargs(name))
else:
for name in sorted(changed_names, key=numeric_sort_key):
args = (name, name, cache.left(name), cache.right(name))
add(args, kwargs(name))
for name in sorted(added_names, key=numeric_sort_key):
args = (_('[%s was added]') % name, name, None, cache.right(name))
add(args, kwargs(name))
for name in sorted(removed_names, key=numeric_sort_key):
args = (name, _('[%s was removed]') % name, cache.left(name), None)
add(args, kwargs(name))
for name, new_name in sorted(renamed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
args = (name, new_name, None, None)
add(args, kwargs(name))
def keyPressEvent(self, ev):
if not self.view.handle_key(ev):
if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
return # The enter key is used by the search box, so prevent it closing the dialog
if ev.key() == Qt.Key_Slash:
return self.search.setFocus(Qt.OtherFocusReason)
if ev.matches(QKeySequence.Copy):
text = self.view.view.left.selected_text + self.view.view.right.selected_text
if text:
QApplication.clipboard().setText(text)
return
if ev.matches(QKeySequence.FindNext):
self.sbn.click()
return
if ev.matches(QKeySequence.FindPrevious):
self.sbp.click()
return
return Dialog.keyPressEvent(self, ev)
def compare_books(path1, path2, revert_msg=None, revert_callback=None, parent=None, names=None):
d = Diff(parent=parent, revert_button_msg=revert_msg)
if revert_msg is not None:
d.revert_requested.connect(revert_callback)
QTimer.singleShot(0, partial(d.ebook_diff, path1, path2, names=names))
d.exec_()
try:
d.revert_requested.disconnect()
except:
pass
d.break_cycles()
def main(args=sys.argv):
from calibre.gui2 import Application
left, right = args[-2:]
ext1, ext2 = left.rpartition('.')[-1].lower(), right.rpartition('.')[-1].lower()
if ext1.startswith('original_'):
ext1 = ext1.partition('_')[-1]
if ext2.startswith('original_'):
        ext2 = ext2.partition('_')[-1]
if os.path.isdir(left):
attr = 'dir_diff'
elif (ext1, ext2) in {('epub', 'epub'), ('azw3', 'azw3')}:
attr = 'ebook_diff'
else:
attr = 'file_diff'
app = Application([]) # noqa
d = Diff(show_as_window=True)
func = getattr(d, attr)
QTimer.singleShot(0, lambda : func(left, right))
d.exec_()
return 0
if __name__ == '__main__':
main()<|fim▁end|> | if self.revert_button_msg is not None:
self.rvb = b = self.bb.addButton(self.revert_button_msg, self.bb.ActionRole)
b.setIcon(QIcon(I('edit-undo.png'))), b.setAutoDefault(False)
b.clicked.connect(self.revert_requested) |
<|file_name|>parsers.py<|end_file_name|><|fim▁begin|>from copy import deepcopy
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.core import management
from avocado.query import oldparsers as parsers
from avocado.models import DataConcept, DataField, DataConceptField
from ....models import Employee
class DataContextParserTestCase(TestCase):
fixtures = ['employee_data.json']
def setUp(self):
management.call_command('avocado', 'init', 'tests', quiet=True)
def test_valid(self):
title = DataField.objects.get_by_natural_key('tests.title.name')
# Single by id (deprecated)
attrs = {
'id': title.pk,
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Single by dotted label
attrs = {
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Single by label list
attrs = {
'field': ['tests', 'title', 'name'],
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Single by field
attrs = {
'field': title.pk,
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Branch node
attrs = {
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}, {
'field': 'tests.employee.first_name',
'operator': 'exact',
'value': 'John',
'cleaned_value': {'value': 'John', 'label': 'John'},
'language': 'First Name is John'
}],
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# No children
attrs = {
'type': 'and',
'children': [],
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# 1 child
attrs = {
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}]
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
def test_invalid(self):
# Non-existent data field
attrs = parsers.datacontext.validate({
'field': 999,
'operator': 'exact',
'value': 'CEO'
})
self.assertFalse(attrs['enabled'])
# Object must be a dict
self.assertRaises(ValidationError, parsers.datacontext.validate, 1)
# Invalid logical operator
attrs = parsers.datacontext.validate({'type': 'foo', 'children': []})
self.assertFalse(attrs['enabled'])
# Missing 'value' key in first condition
attrs = parsers.datacontext.validate({
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact'
}, {
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO'
}]
}, tree=Employee)
self.assertTrue(attrs.get('enabled', True))
self.assertFalse(attrs['children'][0]['enabled'])
self.assertTrue(attrs['children'][1].get('enabled', True))
def test_field_for_concept(self):
f = DataField.objects.get(model_name='title', field_name='name')
c1 = DataConcept()
c2 = DataConcept()
c1.save()
c2.save()
cf = DataConceptField(concept=c1, field=f)
cf.save()
attrs = {
'concept': c1.pk,
'field': f.pk,
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
}
self.assertEqual(
parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
attrs)
# Invalid concept
attrs = parsers.datacontext.validate({
'concept': c2.pk,
'field': f.pk,
'operator': 'exact',
'value': 'CEO',
}, tree=Employee)
self.assertFalse(attrs['enabled'])
def test_parsed_node(self):
node = parsers.datacontext.parse({
'type': 'and',
'children': [],
}, tree=Employee)
# No condition has been defined..
self.assertEqual(node.condition, None)
node = parsers.datacontext.parse({
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
}]
}, tree=Employee)
# Only the one condition is represented
self.assertEqual(str(node.condition),
"(AND: ('title__name__exact', u'CEO'))")
def test_apply(self):
f = DataField.objects.get_by_natural_key('tests',
'title',
'boss')
f1 = DataField.objects.get_by_natural_key('tests',
'employee',
'first_name')
node = parsers.datacontext.parse({
'field': 'tests.title.boss',
'operator': 'exact',
'value': True
}, tree=Employee)
self.assertEqual(
unicode(node.apply().values('id').query).replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '
'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
'"tests_title"."id") WHERE "tests_title"."boss" = True '
.replace(' ', ''))
self.assertEqual(node.language, {
'operator': 'exact',
'language': u'Boss is True',
'field': f.pk,
'value': True
})
# Branch node
node = parsers.datacontext.parse({
'type': 'and',
'children': [{
'field': 'tests.title.boss',
'operator': 'exact',
'value': True,
}, {
'field': 'tests.employee.first_name',
'operator': 'exact',
'value': 'John',
}]
}, tree=Employee)
self.assertEqual(
unicode(node.apply().values('id').query).replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '<|fim▁hole|> 'AND "tests_title"."boss" = True )'.replace(' ', ''))
self.assertEqual(node.language, {
'type': 'and',
'children': [{
'field': f.pk,
'operator': 'exact',
'value': True,
'language': 'Boss is True',
}, {
'field': f1.pk,
'operator': 'exact',
'value': 'John',
'language': 'First Name is John',
}]
})
class DataViewParserTestCase(TestCase):
fixtures = ['tests/fixtures/employee_data.json']
def setUp(self):
management.call_command('avocado', 'init', 'tests', publish=False,
concepts=False, quiet=True)
f1 = DataField.objects.get_by_natural_key('tests',
'employee',
'first_name')
f2 = DataField.objects.get_by_natural_key('tests',
'employee',
'last_name')
self.c = DataConcept()
self.c.save()
DataConceptField(concept=self.c, field=f1).save()
DataConceptField(concept=self.c, field=f2).save()
def test_valid(self):
# Single by id
self.assertEqual(parsers.dataview.validate([{
'concept': self.c.pk
}], tree=Employee), [{
'concept': self.c.pk,
}])
self.assertEqual(parsers.dataview.validate([{
'concept': self.c.pk,
'sort': 'desc',
}], tree=Employee), [{
'concept': self.c.pk,
'sort': 'desc',
}])
def test_valid_legacy(self):
# Single by id
self.assertEqual(parsers.dataview.validate({
'columns': [self.c.pk],
}, tree=Employee), [{
'concept': self.c.pk,
'visible': True,
'sort': None,
'sort_index': None,
}])
self.assertEqual(parsers.dataview.validate({
'ordering': [(self.c.pk, 'desc')],
}, tree=Employee), [{
'concept': self.c.pk,
'visible': False,
'sort': 'desc',
'sort_index': 0,
}])
def test_invalid(self):
# Non-existent data field
facets = parsers.dataview.validate({'columns': [999]})
self.assertFalse(facets[0]['enabled'])
self.assertTrue(facets[0]['errors'])
# Invalid ordering
facets = parsers.dataview.validate([{
'concept': self.c.pk,
'sort': 'foo',
}])
self.assertTrue(facets[0]['warnings'])
def test_apply(self):
node = parsers.dataview.parse([{
'concept': self.c.pk,
}], tree=Employee)
self.assertEqual(
unicode(node.apply().query).replace(' ', ''),
'SELECT "tests_employee"."id", "tests_employee"."first_name", '
'"tests_employee"."last_name" FROM "tests_employee"'
.replace(' ', ''))
node = parsers.dataview.parse([{
'concept': self.c.pk,
'sort': 'desc',
'visible': False,
}], tree=Employee)
self.assertEqual(
unicode(node.apply().query).replace(' ', ''),
'SELECT "tests_employee"."id" FROM "tests_employee" '
'ORDER BY "tests_employee"."first_name" DESC, '
'"tests_employee"."last_name" DESC'
.replace(' ', ''))
def test_apply_distinct(self):
node = parsers.dataview.parse([{
'concept': self.c.pk,
}], tree=Employee)
self.assertEqual(
unicode(node.apply(Employee.objects.distinct()).query)
.replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id", '
'"tests_employee"."first_name", '
'"tests_employee"."last_name" FROM "tests_employee"'
.replace(' ', ''))
def test_implicit_apply_distinct(self):
f1 = DataField.objects.get_by_natural_key('tests',
'office',
'location')
f2 = DataField.objects.get_by_natural_key('tests',
'title',
'name')
c = DataConcept()
c.save()
DataConceptField(concept=c, field=f1).save()
DataConceptField(concept=c, field=f2).save()
# Due to the use of distinct, the concept fields appear in the SELECT
# statement at this point. This is not a bug, but a requirement of SQL.
# These columns are stripped downstream by the exporter.
node = parsers.dataview.parse([{
'concept': c.pk,
'sort': 'desc',
'visible': False,
}], tree=Employee)
self.assertEqual(
unicode(node.apply(Employee.objects.distinct()).query)
.replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id", '
'"tests_office"."location", "tests_title"."name" FROM '
'"tests_employee" INNER JOIN "tests_office" ON '
'("tests_employee"."office_id" = "tests_office"."id") LEFT OUTER '
'JOIN "tests_title" ON ("tests_employee"."title_id" = '
'"tests_title"."id") ORDER BY "tests_office"."location" DESC, '
'"tests_title"."name" DESC'
.replace(' ', ''))
class DataQueryParserTestCase(TestCase):
fixtures = ['employee_data.json']
def setUp(self):
management.call_command('avocado', 'init', 'tests', publish=False,
concepts=False, quiet=True)
f1 = DataField.objects.get_by_natural_key('tests',
'employee',
'first_name')
f2 = DataField.objects.get_by_natural_key('tests',
'employee',
'last_name')
self.c = DataConcept()
self.c.save()
DataConceptField(concept=self.c, field=f1).save()
DataConceptField(concept=self.c, field=f2).save()
def test_valid(self):
self.assertEqual(parsers.dataquery.validate({}, tree=Employee), None)
attrs = {
'context': {
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
'language': 'Name is CEO'
},
'view': [{
'concept': self.c.pk,
}],
}
exp_attrs = deepcopy(attrs)
self.assertEqual(parsers.dataquery.validate(attrs, tree=Employee),
exp_attrs)
# Only the context
attrs = {
'context': {
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
'cleaned_value': {
'value': 'CEO',
'label': 'CEO',
},
'language': 'Name is CEO'
}
}
exp_attrs = deepcopy(attrs)
exp_attrs['view'] = None
self.assertEqual(parsers.dataquery.validate(attrs, tree=Employee),
exp_attrs)
# Only the view
attrs = {
'view': [{
'concept': self.c.pk,
'visible': False,
'sort': 'desc',
}]
}
exp_attrs = {
'context': None,
'view': [{
'visible': False,
'concept': self.c.pk,
'sort': 'desc',
}],
}
self.assertEqual(parsers.dataquery.validate(attrs, tree=Employee),
exp_attrs)
def test_parsed_node(self):
# Make sure no context or view subnodes are created
node = parsers.dataquery.parse({}, tree=Employee)
self.assertEqual(node.datacontext_node, None)
self.assertEqual(node.dataview_node, None)
node = parsers.dataquery.parse({
'context': {
'type': 'and',
'children': [],
}
}, tree=Employee)
# No condition has been defined..
self.assertEqual(node.datacontext_node.condition, None)
node = parsers.dataquery.parse({
'context': {
'type': 'and',
'children': [{
'field': 'tests.title.name',
'operator': 'exact',
'value': 'CEO',
}]
}
}, tree=Employee)
# Only the one condition is represented
self.assertEqual(str(node.datacontext_node.condition),
"(AND: ('title__name__exact', u'CEO'))")
def test_apply(self):
node = parsers.dataquery.parse({
'context': {
'field': 'tests.title.boss',
'operator': 'exact',
'value': True
},
'view': [{
'concept': self.c.pk,
}],
}, tree=Employee)
self.assertEqual(
unicode(node.apply().query).replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id", '
'"tests_employee"."first_name", "tests_employee"."last_name" FROM '
'"tests_employee" INNER JOIN "tests_title" ON '
'("tests_employee"."title_id" = "tests_title"."id") '
'WHERE "tests_title"."boss" = True '
.replace(' ', ''))
# Just the view
node = parsers.dataquery.parse({
'view': [{
'concept': self.c.pk,
'sort': 'desc',
}]
}, tree=Employee)
self.assertEqual(
unicode(node.apply().query).replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id", '
'"tests_employee"."first_name", '
'"tests_employee"."last_name" FROM "tests_employee" '
'ORDER BY "tests_employee"."first_name" DESC, '
'"tests_employee"."last_name" DESC'.replace(' ', ''))
# Just the context
node = parsers.dataquery.parse({
'context': {
'field': 'tests.title.boss',
'operator': 'exact',
'value': True
}
}, tree=Employee)
self.assertEqual(
unicode(node.apply().values('id').query).replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '
'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
'"tests_title"."id") WHERE "tests_title"."boss" = True '
.replace(' ', ''))
f = DataField.objects.get_by_natural_key('tests', 'title', 'boss')
self.assertEqual(node.datacontext_node.language, {
'operator': 'exact',
'language': u'Boss is True',
'field': f.pk,
'value': True
})<|fim▁end|> | 'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
'"tests_title"."id") WHERE ("tests_employee"."first_name" = John ' |
<|file_name|>decoder.rs<|end_file_name|><|fim▁begin|>//! Implements the rustc_serialize::Decoder trait
use std;
use rustc_serialize::{Decoder,Decodable};
use types::{BasicValue,Value};
#[derive(Debug,PartialEq)]
pub enum DecodeError {
BadSignature,
NotSupported,
IntTooNarrow
}
pub struct DBusDecoder {
value: Value,
map_val: Option<Value>
}
impl DBusDecoder {
fn get_unsigned_int (v: &BasicValue) -> Result<u64,DecodeError> {
let val = match v {
&BasicValue::Byte(x) => x as u64,
&BasicValue::Uint16(x) => x as u64,
&BasicValue::Uint32(x) => x as u64,
&BasicValue::Uint64(x) => x,
_ => return Err(DecodeError::BadSignature)
};
Ok(val)
}
fn get_signed_int (v: &BasicValue) -> Result<i64,DecodeError> {
let val = match v {
&BasicValue::Int16(x) => x as i64,
&BasicValue::Int32(x) => x as i64,
&BasicValue::Int64(x) => x as i64,
_ => return Err(DecodeError::BadSignature)
};
Ok(val)
}
fn read_unsigned_int (v: &Value, max: usize) -> Result<u64,DecodeError> {
let basic_val = match v {
&Value::BasicValue(ref x) => x,
_ => return Err(DecodeError::BadSignature)
};
// Make sure the value will fit
let x = try!(DBusDecoder::get_unsigned_int(basic_val));
if x > (max as u64) {
return Err(DecodeError::IntTooNarrow);
}
Ok(x)
}
fn read_signed_int (v: &Value, max: isize, min: isize) -> Result<i64,DecodeError> {
let basic_val = match v {
&Value::BasicValue(ref x) => x,
_ => return Err(DecodeError::BadSignature)
};
let x = try!(DBusDecoder::get_signed_int(basic_val));
// Make sure the value will fit
if x > (max as i64) {
return Err(DecodeError::IntTooNarrow);
}
if x < (min as i64) {
return Err(DecodeError::IntTooNarrow);
}
Ok(x)
}
pub fn new (v: Value) -> DBusDecoder {
DBusDecoder{
value: v,
map_val: None
}
}
pub fn decode<T: Decodable>(v: Value) -> Result<T,DecodeError> {
let mut decoder = DBusDecoder::new(v);
T::decode(&mut decoder)
}
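    // Hedged usage sketch (mirrors the tests at the bottom of this file): convert a D-Bus `Value`
    // into a native Rust type in one call, assuming the value's signature matches the target type:
    //
    //     let v = Value::BasicValue(BasicValue::Uint32(1024));
    //     let n: u32 = DBusDecoder::decode(v).unwrap();
    //
    // A value that cannot fit the requested type yields DecodeError::IntTooNarrow instead of
    // silently truncating.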
}
impl Decoder for DBusDecoder {
type Error = DecodeError;
fn read_usize(&mut self) -> Result<usize, Self::Error> {
let basic_val = match &self.value {
&Value::BasicValue(ref x) => x,
_ => return Err(DecodeError::BadSignature)
};
let x = try!(DBusDecoder::get_unsigned_int(basic_val));
Ok(x as usize)
}
fn read_u64(&mut self) -> Result<u64, Self::Error> {
let val = try!(self.read_usize());
Ok(val as u64)
}
fn read_u32(&mut self) -> Result<u32, Self::Error> {
Ok(try!(DBusDecoder::read_unsigned_int(&self.value, std::u32::MAX as usize)) as u32)
}
fn read_u16(&mut self) -> Result<u16, Self::Error> {
Ok(try!(DBusDecoder::read_unsigned_int(&self.value, std::u16::MAX as usize)) as u16)
}
fn read_u8(&mut self) -> Result<u8, Self::Error> {
Ok(try!(DBusDecoder::read_unsigned_int(&self.value, std::u8::MAX as usize)) as u8)
}
fn read_isize(&mut self) -> Result<isize, Self::Error> {
let basic_val = match &self.value {
&Value::BasicValue(ref x) => x,
_ => return Err(DecodeError::BadSignature)
};
let x = try!(DBusDecoder::get_signed_int(basic_val));
Ok(x as isize)
}
fn read_i64(&mut self) -> Result<i64, Self::Error> {
let val = try!(self.read_isize());
Ok(val as i64)
}
fn read_i32(&mut self) -> Result<i32, Self::Error> {
Ok(try!(DBusDecoder::read_signed_int(&self.value, std::i32::MAX as isize, std::i32::MIN as isize)) as i32)
}
fn read_i16(&mut self) -> Result<i16, Self::Error> {
Ok(try!(DBusDecoder::read_signed_int(&self.value, std::i16::MAX as isize, std::i16::MIN as isize)) as i16)
}
fn read_i8(&mut self) -> Result<i8, Self::Error> {
Ok(try!(DBusDecoder::read_signed_int(&self.value, std::i8::MAX as isize, std::i8::MIN as isize)) as i8)
}
fn read_bool(&mut self) -> Result<bool, Self::Error> {
let basic_val = match &self.value {
&Value::BasicValue(ref x) => x,
_ => return Err(DecodeError::BadSignature)
};
let x = match basic_val {
&BasicValue::Boolean(x) => x,
_ => return Err(DecodeError::BadSignature)
};
Ok(x)
}
fn read_f64(&mut self) -> Result<f64, Self::Error> {
match &self.value {
&Value::Double(x) => Ok(x),
_ => return Err(DecodeError::BadSignature)
}
}
fn read_char(&mut self) -> Result<char, Self::Error> {
let val = try!(self.read_u8());
Ok(val as char)
}
fn read_str(&mut self) -> Result<String, Self::Error> {
let basic_val = match &self.value {
&Value::BasicValue(ref x) => x,
_ => return Err(DecodeError::BadSignature)
};
let x = match basic_val {
&BasicValue::String(ref x) => x.to_string(),
&BasicValue::ObjectPath(ref x) => x.0.to_string(),
&BasicValue::Signature(ref x) => x.0.to_string(),
_ => return Err(DecodeError::BadSignature)
};
Ok(x)
}
fn read_seq<T, F>(&mut self, f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self, usize) -> Result<T, Self::Error> {
let len = match self.value {
Value::Array(ref x) => x.objects.len(),
_ => return Err(DecodeError::BadSignature)
};
f(self, len)
}
fn read_seq_elt<T, F>(&mut self, idx: usize, f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
let val = match self.value {
Value::Array(ref mut x) => {
x.objects.push(Value::BasicValue(BasicValue::Byte(0)));
x.objects.swap_remove(idx)
},
_ => return Err(DecodeError::BadSignature)
};
let mut subdecoder = DBusDecoder::new(val);
f(&mut subdecoder)
}
fn read_map<T, F>(&mut self, f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self, usize) -> Result<T, Self::Error> {
let len = match self.value {
Value::Dictionary(ref x) => x.map.keys().len(),
_ => return Err(DecodeError::BadSignature)
};
f(self, len)
}
fn read_map_elt_key<T, F>(&mut self, _idx: usize, f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
let dict = match self.value {
Value::Dictionary(ref mut x) => x,
_ => return Err(DecodeError::BadSignature)
};
let key = {
dict.map.keys().next().unwrap().clone()
};
self.map_val = Some(dict.map.remove(&key).unwrap());
let mut subdecoder = DBusDecoder::new(Value::BasicValue(key));
f(&mut subdecoder)
}
fn read_map_elt_val<T, F>(&mut self, _idx: usize, f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
let val = self.map_val.take().unwrap();
let mut subdecoder = DBusDecoder::new(val);
f(&mut subdecoder)
}
fn read_struct<T, F>(&mut self, _s_name: &str, _len: usize, f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
match self.value {
Value::Struct(_) => (),
_ => return Err(DecodeError::BadSignature)
};
f(self)
}
fn read_struct_field<T, F>(&mut self, _f_name: &str, f_idx: usize, f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
let val = match self.value {
Value::Struct(ref mut x) => {
x.objects.push(Value::BasicValue(BasicValue::Byte(0)));
x.objects.swap_remove(f_idx)
},
_ => return Err(DecodeError::BadSignature)
};
let mut subdecoder = DBusDecoder::new(val);
f(&mut subdecoder)
}
fn read_enum<T, F>(&mut self, _name: &str, _f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_enum_variant<T, F>(&mut self, _names: &[&str], _f: F) -> Result<T, Self::Error> where F: FnMut(&mut Self, usize) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_enum_variant_arg<T, F>(&mut self, _a_idx: usize, _f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_enum_struct_variant<T, F>(&mut self, _names: &[&str], _f: F) -> Result<T, Self::Error> where F: FnMut(&mut Self, usize) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_enum_struct_variant_field<T, F>(&mut self, _f_name: &str, _f_idx: usize, _f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_tuple<T, F>(&mut self, _len: usize, _f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_tuple_arg<T, F>(&mut self, _a_idx: usize, _f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_tuple_struct<T, F>(&mut self, _s_name: &str, _len: usize, _f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_tuple_struct_arg<T, F>(&mut self, _a_idx: usize, _f: F) -> Result<T, Self::Error> where F: FnOnce(&mut Self) -> Result<T, Self::Error> {<|fim▁hole|> }
fn read_nil(&mut self) -> Result<(), Self::Error> {
Err(DecodeError::NotSupported)
}
fn read_f32(&mut self) -> Result<f32, Self::Error> {
Err(DecodeError::NotSupported)
}
fn error(&mut self, _err: &str) -> Self::Error {
DecodeError::NotSupported
}
}
#[cfg(test)]
mod test {
use rustc_serialize::{Decoder,Decodable};
use types::{BasicValue,Value,Path,Struct,Signature,Array};
use decoder::*;
#[test]
fn test_array () {
let vec = vec![
Value::BasicValue(BasicValue::Uint32(1)),
Value::BasicValue(BasicValue::Uint32(2)),
Value::BasicValue(BasicValue::Uint32(3)),
];
let val = Value::Array(Array::new(vec));
let arr : Vec<u32> = DBusDecoder::decode(val).ok().unwrap();
assert_eq!(vec![1,2,3], arr);
}
#[test]
fn test_int () {
let v = Value::BasicValue(BasicValue::Uint32(1024));
let i : u32 = DBusDecoder::decode(v).ok().unwrap();
assert_eq!(i, 1024);
let x = Value::BasicValue(BasicValue::Uint32(1024));
let err = DBusDecoder::decode::<u8>(x).err().unwrap();
assert_eq!(err, DecodeError::IntTooNarrow);
}
#[test]
fn test_string () {
let v = Value::BasicValue(BasicValue::String("foo".to_string()));
let i : String = DBusDecoder::decode(v).ok().unwrap();
assert_eq!(i, "foo");
let v = Value::BasicValue(BasicValue::Signature(Signature("foo".to_string())));
let i : String = DBusDecoder::decode(v).ok().unwrap();
assert_eq!(i, "foo");
let v = Value::BasicValue(BasicValue::ObjectPath(Path("foo".to_string())));
let i : String = DBusDecoder::decode(v).ok().unwrap();
assert_eq!(i, "foo");
}
#[derive(PartialEq,Debug)]
struct TestStruct {
foo: u8,
bar: u32,
baz: String,
}
impl Decodable for TestStruct {
fn decode<S: Decoder>(s: &mut S) -> Result<Self, S::Error> {
s.read_struct("TestStruct", 3, |s: &mut S| {
let foo = try!(s.read_struct_field("foo", 0, |s: &mut S| {
s.read_u8()
}));
let bar = try!(s.read_struct_field("bar", 1, |s: &mut S| {
s.read_u32()
}));
let baz = try!(s.read_struct_field("baz", 2, |s: &mut S| {
s.read_str()
}));
Ok(TestStruct {
foo: foo,
bar: bar,
baz: baz
})
})
}
}
#[test]
fn test_struct () {
let objects = vec![
Value::BasicValue(BasicValue::Byte(1)),
Value::BasicValue(BasicValue::Uint32(10)),
Value::BasicValue(BasicValue::String("baz".to_string()))
];
let s = Struct {
objects: objects,
signature: Signature("(yus)".to_string())
};
let v = Value::Struct(s);
let x : TestStruct = DBusDecoder::decode(v).unwrap();
assert_eq!(x, TestStruct {
foo: 1,
bar: 10,
baz: "baz".to_string()
});
}
}<|fim▁end|> | Err(DecodeError::NotSupported)
}
fn read_option<T, F>(&mut self, _f: F) -> Result<T, Self::Error> where F: FnMut(&mut Self, bool) -> Result<T, Self::Error> {
Err(DecodeError::NotSupported) |
<|file_name|>GeometryGather.cpp<|end_file_name|><|fim▁begin|>/*
* This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not
* distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
#include "CarbonEngine/Common.h"
#include "CarbonEngine/Core/CoreEvents.h"
#include "CarbonEngine/Core/EventManager.h"
#include "CarbonEngine/Globals.h"
#include "CarbonEngine/Render/EffectManager.h"
#include "CarbonEngine/Render/EffectQueue.h"
#include "CarbonEngine/Render/EffectQueueArray.h"
#include "CarbonEngine/Render/Font.h"
#include "CarbonEngine/Render/GeometryChunk.h"
#include "CarbonEngine/Scene/GeometryGather.h"
#include "CarbonEngine/Scene/Material.h"
#include "CarbonEngine/Scene/MaterialManager.h"
namespace Carbon
{
// Immediate triangles that get gathered are rendered by putting their geometry into the following geometry chunk and
// appending a draw item to it. The chunk's draw items are cleared at the start of every frame, while its vertex
// storage grows as needed and never shrinks back down, which avoids unnecessary per-frame allocations.
static auto immediateTriangleGeometry = GeometryChunk();
static auto immediateTriangleCount = 0U;
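// Hedged usage sketch (not part of the original source): a scene node's gather step could emit a single colored
// triangle through this path, assuming `gather` is the GeometryGather passed to it and that Vec3/Color take floats:
//
//     gather.addImmediateTriangles(1);    // queues a draw item covering the triangles written next
//     gather.addImmediateTriangle(Vec3(0.0f, 0.0f, 0.0f), Vec3(1.0f, 0.0f, 0.0f), Vec3(0.0f, 1.0f, 0.0f),
//                                 Color(1.0f, 0.0f, 0.0f));
//
// addImmediateTriangles() must be called first so the appended draw item spans the vertices that
// addImmediateTriangle() then writes.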
static bool onFrameBeginEvent(const FrameBeginEvent& e)
{
immediateTriangleGeometry.clearDrawItems();
immediateTriangleCount = 0;
return true;
}
CARBON_REGISTER_EVENT_HANDLER_FUNCTION(FrameBeginEvent, onFrameBeginEvent)
static void clearImmediateTriangleGeometry()
{
immediateTriangleGeometry.clear();
}
CARBON_REGISTER_SHUTDOWN_FUNCTION(clearImmediateTriangleGeometry, 0)
GeometryGather::GeometryGather(const Vec3& cameraPosition, const ConvexHull& frustum, bool isShadowGeometryGather,
EffectQueueArray& queues)
: cameraPosition_(cameraPosition),
frustum_(frustum),
isShadowGeometryGather_(isShadowGeometryGather),
scale_(Vec3::One),
queues_(queues)
{
materialQueueInfos_.reserve(1024);
}
GeometryGather::~GeometryGather()
{
// Now that the gather has completed it is important to unlock the vertex data so it has an opportunity to be
// uploaded to the graphics interface
immediateTriangleGeometry.unlockVertexData();
}
void GeometryGather::changeMaterial(const String& material, const ParameterArray& materialOverrideParameters)
{
currentQueue_ = nullptr;
if (materialOverrideParameters.empty())<|fim▁hole|> for (auto& q : materialQueueInfos_)
{
if (q.queue->getPriority() == currentPriority_ && q.material == material && !q.queue->hasCustomParams() &&
!q.queue->getInternalParams().size())
{
currentQueue_ = &q;
currentQueue_->isTransformCurrent = false;
return;
}
}
}
// No existing material queue can be used, so create a new one
newMaterial(&materials().getMaterial(material), materialOverrideParameters);
}
void GeometryGather::newMaterial(Material* material, const ParameterArray& materialOverrideParameters,
const ParameterArray& internalParams)
{
// Create new material queue entry
auto queue = queues_.create(currentPriority_, material->getEffect(), internalParams);
materialQueueInfos_.emplace(material->getName(), queue);
currentQueue_ = &materialQueueInfos_.back();
material->update();
material->setupEffectQueue(currentQueue_->queue);
// If override parameters are specified then add them as custom parameters to this queue
if (!materialOverrideParameters.empty())
{
for (auto parameter : materialOverrideParameters)
currentQueue_->queue->setCustomParameter(parameter.getLookup(), parameter.getValue());
}
}
void GeometryGather::ensureTransformIsCurrent()
{
if (!currentQueue_->isTransformCurrent)
{
currentQueue_->queue->getItems().addChangeTransformItem(transform_, scale_);
currentQueue_->isTransformCurrent = true;
}
}
void GeometryGather::addGeometryChunk(const GeometryChunk& geometryChunk, int drawItemIndex)
{
ensureTransformIsCurrent();
currentQueue_->queue->getItems().addDrawGeometryChunkItem(geometryChunk, drawItemIndex);
}
void GeometryGather::addRectangle(float width, float height)
{
ensureTransformIsCurrent();
currentQueue_->queue->getItems().addDrawRectangleItem(width, height);
}
void GeometryGather::addText(const Font* font, float fontSize, const UnicodeString& text, const Color& color)
{
if (!font->isReadyForUse() || !text.length())
return;
changeMaterial("Font");
ensureTransformIsCurrent();
currentQueue_->queue->getItems().addDrawTextItem(font, fontSize, text, color);
}
struct ImmediateVertex
{
Vec3 p;
Vec2 st;
unsigned int color = 0;
};
void GeometryGather::addImmediateTriangles(unsigned int triangleCount)
{
changeMaterial("ImmediateGeometry");
// Expand the immediate triangles chunk if needed
if ((immediateTriangleCount + triangleCount) * 3 > immediateTriangleGeometry.getVertexCount())
{
immediateTriangleGeometry.unlockVertexData();
immediateTriangleGeometry.unregisterWithRenderer();
if (immediateTriangleGeometry.getVertexCount() == 0)
{
immediateTriangleGeometry.setDynamic(true);
immediateTriangleGeometry.addVertexStream({VertexStream::Position, 3});
immediateTriangleGeometry.addVertexStream({VertexStream::DiffuseTextureCoordinate, 2});
immediateTriangleGeometry.addVertexStream({VertexStream::Color, 4, TypeUInt8});
}
auto initialVertexCount = immediateTriangleGeometry.getVertexCount();
immediateTriangleGeometry.setVertexCount(
std::max(immediateTriangleGeometry.getVertexCount() * 2,
immediateTriangleGeometry.getVertexCount() + triangleCount * 3));
// Reset the new vertex data
auto vertices = immediateTriangleGeometry.lockVertexData<ImmediateVertex>();
for (auto i = initialVertexCount; i < immediateTriangleGeometry.getVertexCount(); i++)
vertices[i] = ImmediateVertex();
immediateTriangleGeometry.unlockVertexData();
// Setup indices while preserving draw items
auto indices = Vector<unsigned int>(immediateTriangleGeometry.getVertexCount());
for (auto i = 0U; i < indices.size(); i++)
indices[i] = i;
immediateTriangleGeometry.setupIndexData(immediateTriangleGeometry.getDrawItems(), indices);
immediateTriangleGeometry.registerWithRenderer();
}
// Add a drawitem for these immediate triangles and queue it for rendering
immediateTriangleGeometry.appendDrawItem(
{GraphicsInterface::TriangleList, triangleCount * 3, immediateTriangleCount * 3});
addGeometryChunk(immediateTriangleGeometry, immediateTriangleGeometry.getDrawItems().size() - 1);
}
void GeometryGather::addImmediateTriangle(const Vec3& v0, const Vec3& v1, const Vec3& v2, const Color& color)
{
if (!immediateTriangleGeometry.isVertexDataLocked())
immediateTriangleGeometry.lockVertexData();
assert(immediateTriangleGeometry.isVertexDataLocked());
auto vertices =
immediateTriangleGeometry.getLockedVertexDataPointer<ImmediateVertex>() + immediateTriangleCount * 3;
vertices[0].p = v0;
vertices[0].color = color.toRGBA8();
vertices[1].p = v1;
vertices[1].color = vertices[0].color;
vertices[2].p = v2;
vertices[2].color = vertices[0].color;
immediateTriangleCount++;
}
}<|fim▁end|> | {
// Try and find an existing queue that uses the specified material, the current priority, and has no custom
// parameters |
<|file_name|>server_test.go<|end_file_name|><|fim▁begin|>package udf_test
import (
"errors"
"log"
"os"
"reflect"
"testing"
"time"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/udf"
udf_test "github.com/influxdata/kapacitor/udf/test"
)
func TestUDF_StartStop(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_StartStop] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
s.Start()
close(u.Responses)
s.Stop()
// read all requests and wait till the chan is closed
for range u.Requests {
}
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_StartInitStop(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_StartStop] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
go func() {
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Init)
if !ok {
t.Errorf("expected init message got %T", req.Message)
}
res := &udf.Response{
Message: &udf.Response_Init{
Init: &udf.InitResponse{
Success: true,
},
},
}
u.Responses <- res
close(u.Responses)
}()
s.Start()
err := s.Init(nil)
if err != nil {
t.Fatal(err)
}
s.Stop()
// read all requests and wait till the chan is closed
for range u.Requests {
}
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_StartInitAbort(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_StartInfoAbort] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
s.Start()
expErr := errors.New("explicit abort")
go func() {
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Init)
if !ok {
t.Error("expected init message")
}
s.Abort(expErr)
close(u.Responses)
}()
err := s.Init(nil)
if err != expErr {
t.Fatal("expected explicit abort error")
}
}
func TestUDF_StartInfoStop(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_StartInfoStop] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
go func() {
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Info)
if !ok {
t.Errorf("expected info message got %T", req.Message)
}
res := &udf.Response{
Message: &udf.Response_Info{
Info: &udf.InfoResponse{
Wants: udf.EdgeType_STREAM,
Provides: udf.EdgeType_BATCH,
},
},
}
u.Responses <- res
close(u.Responses)
}()
s.Start()
info, err := s.Info()
if err != nil {
t.Fatal(err)
}
if exp, got := udf.EdgeType_STREAM, info.Wants; got != exp {
t.Errorf("unexpected info.Wants got %v exp %v", got, exp)
}
if exp, got := udf.EdgeType_BATCH, info.Provides; got != exp {
t.Errorf("unexpected info.Provides got %v exp %v", got, exp)
}
s.Stop()
// read all requests and wait till the chan is closed
for range u.Requests {
}
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_StartInfoAbort(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_StartInfoAbort] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
s.Start()
expErr := errors.New("explicit abort")
go func() {
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Info)
if !ok {
t.Error("expected info message")
}
s.Abort(expErr)
close(u.Responses)
}()
_, err := s.Info()
if err != expErr {
t.Fatal("expected ErrUDFProcessAborted")
}
}
func TestUDF_Keepalive(t *testing.T) {
t.Parallel()
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_Keepalive] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, time.Millisecond*100, nil, nil)
s.Start()
s.Init(nil)
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Init)
if !ok {
t.Error("expected init message")
}
select {
case req = <-u.Requests:
case <-time.After(time.Second):
t.Fatal("expected keepalive message")
}
if req == nil {
t.Fatal("expected keepalive message got nil, u was killed.")
}
_, ok = req.Message.(*udf.Request_Keepalive)
if !ok {
t.Errorf("expected keepalive message got %T", req.Message)
}
close(u.Responses)
s.Stop()
// read all requests and wait till the chan is closed
for range u.Requests {
}
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_MissedKeepalive(t *testing.T) {
t.Parallel()
abortCalled := make(chan struct{})
aborted := func() {
close(abortCalled)
}
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_MissedKeepalive] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, time.Millisecond*100, aborted, nil)
s.Start()
// Since the keepalive is missed, the process should abort on its own.
for range u.Requests {
}
select {
case <-abortCalled:
case <-time.After(time.Second):
t.Error("expected abort callback to be called")
}
close(u.Responses)
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_KillCallBack(t *testing.T) {
t.Parallel()
timeout := time.Millisecond * 100
abortCalled := make(chan struct{})
killCalled := make(chan struct{})
aborted := func() {
time.Sleep(timeout * 3)
close(abortCalled)
}
kill := func() {
close(killCalled)
}
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_MissedKeepalive] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, timeout, aborted, kill)
s.Start()
// Since the keepalive is missed, the process should abort on its own.
for range u.Requests {
}
	// Since abort takes a long time, the kill callback should be called
select {
case <-killCalled:
case <-time.After(time.Second):
t.Error("expected kill callback to be called")
}
close(u.Responses)
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_MissedKeepaliveInit(t *testing.T) {
t.Parallel()
abortCalled := make(chan struct{})
aborted := func() {
close(abortCalled)
}
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_MissedKeepaliveInit] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, time.Millisecond*100, aborted, nil)
s.Start()
s.Init(nil)
// Since the keepalive is missed, the process should abort on its own.
for range u.Requests {
}
select {
case <-abortCalled:
case <-time.After(time.Second):
t.Error("expected abort callback to be called")
}
close(u.Responses)
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_MissedKeepaliveInfo(t *testing.T) {
t.Parallel()
abortCalled := make(chan struct{})
aborted := func() {
close(abortCalled)
}
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_MissedKeepaliveInfo] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, time.Millisecond*100, aborted, nil)
s.Start()
s.Info()
// Since the keepalive is missed, the process should abort on its own.
for range u.Requests {
}
select {
case <-abortCalled:
case <-time.After(time.Second):
t.Error("expected abort callback to be called")
}
close(u.Responses)
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_SnapshotRestore(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_SnapshotRestore] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
go func() {
// Init
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Init)
if !ok {
t.Error("expected init message")
}
u.Responses <- &udf.Response{
Message: &udf.Response_Init{
Init: &udf.InitResponse{Success: true},
},
}
// Snapshot
req = <-u.Requests
if req == nil {
t.Fatal("expected snapshot message got nil")
}
_, ok = req.Message.(*udf.Request_Snapshot)
if !ok {
t.Errorf("expected snapshot message got %T", req.Message)
}
data := []byte{42}
u.Responses <- &udf.Response{
Message: &udf.Response_Snapshot{
Snapshot: &udf.SnapshotResponse{Snapshot: data},
},
}
// Restore
req = <-u.Requests
if req == nil {
t.Fatal("expected restore message got nil")
}
restore, ok := req.Message.(*udf.Request_Restore)
if !ok {
t.Errorf("expected restore message got %T", req.Message)
}
if !reflect.DeepEqual(data, restore.Restore.Snapshot) {
t.Errorf("unexpected restore snapshot got %v exp %v", restore.Restore.Snapshot, data)
}
u.Responses <- &udf.Response{
Message: &udf.Response_Restore{
Restore: &udf.RestoreResponse{Success: true},
},
}
close(u.Responses)
}()
s.Start()
s.Init(nil)
snapshot, err := s.Snapshot()
if err != nil {
t.Fatal(err)
}
err = s.Restore(snapshot)
if err != nil {
t.Fatal(err)
}
s.Stop()
// read all requests and wait till the chan is closed
for range u.Requests {
}
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_StartInitPointStop(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_StartPointStop] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
go func() {
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Init)
if !ok {
t.Errorf("expected init message got %T", req.Message)
}
res := &udf.Response{
Message: &udf.Response_Init{
Init: &udf.InitResponse{
Success: true,
},
},
}
u.Responses <- res
req = <-u.Requests
pt, ok := req.Message.(*udf.Request_Point)
if !ok {
t.Errorf("expected point message got %T", req.Message)
}
res = &udf.Response{
Message: &udf.Response_Point{
Point: pt.Point,
},
}
u.Responses <- res
close(u.Responses)
}()
s.Start()
err := s.Init(nil)
if err != nil {
t.Fatal(err)
}
// Write point to server
pt := models.Point{
Name: "test",
Database: "db",
RetentionPolicy: "rp",
Tags: models.Tags{"t1": "v1", "t2": "v2"},
Fields: models.Fields{"f1": 1.0, "f2": 2.0},
Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),
}
s.PointIn() <- pt
rpt := <-s.PointOut()
if !reflect.DeepEqual(rpt, pt) {
t.Errorf("unexpected returned point got: %v exp %v", rpt, pt)
}
s.Stop()
// read all requests and wait till the chan is closed
for range u.Requests {
}
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}
func TestUDF_StartInitBatchStop(t *testing.T) {
u := udf_test.NewIO()
l := log.New(os.Stderr, "[TestUDF_StartPointStop] ", log.LstdFlags)
s := udf.NewServer(u.Out(), u.In(), l, 0, nil, nil)
go func() {
req := <-u.Requests
_, ok := req.Message.(*udf.Request_Init)
if !ok {
t.Errorf("expected init message got %T", req.Message)
}
res := &udf.Response{
Message: &udf.Response_Init{
Init: &udf.InitResponse{
Success: true,
},
},
}
u.Responses <- res
// Begin batch
req = <-u.Requests
bb, ok := req.Message.(*udf.Request_Begin)
if !ok {
t.Errorf("expected begin message got %T", req.Message)
}
res = &udf.Response{
Message: &udf.Response_Begin{
Begin: bb.Begin,
},
}
u.Responses <- res
// Point
req = <-u.Requests
pt, ok := req.Message.(*udf.Request_Point)
if !ok {
t.Errorf("expected point message got %T", req.Message)
}
res = &udf.Response{
Message: &udf.Response_Point{
Point: pt.Point,
},
}
u.Responses <- res
// End batch
req = <-u.Requests
eb, ok := req.Message.(*udf.Request_End)
if !ok {
t.Errorf("expected end message got %T", req.Message)
}
res = &udf.Response{
Message: &udf.Response_End{
End: eb.End,
},
}
u.Responses <- res
close(u.Responses)
}()
s.Start()
err := s.Init(nil)<|fim▁hole|>
// Write point to server
b := models.Batch{
Name: "test",
Tags: models.Tags{"t1": "v1"},
TMax: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),
Points: []models.BatchPoint{{
Fields: models.Fields{"f1": 1.0, "f2": 2.0, "f3": int64(1), "f4": "str"},
Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),
Tags: models.Tags{"t1": "v1", "t2": "v2"},
}},
}
s.BatchIn() <- b
rb := <-s.BatchOut()
if !reflect.DeepEqual(b, rb) {
t.Errorf("unexpected returned batch got: %v exp %v", rb, b)
}
s.Stop()
// read all requests and wait till the chan is closed
for range u.Requests {
}
if err := <-u.ErrC; err != nil {
t.Error(err)
}
}<|fim▁end|> | if err != nil {
t.Fatal(err)
} |
<|file_name|>test_engines.py<|end_file_name|><|fim▁begin|>import pytest
import salt.engines
from tests.support.mock import MagicMock, patch<|fim▁hole|>
def test_engine_module_name():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
assert engine.name == "foobar"
def test_engine_title_set():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
with patch("salt.utils.process.appendproctitle", MagicMock()) as mm:
with pytest.raises(KeyError):
# The method does not exist so a KeyError will be raised.
engine.run()
mm.assert_called_with("foobar")<|fim▁end|> | |
<|file_name|>baSlimScroll.directive.ts<|end_file_name|><|fim▁begin|>import { Directive, Input, Output, ElementRef, EventEmitter, OnChanges } from '@angular/core';
import 'jquery-slimscroll';
@Directive({
selector: '[baSlimScroll]'
})
export class BaSlimScroll implements OnChanges {
@Input() public baSlimScrollOptions: Object;
constructor(private _elementRef: ElementRef) {
}
ngOnChanges(changes) {
this._scroll();
}
private _scroll() {<|fim▁hole|> this._init();
}
private _init() {
jQuery(this._elementRef.nativeElement).slimScroll(this.baSlimScrollOptions);
}
private _destroy() {
jQuery(this._elementRef.nativeElement).slimScroll({ destroy: true });
}
}<|fim▁end|> | this._destroy(); |
<|file_name|>permissions.py<|end_file_name|><|fim▁begin|>import functools
from common.tornado_cookies import get_secure_cookie, generate_secure_cookie
from core import cookies
class Perms(object):
NONE = None
READ = 'r'
WRITE = 'w'
<|fim▁hole|> """
if not user.is_authenticated():
return Perms.READ
else:
return Perms.WRITE
def _get_cached_perm_level(request, cookie_name):
perm = get_secure_cookie(request, cookie_name)
if not perm:
return
assert perm in ('r', 'w')
return perm
def _set_cached_perm_level(response, cookie_name, perm_level):
assert perm_level in ('r', 'w')
cookie_val = generate_secure_cookie(cookie_name, perm_level)
response.set_cookie(cookie_name, cookie_val)
def _perm_level_satisfies(perm_val, perm_req):
"""
If a user has permission level `perm_val`,
and is requesting access level `perm_req`.
"""
if perm_req == perm_val:
return True
if (perm_val == Perms.WRITE) and (perm_req == Perms.READ):
return True
return False
def get_permission(request, response, room, perm_req):
"""
Returns True or False.
Sets a cookie on the response object to cache
the result, if necessary.
"""
assert perm_req in (Perms.READ, Perms.WRITE)
if cookies.has_cached_room_permission(
room.shortname,
perm_req,
functools.partial(get_secure_cookie, request),
session_key=request.session.session_key,
uid=getattr(request.user, 'id', None)):
return True
# Cached permission does not satisfy requirement.
perm_actual = _permission_level(request.user, room)
if perm_actual == Perms.NONE:
return False
assert perm_actual in (Perms.READ, Perms.WRITE)
result = _perm_level_satisfies(perm_actual, perm_req)
cookie_name = cookies.room_cookie_name(room.shortname, session_key=request.session.session_key, uid=getattr(request.user, 'id', None))
if result:
_set_cached_perm_level(response, cookie_name, perm_actual)
return result<|fim▁end|> | def _permission_level(user, room):
"""
`user`'s permission level on `room`, ignoring cookies |
<|file_name|>Mood.java<|end_file_name|><|fim▁begin|>package me.moodcat.api;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.stream.Collectors;
import lombok.Getter;
import me.moodcat.database.embeddables.VAVector;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonIgnore;
/**
 * A mood represents a vector in the valence-arousal plane which will be attached to a song.
*/
@JsonFormat(shape = JsonFormat.Shape.OBJECT)
public enum Mood {
// CHECKSTYLE:OFF
ANGRY(new VAVector(-0.6, 0.6), "Angry"),
CALM(new VAVector(0.3, -0.9), "Calm"),
EXCITING(new VAVector(0.4, 0.8), "Exciting"),
HAPPY(new VAVector(0.7, 0.6), "Happy"),
NERVOUS(new VAVector(-0.7, 0.4), "Nervous"),
PLEASING(new VAVector(0.6, 0.3), "Pleasing"),
PEACEFUL(new VAVector(0.5, -0.7), "Peaceful"),
RELAXED(new VAVector(0.6, -0.3), "Relaxed"),
SAD(new VAVector(-0.7, -0.2), "Sad"),
SLEEPY(new VAVector(-0.2, -0.9), "Sleepy");
// CHECKSTYLE:ON
/**
* List of all names that represent moods. Used in {@link #nameRepresentsMood(String)}.
* By storing this once, we save a lot of unnecessary list creations.
*/
private static final List<String> MOOD_NAMES = Arrays.asList(Mood.values()).stream()<|fim▁hole|> /**
* The vector that represents this mood.
*
* @return The vector of this mood.
*/
@Getter
@JsonIgnore
private final VAVector vector;
/**
* Readable name for the frontend.
*
* @return The readable name of this mood.
*/
@Getter
private final String name;
private Mood(final VAVector vector, final String name) {
this.vector = vector;
this.name = name;
}
/**
* Get the mood that is closest to the given vector.
*
* @param vector
* The vector to determine the mood for.
* @return The Mood that is closest to the vector.
*/
public static Mood closestTo(final VAVector vector) {
double distance = Double.MAX_VALUE;
Mood mood = null;
for (final Mood m : Mood.values()) {
final double moodDistance = m.vector.distance(vector);
if (moodDistance < distance) {
distance = moodDistance;
mood = m;
}
}
return mood;
}
/**
* Get the vector that represents the average of the provided list of moods.
*
* @param moods
* The textual list of moods.
* @return The average vector, or the zero-vector if no moods were found.
*/
public static VAVector createTargetVector(final List<String> moods) {
final List<VAVector> actualMoods = moods.stream()
.filter(Mood::nameRepresentsMood)
.map(mood -> Mood.valueOf(mood.toUpperCase(Locale.ROOT)))
.map(mood -> mood.getVector())
.collect(Collectors.toList());
return VAVector.average(actualMoods);
}
private static boolean nameRepresentsMood(final String mood) {
return MOOD_NAMES.contains(mood);
}
}<|fim▁end|> | .map(moodValue -> moodValue.getName())
.collect(Collectors.toList());
|
<|file_name|>integration_test.go<|end_file_name|><|fim▁begin|>// +build integration
package docker
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"runtime"
"testing"
"time"
"k8s.io/klog"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerapi "github.com/docker/docker/client"
"github.com/openshift/source-to-image/pkg/api"
"github.com/openshift/source-to-image/pkg/build"
"github.com/openshift/source-to-image/pkg/build/strategies"
"github.com/openshift/source-to-image/pkg/docker"
dockerpkg "github.com/openshift/source-to-image/pkg/docker"
"github.com/openshift/source-to-image/pkg/scm/git"
"github.com/openshift/source-to-image/pkg/tar"
"github.com/openshift/source-to-image/pkg/util"
"github.com/openshift/source-to-image/pkg/util/fs"
"golang.org/x/net/context"
)
const (
DefaultDockerSocket = "unix:///var/run/docker.sock"
TestSource = "https://github.com/openshift/ruby-hello-world"
FakeBuilderImage = "sti_test/sti-fake"
FakeUserImage = "sti_test/sti-fake-user"
FakeImageScripts = "sti_test/sti-fake-scripts"
FakeImageScriptsNoSaveArtifacts = "sti_test/sti-fake-scripts-no-save-artifacts"
FakeImageNoTar = "sti_test/sti-fake-no-tar"
FakeImageOnBuild = "sti_test/sti-fake-onbuild"
FakeNumericUserImage = "sti_test/sti-fake-numericuser"
FakeImageOnBuildRootUser = "sti_test/sti-fake-onbuild-rootuser"
FakeImageOnBuildNumericUser = "sti_test/sti-fake-onbuild-numericuser"
FakeImageAssembleRoot = "sti_test/sti-fake-assemble-root"
FakeImageAssembleUser = "sti_test/sti-fake-assemble-user"
TagCleanBuild = "test/sti-fake-app"
TagCleanBuildUser = "test/sti-fake-app-user"
TagIncrementalBuild = "test/sti-incremental-app"
TagIncrementalBuildUser = "test/sti-incremental-app-user"
TagCleanBuildScripts = "test/sti-fake-app-scripts"
TagIncrementalBuildScripts = "test/sti-incremental-app-scripts"
TagIncrementalBuildScriptsNoSaveArtifacts = "test/sti-incremental-app-scripts-no-save-artifacts"
TagCleanLayeredBuildNoTar = "test/sti-fake-no-tar"
TagCleanBuildOnBuild = "test/sti-fake-app-onbuild"
TagIncrementalBuildOnBuild = "test/sti-incremental-app-onbuild"
TagCleanBuildOnBuildNoName = "test/sti-fake-app-onbuild-noname"
TagCleanBuildNoName = "test/sti-fake-app-noname"
TagCleanLayeredBuildNoTarNoName = "test/sti-fake-no-tar-noname"
TagCleanBuildAllowedUIDsNamedUser = "test/sti-fake-alloweduids-nameduser"
TagCleanBuildAllowedUIDsNumericUser = "test/sti-fake-alloweduids-numericuser"
TagCleanBuildAllowedUIDsOnBuildRoot = "test/sti-fake-alloweduids-onbuildroot"
TagCleanBuildAllowedUIDsOnBuildNumericUser = "test/sti-fake-alloweduids-onbuildnumeric"
TagCleanBuildAllowedUIDsAssembleRoot = "test/sti-fake-alloweduids-assembleroot"
TagCleanBuildAllowedUIDsAssembleUser = "test/sti-fake-alloweduids-assembleuser"
// Need to serve the scripts from local host so any potential changes to the
// scripts are made available for integration testing.
//
// Port 23456 must match the port used in the fake image Dockerfiles
FakeScriptsHTTPURL = "http://127.0.0.1:23456/.s2i/bin"
)
var engineClient docker.Client
func init() {
klog.InitFlags(nil)
var err error
engineClient, err = docker.NewEngineAPIClient(docker.GetDefaultDockerConfig())
if err != nil {
panic(err)
}
// get the full path to this .go file so we can construct the file url
// using this file's dirname
_, filename, _, _ := runtime.Caller(0)
testImagesDir := filepath.Join(filepath.Dir(filepath.Dir(filename)), "scripts")
<|fim▁hole|> }
hs := http.Server{Handler: http.FileServer(http.Dir(testImagesDir))}
hs.SetKeepAlivesEnabled(false)
go hs.Serve(l)
}
func getDefaultContext() (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), 20*time.Second)
}
// TestInjectionBuild tests the build where we inject files to assemble script.
func TestInjectionBuild(t *testing.T) {
tempdir, err := ioutil.TempDir("", "s2i-test-dir")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tempdir)
err = ioutil.WriteFile(filepath.Join(tempdir, "secret"), []byte("secret"), 0666)
if err != nil {
t.Errorf("Unable to write content to temporary injection file: %v", err)
}
integration(t).exerciseInjectionBuild(TagCleanBuild, FakeBuilderImage, []string{
tempdir + ":/tmp",
tempdir + ":",
tempdir + ":test;" + tempdir + ":test2",
}, true)
}
func TestInjectionBuildBadDestination(t *testing.T) {
tempdir, err := ioutil.TempDir("", "s2i-test-dir")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tempdir)
err = ioutil.WriteFile(filepath.Join(tempdir, "secret"), []byte("secret"), 0666)
if err != nil {
t.Errorf("Unable to write content to temporary injection file: %v", err)
}
integration(t).exerciseInjectionBuild(TagCleanBuild, FakeBuilderImage, []string{tempdir + ":/bad/dir"}, false)
}
type integrationTest struct {
t *testing.T
setupComplete bool
}
func (i integrationTest) InspectImage(name string) (*dockertypes.ImageInspect, error) {
ctx, cancel := getDefaultContext()
defer cancel()
resp, _, err := engineClient.ImageInspectWithRaw(ctx, name)
if err != nil {
if dockerapi.IsErrImageNotFound(err) {
return nil, fmt.Errorf("no such image :%q", name)
}
return nil, err
}
return &resp, nil
}
var (
FakeScriptsFileURL string
)
func getLogLevel() (level int) {
for level = 5; level >= 0; level-- {
if klog.V(klog.Level(level)) == true {
break
}
}
return
}
// setup sets up integration tests
func (i *integrationTest) setup() {
if !i.setupComplete {
// get the full path to this .go file so we can construct the file url
// using this file's dirname
_, filename, _, _ := runtime.Caller(0)
testImagesDir := filepath.Join(filepath.Dir(filepath.Dir(filename)), "scripts")
FakeScriptsFileURL = "file://" + filepath.ToSlash(filepath.Join(testImagesDir, ".s2i", "bin"))
for _, image := range []string{TagCleanBuild, TagCleanBuildUser, TagIncrementalBuild, TagIncrementalBuildUser} {
ctx, cancel := getDefaultContext()
engineClient.ImageRemove(ctx, image, dockertypes.ImageRemoveOptions{})
cancel()
}
i.setupComplete = true
}
from := flag.CommandLine
if vflag := from.Lookup("v"); vflag != nil {
// the thing here is that we are looking for the bash -v passed into test-integration.sh (with no value),
// but for klog (https://k8s.io/klog/blob/master/klog.go), one specifies
// the logging level with -v=# (i.e. -v=0 or -v=3 or -v=5).
// so, for the changes stemming from issue 133, we 'reuse' the bash -v, and set the highest klog level.
// (if you look at STI's main.go, and setupGlog, it essentially maps klog's -v to --loglevel for use by the sti command)
//NOTE - passing --loglevel or -v=5 into test-integration.sh does not work
if getLogLevel() != 5 {
vflag.Value.Set("5")
// FIXME currently klog has only option to redirect output to stderr
// the preferred for STI would be to redirect to stdout
flag.CommandLine.Set("logtostderr", "true")
}
}
}
func integration(t *testing.T) *integrationTest {
i := &integrationTest{t: t}
i.setup()
return i
}
// Test a clean build. The simplest case.
func TestCleanBuild(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuild, false, FakeBuilderImage, "", true, true, false)
}
// Test Labels
func TestCleanBuildLabel(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuild, false, FakeBuilderImage, "", true, true, true)
}
func TestCleanBuildUser(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuildUser, false, FakeUserImage, "", true, true, false)
}
func TestCleanBuildFileScriptsURL(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuild, false, FakeBuilderImage, FakeScriptsFileURL, true, true, false)
}
func TestCleanBuildHttpScriptsURL(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuild, false, FakeBuilderImage, FakeScriptsHTTPURL, true, true, false)
}
func TestCleanBuildScripts(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuildScripts, false, FakeImageScripts, "", true, true, false)
}
func TestLayeredBuildNoTar(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanLayeredBuildNoTar, false, FakeImageNoTar, FakeScriptsFileURL, false, true, false)
}
// Test that a build config with a callbackURL will invoke HTTP endpoint
func TestCleanBuildCallbackInvoked(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuild, true, FakeBuilderImage, "", true, true, false)
}
func TestCleanBuildOnBuild(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuildOnBuild, false, FakeImageOnBuild, "", true, true, false)
}
func TestCleanBuildOnBuildNoName(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuildOnBuildNoName, false, FakeImageOnBuild, "", false, false, false)
}
func TestCleanBuildNoName(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanBuildNoName, false, FakeBuilderImage, "", true, false, false)
}
func TestLayeredBuildNoTarNoName(t *testing.T) {
integration(t).exerciseCleanBuild(TagCleanLayeredBuildNoTarNoName, false, FakeImageNoTar, FakeScriptsFileURL, false, false, false)
}
func TestAllowedUIDsNamedUser(t *testing.T) {
integration(t).exerciseCleanAllowedUIDsBuild(TagCleanBuildAllowedUIDsNamedUser, FakeUserImage, true)
}
func TestAllowedUIDsNumericUser(t *testing.T) {
integration(t).exerciseCleanAllowedUIDsBuild(TagCleanBuildAllowedUIDsNumericUser, FakeNumericUserImage, false)
}
func TestAllowedUIDsOnBuildRootUser(t *testing.T) {
integration(t).exerciseCleanAllowedUIDsBuild(TagCleanBuildAllowedUIDsNamedUser, FakeImageOnBuildRootUser, true)
}
func TestAllowedUIDsOnBuildNumericUser(t *testing.T) {
integration(t).exerciseCleanAllowedUIDsBuild(TagCleanBuildAllowedUIDsNumericUser, FakeImageOnBuildNumericUser, false)
}
func TestAllowedUIDsAssembleRoot(t *testing.T) {
integration(t).exerciseCleanAllowedUIDsBuild(TagCleanBuildAllowedUIDsAssembleRoot, FakeImageAssembleRoot, true)
}
func TestAllowedUIDsAssembleUser(t *testing.T) {
integration(t).exerciseCleanAllowedUIDsBuild(TagCleanBuildAllowedUIDsAssembleUser, FakeImageAssembleUser, false)
}
func (i *integrationTest) exerciseCleanAllowedUIDsBuild(tag, imageName string, expectError bool) {
t := i.t
config := &api.Config{
DockerConfig: docker.GetDefaultDockerConfig(),
BuilderImage: imageName,
BuilderPullPolicy: api.DefaultBuilderPullPolicy,
Source: git.MustParse(TestSource),
Tag: tag,
Incremental: false,
ScriptsURL: "",
ExcludeRegExp: tar.DefaultExclusionPattern.String(),
}
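	// Restrict the builder image user to numeric UIDs of 1 or higher (i.e. non-root).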
config.AllowedUIDs.Set("1-")
_, _, err := strategies.Strategy(engineClient, config, build.Overrides{})
if err != nil && !expectError {
t.Fatalf("Cannot create a new builder: %v", err)
}
if err == nil && expectError {
t.Fatalf("Did not get an error and was expecting one.")
}
}
func (i *integrationTest) exerciseCleanBuild(tag string, verifyCallback bool, imageName string, scriptsURL string, expectImageName bool, setTag bool, checkLabel bool) {
t := i.t
callbackURL := ""
callbackInvoked := false
callbackHasValidJSON := false
if verifyCallback {
handler := func(w http.ResponseWriter, r *http.Request) {
// we got called
callbackInvoked = true
// the header is as expected
contentType := r.Header["Content-Type"][0]
callbackHasValidJSON = contentType == "application/json"
// the request body is as expected
if callbackHasValidJSON {
defer r.Body.Close()
body, _ := ioutil.ReadAll(r.Body)
type CallbackMessage struct {
Success bool
Labels map[string]string
}
var callbackMessage CallbackMessage
err := json.Unmarshal(body, &callbackMessage)
callbackHasValidJSON = (err == nil) && callbackMessage.Success && len(callbackMessage.Labels) > 0
}
}
ts := httptest.NewServer(http.HandlerFunc(handler))
defer ts.Close()
callbackURL = ts.URL
}
var buildTag string
if setTag {
buildTag = tag
} else {
buildTag = ""
}
config := &api.Config{
DockerConfig: docker.GetDefaultDockerConfig(),
BuilderImage: imageName,
BuilderPullPolicy: api.DefaultBuilderPullPolicy,
Source: git.MustParse(TestSource),
Tag: buildTag,
Incremental: false,
CallbackURL: callbackURL,
ScriptsURL: scriptsURL,
ExcludeRegExp: tar.DefaultExclusionPattern.String(),
}
b, _, err := strategies.Strategy(engineClient, config, build.Overrides{})
if err != nil {
t.Fatalf("Cannot create a new builder.")
}
resp, err := b.Build(config)
if err != nil {
t.Fatalf("An error occurred during the build: %v", err)
} else if !resp.Success {
t.Fatalf("The build failed.")
}
if callbackInvoked != verifyCallback {
t.Fatalf("S2I build did not invoke callback")
}
if callbackHasValidJSON != verifyCallback {
t.Fatalf("S2I build did not invoke callback with valid json message")
}
// We restrict this check to only when we are passing tag through the build config
// since we will not end up with an available tag by that name from build
if setTag {
i.checkForImage(tag)
containerID := i.createContainer(tag)
i.checkBasicBuildState(containerID, resp.WorkingDir)
if checkLabel {
i.checkForLabel(tag)
}
i.removeContainer(containerID)
}
// Check if we receive back an ImageID when we are expecting to
if expectImageName && len(resp.ImageID) == 0 {
t.Fatalf("S2I build did not receive an ImageID in response")
}
if !expectImageName && len(resp.ImageID) > 0 {
t.Fatalf("S2I build received an ImageID in response")
}
}
// Test an incremental build.
func TestIncrementalBuildAndRemovePreviousImage(t *testing.T) {
integration(t).exerciseIncrementalBuild(TagIncrementalBuild, FakeBuilderImage, true, false, false)
}
func TestIncrementalBuildAndKeepPreviousImage(t *testing.T) {
integration(t).exerciseIncrementalBuild(TagIncrementalBuild, FakeBuilderImage, false, false, false)
}
func TestIncrementalBuildUser(t *testing.T) {
integration(t).exerciseIncrementalBuild(TagIncrementalBuildUser, FakeBuilderImage, true, false, false)
}
func TestIncrementalBuildScripts(t *testing.T) {
integration(t).exerciseIncrementalBuild(TagIncrementalBuildScripts, FakeImageScripts, true, false, false)
}
func TestIncrementalBuildScriptsNoSaveArtifacts(t *testing.T) {
integration(t).exerciseIncrementalBuild(TagIncrementalBuildScriptsNoSaveArtifacts, FakeImageScriptsNoSaveArtifacts, true, true, false)
}
func TestIncrementalBuildOnBuild(t *testing.T) {
integration(t).exerciseIncrementalBuild(TagIncrementalBuildOnBuild, FakeImageOnBuild, false, true, true)
}
func (i *integrationTest) exerciseInjectionBuild(tag, imageName string, injections []string, expectSuccess bool) {
t := i.t
injectionList := api.VolumeList{}
for _, i := range injections {
err := injectionList.Set(i)
if err != nil {
t.Errorf("injectionList.Set() failed with error %s\n", err)
}
}
// For test purposes, keep at least one injected source
var keptVolume *api.VolumeSpec
if len(injectionList) > 0 {
injectionList[0].Keep = true
keptVolume = &injectionList[0]
}
config := &api.Config{
DockerConfig: docker.GetDefaultDockerConfig(),
BuilderImage: imageName,
BuilderPullPolicy: api.DefaultBuilderPullPolicy,
Source: git.MustParse(TestSource),
Tag: tag,
Injections: injectionList,
ExcludeRegExp: tar.DefaultExclusionPattern.String(),
}
builder, _, err := strategies.Strategy(engineClient, config, build.Overrides{})
if err != nil {
t.Fatalf("Unable to create builder: %v", err)
}
resp, err := builder.Build(config)
if !expectSuccess {
if resp.Success {
t.Fatal("Success was returned, but should have failed")
}
return
}
if err != nil {
t.Fatalf("Unexpected error occurred during build: %v", err)
}
if !resp.Success {
t.Fatalf("S2I build failed.")
}
i.checkForImage(tag)
containerID := i.createContainer(tag)
defer i.removeContainer(containerID)
// Check that the injected file is delivered to assemble script
i.fileExists(containerID, "/sti-fake/secret-delivered")
i.fileExists(containerID, "/sti-fake/relative-secret-delivered")
// Make sure the injected file does not exists in resulting image
testFs := fs.NewFileSystem()
files, err := util.ListFilesToTruncate(testFs, injectionList)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
for _, f := range files {
if err = i.testFile(tag, f); err == nil {
t.Errorf("The file %q must be empty or not exist", f)
}
}
if keptVolume != nil {
keptFiles, err := util.ListFiles(testFs, *keptVolume)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
for _, f := range keptFiles {
if err = i.testFile(tag, f); err != nil {
t.Errorf("The file %q must exist and not be empty", f)
}
}
}
}
func (i *integrationTest) testFile(tag, path string) error {
exitCode := i.runInImage(tag, "test -s "+path)
if exitCode != 0 {
return fmt.Errorf("file %s does not exist or is empty in the container %s", path, tag)
}
return nil
}
func (i *integrationTest) exerciseIncrementalBuild(tag, imageName string, removePreviousImage bool, expectClean bool, checkOnBuild bool) {
t := i.t
start := time.Now()
config := &api.Config{
DockerConfig: docker.GetDefaultDockerConfig(),
BuilderImage: imageName,
BuilderPullPolicy: api.DefaultBuilderPullPolicy,
Source: git.MustParse(TestSource),
Tag: tag,
Incremental: false,
RemovePreviousImage: removePreviousImage,
ExcludeRegExp: tar.DefaultExclusionPattern.String(),
}
builder, _, err := strategies.Strategy(engineClient, config, build.Overrides{})
if err != nil {
t.Fatalf("Unable to create builder: %v", err)
}
resp, err := builder.Build(config)
if err != nil {
t.Fatalf("Unexpected error occurred during build: %v", err)
}
if !resp.Success {
t.Fatalf("S2I build failed.")
}
previousImageID := resp.ImageID
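	// Run a second, incremental build that reuses artifacts from the image produced above.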
config = &api.Config{
DockerConfig: docker.GetDefaultDockerConfig(),
BuilderImage: imageName,
BuilderPullPolicy: api.DefaultBuilderPullPolicy,
Source: git.MustParse(TestSource),
Tag: tag,
Incremental: true,
RemovePreviousImage: removePreviousImage,
PreviousImagePullPolicy: api.PullIfNotPresent,
ExcludeRegExp: tar.DefaultExclusionPattern.String(),
}
builder, _, err = strategies.Strategy(engineClient, config, build.Overrides{})
if err != nil {
t.Fatalf("Unable to create incremental builder: %v", err)
}
resp, err = builder.Build(config)
if err != nil {
t.Fatalf("Unexpected error occurred during incremental build: %v", err)
}
if !resp.Success {
t.Fatalf("S2I incremental build failed.")
}
i.checkForImage(tag)
containerID := i.createContainer(tag)
defer i.removeContainer(containerID)
i.checkIncrementalBuildState(containerID, resp.WorkingDir, expectClean)
_, err = i.InspectImage(previousImageID)
if removePreviousImage {
if err == nil {
t.Errorf("Previous image %s not deleted", previousImageID)
}
} else {
if err != nil {
t.Errorf("Couldn't find previous image %s", previousImageID)
}
}
if checkOnBuild {
i.fileExists(containerID, "/sti-fake/src/onbuild")
}
if took := time.Since(start); took > docker.DefaultDockerTimeout {
// https://github.com/openshift/source-to-image/issues/301 is a
// case where incremental builds would get stuck until the
// timeout.
t.Errorf("Test took too long (%v), some operation may have gotten stuck waiting for the DefaultDockerTimeout (%v). Inspect the logs to find operations that took long.", took, docker.DefaultDockerTimeout)
}
}
// Support methods
func (i *integrationTest) checkForImage(tag string) {
_, err := i.InspectImage(tag)
if err != nil {
i.t.Errorf("Couldn't find image with tag: %s", tag)
}
}
func (i *integrationTest) createContainer(image string) string {
ctx, cancel := getDefaultContext()
defer cancel()
opts := dockertypes.ContainerCreateConfig{Name: "", Config: &dockercontainer.Config{Image: image}}
container, err := engineClient.ContainerCreate(ctx, opts.Config, opts.HostConfig, opts.NetworkingConfig, opts.Name)
if err != nil {
i.t.Errorf("Couldn't create container from image %s with error %+v", image, err)
return ""
}
ctx, cancel = getDefaultContext()
defer cancel()
err = engineClient.ContainerStart(ctx, container.ID, dockertypes.ContainerStartOptions{})
if err != nil {
i.t.Errorf("Couldn't start container: %s with error %+v", container.ID, err)
return ""
}
ctx, cancel = getDefaultContext()
defer cancel()
waitC, errC := engineClient.ContainerWait(ctx, container.ID, dockercontainer.WaitConditionNextExit)
select {
case result := <-waitC:
if result.StatusCode != 0 {
i.t.Errorf("Bad exit code from container: %d", result.StatusCode)
return ""
}
case err := <-errC:
i.t.Errorf("Error waiting for container: %v", err)
return ""
}
return container.ID
}
func (i *integrationTest) runInContainer(image string, command []string) int {
ctx, cancel := getDefaultContext()
defer cancel()
opts := dockertypes.ContainerCreateConfig{Name: "", Config: &dockercontainer.Config{Image: image, AttachStdout: false, AttachStdin: false, Cmd: command}}
container, err := engineClient.ContainerCreate(ctx, opts.Config, opts.HostConfig, opts.NetworkingConfig, opts.Name)
if err != nil {
i.t.Errorf("Couldn't create container from image %s err %+v", image, err)
return -1
}
ctx, cancel = getDefaultContext()
defer cancel()
err = engineClient.ContainerStart(ctx, container.ID, dockertypes.ContainerStartOptions{})
if err != nil {
i.t.Errorf("Couldn't start container: %s", container.ID)
}
ctx, cancel = getDefaultContext()
defer cancel()
waitC, errC := engineClient.ContainerWait(ctx, container.ID, dockercontainer.WaitConditionNextExit)
exitCode := -1
select {
case result := <-waitC:
exitCode = int(result.StatusCode)
case err := <-errC:
i.t.Errorf("Couldn't wait for container: %s: %v", container.ID, err)
}
ctx, cancel = getDefaultContext()
defer cancel()
err = engineClient.ContainerRemove(ctx, container.ID, dockertypes.ContainerRemoveOptions{})
if err != nil {
i.t.Errorf("Couldn't remove container: %s", container.ID)
}
return exitCode
}
func (i *integrationTest) removeContainer(cID string) {
ctx, cancel := getDefaultContext()
defer cancel()
engineClient.ContainerKill(ctx, cID, "SIGKILL")
removeOpts := dockertypes.ContainerRemoveOptions{
RemoveVolumes: true,
}
err := engineClient.ContainerRemove(ctx, cID, removeOpts)
if err != nil {
i.t.Errorf("Couldn't remove container %s: %s", cID, err)
}
}
func (i *integrationTest) fileExists(cID string, filePath string) {
res := i.fileExistsInContainer(cID, filePath)
if !res {
i.t.Errorf("Couldn't find file %s in container %s", filePath, cID)
}
}
func (i *integrationTest) fileNotExists(cID string, filePath string) {
res := i.fileExistsInContainer(cID, filePath)
if res {
i.t.Errorf("Unexpected file %s in container %s", filePath, cID)
}
}
func (i *integrationTest) runInImage(image string, cmd string) int {
return i.runInContainer(image, []string{"/bin/sh", "-c", cmd})
}
func (i *integrationTest) checkBasicBuildState(cID string, workingDir string) {
i.fileExists(cID, "/sti-fake/assemble-invoked")
i.fileExists(cID, "/sti-fake/run-invoked")
i.fileExists(cID, "/sti-fake/src/Gemfile")
_, err := os.Stat(workingDir)
if !os.IsNotExist(err) {
i.t.Errorf("Unexpected error from stat check on %s", workingDir)
}
}
func (i *integrationTest) checkIncrementalBuildState(cID string, workingDir string, expectClean bool) {
i.checkBasicBuildState(cID, workingDir)
if expectClean {
i.fileNotExists(cID, "/sti-fake/save-artifacts-invoked")
} else {
i.fileExists(cID, "/sti-fake/save-artifacts-invoked")
}
}
func (i *integrationTest) fileExistsInContainer(cID string, filePath string) bool {
ctx, cancel := getDefaultContext()
defer cancel()
rdr, stats, err := engineClient.CopyFromContainer(ctx, cID, filePath)
if err != nil {
return false
}
defer rdr.Close()
return "" != stats.Name
}
func (i *integrationTest) checkForLabel(image string) {
docker := dockerpkg.New(engineClient, (&api.Config{}).PullAuthentication)
labelMap, err := docker.GetLabels(image)
if err != nil {
i.t.Fatalf("Unable to get labels from image %s: %v", image, err)
}
if labelMap["testLabel"] != "testLabel_value" {
i.t.Errorf("Unable to verify 'testLabel' for image '%s'", image)
}
}<|fim▁end|> | l, err := net.Listen("tcp", ":23456")
if err != nil {
panic(err) |
<|file_name|>LayerView.cpp<|end_file_name|><|fim▁begin|>#include "LayerView.h"
#include "LayerWindow.h"
#include "LayerItem.h"
LayerView::LayerView (BRect frame, const char *name, CanvasView *_myView)
: BView (frame, name, B_FOLLOW_ALL_SIDES, B_FRAME_EVENTS | B_WILL_DRAW)
{
fMyView = _myView;
fFrame = frame;
for (int i = 0; i < MAX_LAYERS; i++)
layerItem[i] = NULL;<|fim▁hole|>
LayerView::~LayerView ()
{
// printf ("~LayerView\n");
}
void LayerView::setScrollBars (BScrollBar *_h, BScrollBar *_v)
{
mh = _h;
mv = _v;
mh->SetTarget (this);
mv->SetTarget (this);
}
void LayerView::Draw (BRect updateRect)
{
inherited::Draw (updateRect);
}
void LayerView::AttachedToWindow ()
{
// Yes I know this is rather blunt. Let's call this RAD and all is well.
for (int i = 0; i < MAX_LAYERS; i++)
{
if (layerItem[i])
{
RemoveChild (layerItem[i]);
delete layerItem[i];
layerItem[i] = NULL;
}
}
FrameResized (Bounds().Width(), Bounds().Height());
SetViewColor (B_TRANSPARENT_32_BIT);
BRect layerItemRect = BRect (0, 0, LAYERITEMWIDTH, LAYERITEMHEIGHT);
for (int i = fMyView->numLayers() - 1; i >= 0; i--)
{
layerItem[i] = new LayerItem (layerItemRect, "Layer Item", i, fMyView);
AddChild (layerItem[i]);
layerItemRect.OffsetBy (0, LAYERITEMHEIGHT);
}
}
void LayerView::FrameResized (float width, float height)
{
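	// Keep the scroll bar ranges and proportions in sync with the new view size.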
float hmin, hmax, vmin, vmax;
mh->GetRange (&hmin, &hmax);
mv->GetRange (&vmin, &vmax);
mh->SetRange (hmin, fFrame.Width() - width);
mv->SetRange (vmin, fFrame.Height() - height);
mh->SetProportion (width/fFrame.Width());
mv->SetProportion (height/fFrame.Height());
}<|fim▁end|> | } |
<|file_name|>test_unix_pass_fd.rs<|end_file_name|><|fim▁begin|>use {TryRead, TryWrite};
use mio::*;
use mio::deprecated::{EventLoop, Handler};
use mio::deprecated::unix::*;
use bytes::{Buf, ByteBuf, SliceBuf};
use slab;
use std::path::PathBuf;
use std::io::{self, Read};
use std::os::unix::io::{AsRawFd, FromRawFd};
use tempdir::TempDir;
const SERVER: Token = Token(10_000_000);
const CLIENT: Token = Token(10_000_001);
struct EchoConn {
sock: UnixStream,
pipe_fd: Option<PipeReader>,
token: Option<Token>,
interest: Ready,
}
type Slab<T> = slab::Slab<T, Token>;
impl EchoConn {
fn new(sock: UnixStream) -> EchoConn {<|fim▁hole|> interest: Ready::hup(),
}
}
fn writable(&mut self, event_loop: &mut EventLoop<Echo>) -> io::Result<()> {
let fd = self.pipe_fd.take().unwrap();
match self.sock.try_write_send_fd(b"x", fd.as_raw_fd()) {
Ok(None) => {
debug!("client flushing buf; WOULDBLOCK");
self.pipe_fd = Some(fd);
self.interest.insert(Ready::writable());
}
Ok(Some(r)) => {
debug!("CONN : we wrote {} bytes!", r);
self.interest.insert(Ready::readable());
self.interest.remove(Ready::writable());
}
Err(e) => debug!("not implemented; client err={:?}", e),
}
assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest);
event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, PollOpt::edge() | PollOpt::oneshot())
}
fn readable(&mut self, event_loop: &mut EventLoop<Echo>) -> io::Result<()> {
let mut buf = ByteBuf::mut_with_capacity(2048);
match self.sock.try_read_buf(&mut buf) {
Ok(None) => {
panic!("We just got readable, but were unable to read from the socket?");
}
Ok(Some(r)) => {
debug!("CONN : we read {} bytes!", r);
self.interest.remove(Ready::readable());
self.interest.insert(Ready::writable());
}
Err(e) => {
debug!("not implemented; client err={:?}", e);
self.interest.remove(Ready::readable());
}
};
// create fd to pass back. Assume that the write will work
// without blocking, for simplicity -- we're only testing that
// the FD makes it through somehow
let (rd, mut wr) = pipe().unwrap();
let mut buf = buf.flip();
match wr.try_write_buf(&mut buf) {
Ok(None) => {
panic!("writing to our own pipe blocked :(");
}
Ok(Some(r)) => {
debug!("CONN: we wrote {} bytes to the FD", r);
}
Err(e) => {
panic!("not implemented; client err={:?}", e);
}
}
self.pipe_fd = Some(rd);
assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest);
event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, PollOpt::edge() | PollOpt::oneshot())
}
}
struct EchoServer {
sock: UnixListener,
conns: Slab<EchoConn>
}
impl EchoServer {
fn accept(&mut self, event_loop: &mut EventLoop<Echo>) -> io::Result<()> {
debug!("server accepting socket");
let sock = self.sock.accept().unwrap();
let conn = EchoConn::new(sock);
let tok = self.conns.insert(conn)
.ok().expect("could not add connection to slab");
// Register the connection
self.conns[tok].token = Some(tok);
event_loop.register(&self.conns[tok].sock, tok, Ready::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("could not register socket with event loop");
Ok(())
}
fn conn_readable(&mut self, event_loop: &mut EventLoop<Echo>, tok: Token) -> io::Result<()> {
debug!("server conn readable; tok={:?}", tok);
self.conn(tok).readable(event_loop)
}
fn conn_writable(&mut self, event_loop: &mut EventLoop<Echo>, tok: Token) -> io::Result<()> {
debug!("server conn writable; tok={:?}", tok);
self.conn(tok).writable(event_loop)
}
fn conn<'a>(&'a mut self, tok: Token) -> &'a mut EchoConn {
&mut self.conns[tok]
}
}
struct EchoClient {
sock: UnixStream,
msgs: Vec<&'static str>,
tx: SliceBuf<'static>,
rx: SliceBuf<'static>,
token: Token,
interest: Ready,
}
// Sends a message and expects to receive the same exact message, one at a time
impl EchoClient {
fn new(sock: UnixStream, tok: Token, mut msgs: Vec<&'static str>) -> EchoClient {
let curr = msgs.remove(0);
EchoClient {
sock: sock,
msgs: msgs,
tx: SliceBuf::wrap(curr.as_bytes()),
rx: SliceBuf::wrap(curr.as_bytes()),
token: tok,
interest: Ready::none(),
}
}
fn readable(&mut self, event_loop: &mut EventLoop<Echo>) -> io::Result<()> {
debug!("client socket readable");
let mut pipe: PipeReader;
let mut buf = [0; 256];
match self.sock.read_recv_fd(&mut buf) {
Ok((_, None)) => {
panic!("Did not receive passed file descriptor");
}
Ok((r, Some(fd))) => {
assert_eq!(r, 1);
assert_eq!(b'x', buf[0]);
debug!("CLIENT : We read {} bytes!", r);
pipe = From::<Io>::from(unsafe { Io::from_raw_fd(fd) });
}
Err(e) => {
panic!("not implemented; client err={:?}", e);
}
};
// read the data out of the FD itself
let n = match pipe.read(&mut buf) {
Ok(r) => {
debug!("CLIENT : We read {} bytes from the FD", r);
r
}
Err(e) => {
panic!("not implemented, client err={:?}", e);
}
};
for &actual in buf[0..n].iter() {
let expect = self.rx.read_byte().unwrap();
assert!(actual == expect, "actual={}; expect={}", actual, expect);
}
self.interest.remove(Ready::readable());
if !self.rx.has_remaining() {
self.next_msg(event_loop).unwrap();
}
if !self.interest.is_none() {
assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest);
try!(event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()));
}
Ok(())
}
fn writable(&mut self, event_loop: &mut EventLoop<Echo>) -> io::Result<()> {
debug!("client socket writable");
match self.sock.try_write_buf(&mut self.tx) {
Ok(None) => {
debug!("client flushing buf; WOULDBLOCK");
self.interest.insert(Ready::writable());
}
Ok(Some(r)) => {
debug!("CLIENT : we wrote {} bytes!", r);
self.interest.insert(Ready::readable());
self.interest.remove(Ready::writable());
}
Err(e) => debug!("not implemented; client err={:?}", e)
}
assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest);
event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot())
}
fn next_msg(&mut self, event_loop: &mut EventLoop<Echo>) -> io::Result<()> {
if self.msgs.is_empty() {
event_loop.shutdown();
return Ok(());
}
let curr = self.msgs.remove(0);
debug!("client prepping next message");
self.tx = SliceBuf::wrap(curr.as_bytes());
self.rx = SliceBuf::wrap(curr.as_bytes());
self.interest.insert(Ready::writable());
event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot())
}
}
struct Echo {
server: EchoServer,
client: EchoClient,
}
impl Echo {
fn new(srv: UnixListener, client: UnixStream, msgs: Vec<&'static str>) -> Echo {
Echo {
server: EchoServer {
sock: srv,
conns: Slab::with_capacity(128)
},
client: EchoClient::new(client, CLIENT, msgs)
}
}
}
impl Handler for Echo {
type Timeout = usize;
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<Echo>, token: Token, events: Ready) {
if events.is_readable() {
match token {
SERVER => self.server.accept(event_loop).unwrap(),
CLIENT => self.client.readable(event_loop).unwrap(),
i => self.server.conn_readable(event_loop, i).unwrap()
};
}
if events.is_writable() {
match token {
SERVER => panic!("received writable for token 0"),
CLIENT => self.client.writable(event_loop).unwrap(),
_ => self.server.conn_writable(event_loop, token).unwrap()
};
}
}
}
#[test]
pub fn test_unix_pass_fd() {
debug!("Starting TEST_UNIX_PASS_FD");
let mut event_loop = EventLoop::new().unwrap();
let tmp_dir = TempDir::new("mio").unwrap();
let addr = tmp_dir.path().join(&PathBuf::from("sock"));
let srv = UnixListener::bind(&addr).unwrap();
info!("listen for connections");
event_loop.register(&srv, SERVER, Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap();
let sock = UnixStream::connect(&addr).unwrap();
// Connect to the server
event_loop.register(&sock, CLIENT, Ready::writable(), PollOpt::edge() | PollOpt::oneshot()).unwrap();
// Start the event loop
event_loop.run(&mut Echo::new(srv, sock, vec!["foo", "bar"])).unwrap();
}<|fim▁end|> | EchoConn {
sock: sock,
pipe_fd: None,
token: None, |
<|file_name|>service.rs<|end_file_name|><|fim▁begin|>extern crate proc_macro;
use std::convert::From;
use syn;
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use quote::{quote,ToTokens};
use super::utils::*;
struct Service<'a> {<|fim▁hole|> idents_cap: Vec<syn::Ident>,
args: Vec<Vec<syn::Pat>>,
args_ty: Vec<Vec<syn::Type>>,
outputs: Vec<Option<syn::Type>>,
}
impl<'a> Service<'a> {
pub fn new(ast: &'a syn::ItemImpl) -> Self {
let signatures = ast.items.iter().filter_map(|item| match item {
syn::ImplItem::Method(item) => Some(&item.sig),
_ => None,
});
let (mut idents, mut idents_cap, mut args, mut args_ty, mut outputs) =
(Vec::new(), Vec::new(), Vec::new(), Vec::new(), Vec::new());
for sig in signatures {
let (mut a, mut a_t) = (Vec::new(), Vec::new());
let mut has_self = false;
for arg in sig.inputs.iter() {
match arg {
syn::FnArg::Typed(arg) => {
a.push((*arg.pat).clone());
a_t.push((*arg.ty).clone());
},
syn::FnArg::Receiver(_) => {
has_self = true;
},
}
}
if !has_self {
continue;
}
let ident = sig.ident.clone();
args.push(a);
args_ty.push(a_t);
idents_cap.push(to_camel_ident(&ident));
idents.push(ident);
outputs.push(match sig.output.clone() {
syn::ReturnType::Default => None,
syn::ReturnType::Type(_, ty) => Some(*ty),
});
//sigs.push(sig.clone());
}
Self { ast: &ast, idents, idents_cap, args, args_ty, outputs }
}
pub fn generate(&self) -> TokenStream {
let ast = &self.ast;
let (types, server, client) = (self.types(), self.server(), self.client());
(quote!{
#ast
pub mod service {
use super::*;
use libfoxlive::rpc::Service;
use std::marker::PhantomData;
use futures::future::{Future,FutureExt,ok,err};
#types
#server
#client
}
}).into()
}
fn types(&self) -> TokenStream2 {
let Self { idents_cap, args_ty, outputs, .. } = self;
let (_impl_generics, ty_generics, where_clause) = self.ast.generics.split_for_impl();
// we need phantom variant for handling generics cases: R, R<A>, R<A,B>.
let phantom = quote! { _Phantom(PhantomData<Request #ty_generics>) };
let responses = outputs.iter().zip(idents_cap).map(|(output, ident)| match output {
None => quote! { #ident },
Some(t) => quote! { #ident(#t) },
});
// Response as future
/* let fut_outputs = outputs.iter().zip(sigs.iter()).map(|(output, sig)| {
if sig.asyncness.is_some() {
panic!(format!("{}", output.to_token_stream()))
} else {
}
}).collect::<Vec<_>>(); */
quote! {
pub enum Request #ty_generics #where_clause {
#(#idents_cap(#(#args_ty),*),)*
#phantom
}
#[derive(Clone)]
pub enum Response #ty_generics #where_clause {
#(#responses,)*
#phantom
}
}
}
fn server(&self) -> TokenStream2 {
let Self { ast, idents, idents_cap, args, outputs, .. } = self;
let ty = &*ast.self_ty;
let (impl_generics, ty_generics, where_clause) = self.ast.generics.split_for_impl();
let calls = outputs.iter().enumerate().map(|(i, output)| {
let (ident, ident_cap, args) = (&idents[i], &idents_cap[i], &args[i]);
match output {
None => quote! {{
self.#ident(#(#args),*);
Some(Response::#ident_cap)
}},
Some(_) => quote! { Some(Response::#ident_cap(self.#ident(#(#args),*))) },
}
});
quote! {
impl #impl_generics Service for #ty #where_clause {
type Request = Request #ty_generics;
type Response = Response #ty_generics;
// type ResponseFut = ResponseFut #impl_generics;
fn process_request(&mut self, request: Self::Request) -> Option<Self::Response> {
match request {
#(Request::#idents_cap(#(#args),*) => #calls,)*
_ => None,
}
}
}
}
}
fn client(&self) -> TokenStream2 {
let Self { idents, idents_cap, args, args_ty, outputs, .. } = self;
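        // Emit a Client trait with one default method per service method, each delegating to send_request.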
let generics = self.ast.generics.clone();
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let variants = outputs.iter().zip(idents_cap).map(|(output, ident)| match output {
None => quote! { Ok(Response::#ident) => ok(()) },
Some(_) => quote! { Ok(Response::#ident(r)) => ok(r) },
});
let outputs = outputs.iter().map(|o| match o {
None => quote! { () },
Some(t) => t.to_token_stream(),
});
quote! {
pub trait Client #impl_generics #where_clause {
type ResponseFut : Future<Output=Result<Response #ty_generics,()>>+'static;
fn send_request(&mut self, request: Request #ty_generics) -> Self::ResponseFut;
#(fn #idents(&mut self, #(#args: #args_ty),*) -> Box<Future<Output=Result<#outputs,()>>> {
Box::new(self.send_request(Request::#idents_cap(#(#args),*))
.then(|response| match response {
#variants,
_ => err(())
}))
})*
}
}
}
}
/// Macro generating RPC service traits and types, for the decorated
/// struct impl block.
pub fn service(_attrs: TokenStream, input: TokenStream) -> TokenStream {
let ast = syn::parse::<syn::ItemImpl>(input).unwrap();
let service = Service::new(&ast);
service.generate()
}<|fim▁end|> | ast: &'a syn::ItemImpl,
idents: Vec<syn::Ident>, |
<|file_name|>boot2docker.go<|end_file_name|><|fim▁begin|>package provision
import (
"errors"
"github.com/docker/machine/drivers"
"github.com/docker/machine/libmachine/provision/pkgaction"
"github.com/docker/machine/log"
"github.com/docker/machine/state"
"github.com/docker/machine/utils"
)
var (
ErrUnknownDriver = errors.New("unknown driver")
)
func init() {
Register("boot2docker", &RegisteredProvisioner{
New: NewBoot2DockerProvisioner,
})
}
func NewBoot2DockerProvisioner(d drivers.Driver) Provisioner {
g := GenericProvisioner{
DockerOptionsDir: "/etc/docker",
DaemonOptionsFile: "/etc/systemd/system/docker.service",
OsReleaseId: "docker",
Packages: []string{},
Driver: d,
}
p := &Boot2DockerProvisioner{
DebianProvisioner{
GenericProvisioner: g,
},
}
return p
}
type Boot2DockerProvisioner struct {
DebianProvisioner
}
func (provisioner *Boot2DockerProvisioner) upgradeIso() error {
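	// Stop the machine, fetch the latest boot2docker ISO into its machine directory, then boot it again.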
log.Info("Stopping machine to do the upgrade...")
if err := provisioner.Driver.Stop(); err != nil {
return err
}
if err := utils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Stopped)); err != nil {
return err
}
machineName := provisioner.GetDriver().GetMachineName()
log.Infof("Upgrading machine %s...", machineName)<|fim▁hole|> isoFilename := ""
switch provisioner.GetDriver().DriverName() {
case "virtualbox":
isoFilename = "boot2docker-virtualbox.iso"
case "vmwarefusion", "vmwarevsphere":
isoFilename = "boot2docker-vmware.iso"
case "hyper-v":
isoFilename = "boot2docker-hyperv.iso"
default:
return ErrUnknownDriver
}
b2dutils := utils.NewB2dUtils("", "", isoFilename)
// Usually we call this implicitly, but call it here explicitly to get
// the latest boot2docker ISO.
if err := b2dutils.DownloadLatestBoot2Docker(); err != nil {
return err
}
// Copy the latest version of boot2docker ISO to the machine's directory
if err := b2dutils.CopyIsoToMachineDir("", machineName); err != nil {
return err
}
if err := provisioner.Driver.Start(); err != nil {
return err
}
return utils.WaitFor(drivers.MachineInState(provisioner.Driver, state.Running))
}
func (provisioner *Boot2DockerProvisioner) Package(name string, action pkgaction.PackageAction) error {
if name == "docker" && action == pkgaction.Upgrade {
if err := provisioner.upgradeIso(); err != nil {
return err
}
}
return nil
}<|fim▁end|> | |
<|file_name|>call_log.py<|end_file_name|><|fim▁begin|>from django.db import models
from django_crypto_fields.fields import EncryptedTextField
from edc_base.model.models import BaseUuidModel
try:
from edc_sync.mixins import SyncMixin
except ImportError:
SyncMixin = type('SyncMixin', (object, ), {})
from ..managers import CallLogManager
class CallLog (SyncMixin, BaseUuidModel):
"""Maintains a log of calls for a particular participant."""
subject_identifier = models.CharField(
verbose_name="Subject Identifier",
max_length=50,
blank=True,
db_index=True,
unique=True,<|fim▁hole|>
locator_information = EncryptedTextField(
        help_text=('This information has been imported from '
'the previous locator. You may update as required.')
)
contact_notes = EncryptedTextField(
null=True,
blank=True,
help_text=''
)
label = models.CharField(
max_length=25,
null=True,
editable=False,
help_text="from followup list"
)
# history = AuditTrail()
objects = CallLogManager()
def natural_key(self):
        return (self.subject_identifier, )
class Meta:
app_label = 'edc_contact'<|fim▁end|> | ) |
<|file_name|>view.go<|end_file_name|><|fim▁begin|>package fastlanestat
import (
"net/http"
"html/template"
// "fmt"
// Import appengine urlfetch package, that is needed to make http call to the api
"appengine"
"appengine/datastore"
)
type ViewContext struct {
PricePoints []PricePoint
}
func viewStatsHandler(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
// The Query type and its methods are used to construct a query.
q := datastore.NewQuery("PricePoint").
Order("-PointInTime").
Limit(5000)
// To retrieve the results,
// you must execute the Query using its GetAll or Run methods.
var pricePoints []PricePoint
	if _, err := q.GetAll(c, &pricePoints); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
viewContext := ViewContext{ PricePoints: pricePoints }<|fim▁hole|>}<|fim▁end|> | t, _ := template.ParseFiles("templates/simple.htmltemplate")
t.Execute(w, viewContext) |
<|file_name|>DBToasterJoinComponent.java<|end_file_name|><|fim▁begin|>/*
*
* * Copyright (c) 2011-2015 EPFL DATA Laboratory<|fim▁hole|> * *
* * All rights reserved.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package ch.epfl.data.squall.components.dbtoaster;
import backtype.storm.Config;
import backtype.storm.topology.TopologyBuilder;
import ch.epfl.data.squall.components.Component;
import ch.epfl.data.squall.components.JoinerComponent;
import ch.epfl.data.squall.components.AbstractJoinerComponent;
import ch.epfl.data.squall.operators.AggregateStream;
import ch.epfl.data.squall.predicates.Predicate;
import ch.epfl.data.squall.storm_components.StormComponent;
import ch.epfl.data.squall.storm_components.dbtoaster.StormDBToasterJoin;
import ch.epfl.data.squall.storm_components.synchronization.TopologyKiller;
import ch.epfl.data.squall.types.Type;
import ch.epfl.data.squall.utilities.MyUtilities;
import org.apache.log4j.Logger;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class DBToasterJoinComponent extends AbstractJoinerComponent<DBToasterJoinComponent> {
protected DBToasterJoinComponent getThis() {
return this;
}
private static final long serialVersionUID = 1L;
private static Logger LOG = Logger.getLogger(DBToasterJoinComponent.class);
private Map<String, Type[]> _parentNameColTypes;
private Set<String> _parentsWithMultiplicity;
private Map<String, AggregateStream> _parentsWithAggregator;
private String _equivalentSQL;
protected DBToasterJoinComponent(List<Component> relations, Map<String, Type[]> relationTypes,
Set<String> relationsWithMultiplicity, Map<String, AggregateStream> relationsWithAggregator,
String sql, String name) {
super(relations, name);
_parentsWithMultiplicity = relationsWithMultiplicity;
_parentsWithAggregator = relationsWithAggregator;
_parentNameColTypes = relationTypes;
_equivalentSQL = sql;
}
@Override
public void makeBolts(TopologyBuilder builder, TopologyKiller killer,
List<String> allCompNames, Config conf, int hierarchyPosition) {
// by default print out for the last component
// for other conditions, can be set via setPrintOut
if (hierarchyPosition == StormComponent.FINAL_COMPONENT
&& !getPrintOutSet())
setPrintOut(true);
MyUtilities.checkBatchOutput(getBatchOutputMillis(),
getChainOperator().getAggregation(), conf);
setStormEmitter(new StormDBToasterJoin(getParents(), this,
allCompNames,
_parentNameColTypes,
_parentsWithMultiplicity,
_parentsWithAggregator,
hierarchyPosition,
builder, killer, conf));
}
@Override
public DBToasterJoinComponent setJoinPredicate(Predicate predicate) {
throw new UnsupportedOperationException();
}
public String getSQLQuery() {
return _equivalentSQL;
}
}<|fim▁end|> | * * Copyright (c) 2014-2015 The Squall Collaboration (see NOTICE) |
<|file_name|>ui.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-08-24 21:51:57
# @Last Modified by: omi
# @Last Modified time: 2015-08-02 20:57:35
'''
NetEase Cloud Music UI
'''
import hashlib
import re
import curses
import terminalsize
from api import NetEase
from scrollstring import scrollstring, truelen
from storage import Storage
from config import Config
import logger
from utils import notify
log = logger.getLogger(__name__)
try:
import dbus
dbus_activity = True
except ImportError:
dbus_activity = False
log.warn('dbus module not installed.')
log.warn('Osdlyrics Not Available.')
def escape_quote(text):
return text.replace('\'', '\\\'').replace('\'', '\'\'')
class Ui:
def __init__(self):
self.screen = curses.initscr()
self.screen.timeout(100) # the screen refresh every 100ms
        # character break buffer
curses.cbreak()
self.screen.keypad(1)
self.netease = NetEase()
curses.start_color()
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
# term resize handling
size = terminalsize.get_terminal_size()
self.x = max(size[0], 10)
self.y = max(size[1], 25)
self.startcol = int(float(self.x) / 5)
self.indented_startcol = max(self.startcol - 3, 0)
self.update_space()
self.lyric = ''
self.now_lyric = ''
self.tlyric = ''
self.storage = Storage()
self.config = Config()
self.newversion = False
def notify(self, summary, song, album, artist):
if summary != 'disable':
body = '%s\nin %s by %s' % (song, album, artist)
content = escape_quote(summary + ': ' + body)
notify(content)
def build_playinfo(self,
song_name,
artist,
album_name,
quality,
start,
pause=False):
curses.noecho()
# refresh top 2 line
self.screen.move(1, 1)
self.screen.clrtoeol()
self.screen.move(2, 1)
self.screen.clrtoeol()
if pause:
self.screen.addstr(1, self.indented_startcol,
'_ _ z Z Z ' + quality, curses.color_pair(3))
else:
self.screen.addstr(1, self.indented_startcol,
'♫ ♪ ♫ ♪ ' + quality, curses.color_pair(3))
self.screen.addstr(
1, min(self.indented_startcol + 18, self.x - 1),
song_name + self.space + artist + ' < ' + album_name + ' >',
curses.color_pair(4))
self.screen.refresh()
def build_process_bar(self, now_playing, total_length, playing_flag,
pause_flag, playing_mode):
if (self.storage.database['player_info']['idx'] >=
len(self.storage.database['player_info']['player_list'])):
return
curses.noecho()
self.screen.move(3, 1)
self.screen.clrtoeol()
self.screen.move(4, 1)
self.screen.clrtoeol()
if not playing_flag:
return
if total_length <= 0:
total_length = 1
if now_playing > total_length or now_playing <= 0:
now_playing = 0
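        # Build a 33-column progress bar: '=' for the played portion, '>' at the playhead (hidden while paused).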
process = '['
for i in range(0, 33):
if i < now_playing / total_length * 33:
if (i + 1) > now_playing / total_length * 33:
if not pause_flag:
process += '>'
continue
process += '='
else:
process += ' '
process += '] '
now_minute = int(now_playing / 60)
if now_minute > 9:
now_minute = str(now_minute)
else:
now_minute = '0' + str(now_minute)
now_second = int(now_playing - int(now_playing / 60) * 60)
if now_second > 9:
now_second = str(now_second)
else:
now_second = '0' + str(now_second)
total_minute = int(total_length / 60)
if total_minute > 9:
total_minute = str(total_minute)
else:
total_minute = '0' + str(total_minute)
total_second = int(total_length - int(total_length / 60) * 60)
if total_second > 9:
total_second = str(total_second)
else:
total_second = '0' + str(total_second)
process += '(' + now_minute + ':' + now_second + '/' + total_minute + ':' + total_second + ')' # NOQA
if playing_mode == 0:
process = '顺序播放 ' + process
elif playing_mode == 1:
process = '顺序循环 ' + process
elif playing_mode == 2:
process = '单曲循环 ' + process
elif playing_mode == 3:
process = '随机播放 ' + process
elif playing_mode == 4:
process = '随机循环 ' + process
else:
pass
self.screen.addstr(3, self.startcol - 2, process, curses.color_pair(1))
song = self.storage.database['songs'][
self.storage.database['player_info']['player_list'][
self.storage.database['player_info']['idx']]]
if 'lyric' not in song.keys() or len(song['lyric']) <= 0:
self.now_lyric = '暂无歌词 ~>_<~ \n'
if dbus_activity and self.config.get_item('osdlyrics'):
self.now_playing = song['song_name'] + ' - ' + song[
'artist'] + '\n'
else:
key = now_minute + ':' + now_second
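            # Find the lyric line whose [mm:ss] timestamp matches the current playback position.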
for line in song['lyric']:
if key in line:
if 'tlyric' not in song.keys() or len(song['tlyric']) <= 0:
self.now_lyric = line
else:
self.now_lyric = line
for tline in song['tlyric']:
if key in tline and self.config.get_item(
'translation'):
self.now_lyric = tline + ' || ' + self.now_lyric # NOQA
self.now_lyric = re.sub('\[.*?\]', '', self.now_lyric)
if dbus_activity and self.config.get_item('osdlyrics'):
try:
bus = dbus.SessionBus().get_object('org.musicbox.Bus', '/')
if self.now_lyric == '暂无歌词 ~>_<~ \n':
bus.refresh_lyrics(self.now_playing,
dbus_interface='local.musicbox.Lyrics')
else:
bus.refresh_lyrics(self.now_lyric,
dbus_interface='local.musicbox.Lyrics')
except Exception as e:
log.error(e)
pass
self.screen.addstr(4, self.startcol - 2, str(self.now_lyric),
curses.color_pair(3))
self.screen.refresh()
def build_loading(self):
self.screen.addstr(7, self.startcol, '享受高品质音乐,loading...',
curses.color_pair(1))
self.screen.refresh()
# start is the timestamp of this function being called
def build_menu(self, datatype, title, datalist, offset, index, step,
start):
# keep playing info in line 1
curses.noecho()
self.screen.move(5, 1)
self.screen.clrtobot()
self.screen.addstr(5, self.startcol, title, curses.color_pair(1))
if len(datalist) == 0:
self.screen.addstr(8, self.startcol, '这里什么都没有 -,-')
else:
if datatype == 'main':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(i - offset + 9,
self.indented_startcol,
'-> ' + str(i) + '. ' + datalist[i],
curses.color_pair(2))
else:
self.screen.addstr(i - offset + 9, self.startcol,
str(i) + '. ' + datalist[i])
elif datatype == 'songs' or datatype == 'fmsongs':
iter_range = min(len(datalist), offset + step)
for i in range(offset, iter_range):
# this item is focus
if i == index:
self.screen.addstr(i - offset + 8, 0,
' ' * self.startcol)
lead = '-> ' + str(i) + '. '
self.screen.addstr(i - offset + 8,
self.indented_startcol, lead,
curses.color_pair(2))
name = '{}{}{} < {} >'.format(datalist[i][
'song_name'], self.space, datalist[i][
'artist'], datalist[i]['album_name'])
                        # the length decides whether to scroll
if truelen(name) < self.x - self.startcol - 1:
self.screen.addstr(
i - offset + 8,
self.indented_startcol + len(lead), name,
curses.color_pair(2))
else:
name = scrollstring(name + ' ', start)
self.screen.addstr(
i - offset + 8,
self.indented_startcol + len(lead), str(name),
curses.color_pair(2))
else:
self.screen.addstr(i - offset + 8, 0,
' ' * self.startcol)
self.screen.addstr(i - offset + 8, self.startcol, str(
str(i) + '. ' + datalist[i]['song_name'] +
self.space + datalist[i][
'artist'] + ' < ' + datalist[i][
'album_name'] + ' >')[:int(self.x * 2)])
self.screen.addstr(iter_range - offset + 9, 0,
' ' * self.x)
elif datatype == 'artists':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(
i - offset + 9, self.indented_startcol,
'-> ' + str(i) + '. ' + datalist[i]['artists_name']
+ self.space + str(datalist[i]['alias']),
curses.color_pair(2))
else:
self.screen.addstr(
i - offset + 9, self.startcol,
str(i) + '. ' + datalist[i]['artists_name'] +
self.space + datalist[i][
'alias'])
elif datatype == 'albums':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(
i - offset + 9, self.indented_startcol,
'-> ' + str(i) + '. ' + datalist[i]['albums_name']
+ self.space + datalist[i][
'artists_name'], curses.color_pair(2))
else:
self.screen.addstr(
i - offset + 9, self.startcol,
str(i) + '. ' + datalist[i]['albums_name'] +
self.space + datalist[i][
'artists_name'])
elif datatype == 'playlists':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(
i - offset + 9, self.indented_startcol,
'-> ' + str(i) + '. ' + datalist[i]['title'],
curses.color_pair(2))
else:
self.screen.addstr(
i - offset + 9, self.startcol,
str(i) + '. ' + datalist[i]['title'])
elif datatype == 'top_playlists':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(
i - offset + 9, self.indented_startcol, '-> ' +
str(i) + '. ' + datalist[i]['playlists_name'] +
self.space + datalist[i]['creator_name'],
curses.color_pair(2))
else:
self.screen.addstr(
i - offset + 9, self.startcol,
str(i) + '. ' + datalist[i]['playlists_name'] +
self.space + datalist[i][
'creator_name'])
elif datatype == 'toplists':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(i - offset + 9,
self.indented_startcol,
'-> ' + str(i) + '. ' + datalist[i],
curses.color_pair(2))
else:
self.screen.addstr(i - offset + 9, self.startcol,
str(i) + '. ' + datalist[i])
elif datatype in ('playlist_classes', 'playlist_class_detail'):
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(i - offset + 9,
self.indented_startcol,
'-> ' + str(i) + '. ' + datalist[i],
curses.color_pair(2))
else:
self.screen.addstr(i - offset + 9, self.startcol,
str(i) + '. ' + datalist[i])
elif datatype == 'djchannels':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(
i - offset + 8, self.indented_startcol,
'-> ' + str(i) + '. ' + datalist[i]['song_name'],
curses.color_pair(2))
else:
self.screen.addstr(
i - offset + 8, self.startcol,
str(i) + '. ' + datalist[i]['song_name'])
elif datatype == 'search':
self.screen.move(6, 1)
self.screen.clrtobot()
self.screen.timeout(-1)
self.screen.addstr(8, self.startcol, '选择搜索类型:',
curses.color_pair(1))
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(
i - offset + 10, self.indented_startcol,
'-> ' + str(i) + '.' + datalist[i - 1],
curses.color_pair(2))<|fim▁hole|> self.screen.addstr(i - offset + 10, self.startcol,
str(i) + '.' + datalist[i - 1])
self.screen.timeout(100)
elif datatype == 'help':
for i in range(offset, min(len(datalist), offset + step)):
if i == index:
self.screen.addstr(i - offset + 9,
self.indented_startcol,
'-> ' + str(i) + '. \'' +
(datalist[i][0].upper() +
'\'').ljust(11) + datalist[i][
1] + ' ' + datalist[i][2],
curses.color_pair(2))
else:
self.screen.addstr(i - offset + 9, self.startcol,
str(i) + '. \'' +
(datalist[i][0].upper() +
'\'').ljust(11) + datalist[i][
1] + ' ' + datalist[i][2])
self.screen.addstr(
20, 6, 'NetEase-MusicBox 基于Python,所有版权音乐来源于网易,本地不做任何保存')
self.screen.addstr(21, 10,
'按 [G] 到 Github 了解更多信息,帮助改进,或者Star表示支持~~')
self.screen.addstr(22, self.startcol,
'Build with love to music by omi')
self.screen.refresh()
def build_search(self, stype):
self.screen.timeout(-1)
netease = self.netease
if stype == 'songs':
song_name = self.get_param('搜索歌曲:')
if song_name == '/return':
return []
else:
try:
data = netease.search(song_name, stype=1)
song_ids = []
if 'songs' in data['result']:
if 'mp3Url' in data['result']['songs']:
songs = data['result']['songs']
                        # if the search result does not have mp3Url,
                        # send ids to fetch the mp3Url
else:
for i in range(0, len(data['result']['songs'])):
song_ids.append(data['result']['songs'][i][
'id'])
songs = netease.songs_detail(song_ids)
return netease.dig_info(songs, 'songs')
except Exception as e:
log.error(e)
return []
elif stype == 'artists':
artist_name = self.get_param('搜索艺术家:')
if artist_name == '/return':
return []
else:
try:
data = netease.search(artist_name, stype=100)
if 'artists' in data['result']:
artists = data['result']['artists']
return netease.dig_info(artists, 'artists')
except Exception as e:
log.error(e)
return []
elif stype == 'albums':
albums_name = self.get_param('搜索专辑:')
if albums_name == '/return':
return []
else:
try:
data = netease.search(albums_name, stype=10)
if 'albums' in data['result']:
albums = data['result']['albums']
return netease.dig_info(albums, 'albums')
except Exception as e:
log.error(e)
return []
elif stype == 'search_playlist':
search_playlist = self.get_param('搜索网易精选集:')
if search_playlist == '/return':
return []
else:
try:
data = netease.search(search_playlist, stype=1000)
if 'playlists' in data['result']:
playlists = data['result']['playlists']
return netease.dig_info(playlists, 'top_playlists')
except Exception as e:
log.error(e)
return []
return []
def build_login(self):
self.build_login_bar()
local_account = self.get_account()
local_password = hashlib.md5(self.get_password()).hexdigest()
login_info = self.netease.login(local_account, local_password)
account = [local_account, local_password]
if login_info['code'] != 200:
x = self.build_login_error()
if x == ord('1'):
return self.build_login()
else:
return -1
else:
return [login_info, account]
def build_login_bar(self):
curses.noecho()
self.screen.move(4, 1)
self.screen.clrtobot()
self.screen.addstr(5, self.startcol, '请输入登录信息(支持手机登陆)',
curses.color_pair(1))
self.screen.addstr(8, self.startcol, '账号:', curses.color_pair(1))
self.screen.addstr(9, self.startcol, '密码:', curses.color_pair(1))
self.screen.move(8, 24)
self.screen.refresh()
def build_login_error(self):
self.screen.move(4, 1)
self.screen.timeout(-1) # disable the screen timeout
self.screen.clrtobot()
self.screen.addstr(8, self.startcol, '艾玛,登录信息好像不对呢 (O_O)#',
curses.color_pair(1))
self.screen.addstr(10, self.startcol, '[1] 再试一次')
self.screen.addstr(11, self.startcol, '[2] 稍后再试')
self.screen.addstr(14, self.startcol, '请键入对应数字:', curses.color_pair(2))
self.screen.refresh()
x = self.screen.getch()
self.screen.timeout(100) # restore the screen timeout
return x
def get_account(self):
self.screen.timeout(-1) # disable the screen timeout
curses.echo()
account = self.screen.getstr(8, self.startcol + 6, 60)
self.screen.timeout(100) # restore the screen timeout
return account
def get_password(self):
self.screen.timeout(-1) # disable the screen timeout
curses.noecho()
password = self.screen.getstr(9, self.startcol + 6, 60)
self.screen.timeout(100) # restore the screen timeout
return password
def get_param(self, prompt_string):
# keep playing info in line 1
curses.echo()
self.screen.move(4, 1)
self.screen.clrtobot()
self.screen.addstr(5, self.startcol, prompt_string,
curses.color_pair(1))
self.screen.refresh()
info = self.screen.getstr(10, self.startcol, 60)
if info == '':
return '/return'
        elif info.strip() == '':
return self.get_param(prompt_string)
else:
return info
def update_size(self):
# get terminal size
size = terminalsize.get_terminal_size()
self.x = max(size[0], 10)
self.y = max(size[1], 25)
        # update indentations
curses.resizeterm(self.y, self.x)
self.startcol = int(float(self.x) / 5)
self.indented_startcol = max(self.startcol - 3, 0)
self.update_space()
self.screen.clear()
self.screen.refresh()
def update_space(self):
if self.x > 140:
self.space = ' - '
elif self.x > 80:
self.space = ' - '
else:
self.space = ' - '
self.screen.refresh()<|fim▁end|> | else: |
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_backend_test.settings")
<|fim▁hole|> from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)<|fim▁end|> | |
<|file_name|>test_main.py<|end_file_name|><|fim▁begin|>"""
Tests for main.py
"""
import pathlib
import main
def test_get_id():
path = pathlib.Path("./nbs/chapters/00-Introduction-to-the-course.ipynb")
assert main.get_id(path) == "00"
def test_get_id_with_no_id():
path = pathlib.Path("./nbs/other/Assessment.ipynb")
assert main.get_id(path) == "assessment"
def test_get_name():
path = pathlib.Path("./nbs/chapters/00-Introduction-to-the-course.ipynb")
assert main.get_name(path) == "Introduction to the course"
def test_get_with_no_id():<|fim▁hole|> path = pathlib.Path("./nbs/other/Assessment.ipynb")
html_output = main.convert_html(path)
assert len(html_output) == 2
assert type(html_output) is tuple
assert type(html_output[0]) is str
def test_render_template():
path = pathlib.Path("./nbs/other/Assessment.ipynb")
path_id = main.get_id(path)
nb, _ = main.convert_html(path)
nb = nb.replace("{{root}}", main.ROOT)
html = main.render_template("content.html", {"nb": nb,
"root": main.ROOT,
"id": path_id,})
assert type(html) is str
assert main.ROOT in html
assert path_id in html
assert nb in html<|fim▁end|> | path = pathlib.Path("./nbs/other/Assessment.ipynb")
assert main.get_name(path) == "Assessment"
def test_convert_html(): |
<|file_name|>htmlbrelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLBRElement {<|fim▁hole|> htmlelement: HTMLElement,
}
impl HTMLBRElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLBRElement {
HTMLBRElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLBRElement> {
Node::reflect_node(
Box::new(HTMLBRElement::new_inherited(local_name, prefix, document)),
document,
)
}
}<|fim▁end|> | |
<|file_name|>0009_auto_20190407_1443.py<|end_file_name|><|fim▁begin|># Generated by Django 2.1.7 on 2019-04-07 21:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('data_log', '0008_auto_20190402_2035'),
]
operations = [
migrations.AlterModelOptions(
name='craftrunelog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='dungeonlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='fulllog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='magicboxcraft',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),<|fim▁hole|> ),
migrations.AlterModelOptions(
name='riftraidlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='shoprefreshlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='summonlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='wishlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterModelOptions(
name='worldbosslog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)},
),
migrations.AlterField(
model_name='riftraidrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.RiftRaidLog'),
),
]<|fim▁end|> | migrations.AlterModelOptions(
name='riftdungeonlog',
options={'get_latest_by': 'timestamp', 'ordering': ('-timestamp',)}, |
<|file_name|>codeFixChangeJSDocSyntax20.ts<|end_file_name|><|fim▁begin|>/// <reference path='fourslash.ts' /><|fim▁hole|>verify.rangeAfterCodeFix("any");<|fim▁end|> | //// var index = { get p(): [|*|] { return 12 } }; |
<|file_name|>ContextUtils.java<|end_file_name|><|fim▁begin|>/* ========================================================================= *
* Boarder *
* http://boarder.mikuz.org/ *
* ========================================================================= *
* Copyright (C) 2013 Boarder *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* ========================================================================= */
package fi.mikuz.boarder.util;
import org.acra.ACRA;
import android.content.Context;
import android.os.Looper;
import android.util.Log;
import android.widget.Toast;
public abstract class ContextUtils {
private static final String TAG = ContextUtils.class.getSimpleName();
public static void toast(Context context, String toast) {
toast(context, toast, Toast.LENGTH_SHORT);
}
<|fim▁hole|> if (Looper.myLooper() == null) {
Exception e = new IllegalStateException("Not running in a looper");
Log.e(TAG, errLogMsg, e);
ACRA.getErrorReporter().handleException(e);
} else if (Looper.myLooper() != Looper.getMainLooper()) {
Exception e = new IllegalStateException("Not running in the main looper");
Log.e(TAG, errLogMsg, e);
ACRA.getErrorReporter().handleException(e);
} else {
try {
Toast.makeText(context, toast, duration).show();
} catch (NullPointerException e) {
Log.e(TAG, errLogMsg, e);
}
}
}
}<|fim▁end|> | public static void toast(Context context, String toast, int duration) {
String errLogMsg = "Unable to toast message: \"" + toast + "\"";
|
<|file_name|>stb_image.cpp<|end_file_name|><|fim▁begin|>/* Copyright: stbi-1.33 - public domain JPEG/PNG reader - http://nothings.org/stb_image.c
when you control the images you're loading
no warranty implied; use at your own risk
*/
#include "graphics/image/stb_image.h"
#include "platform/Platform.h"
#ifndef STBI_NO_HDR
#include <math.h> // ldexp
#include <string.h> // strcmp, strtok
#endif
#ifndef STBI_NO_STDIO
#include <stdio.h>
#endif
#include <stdlib.h>
#include <memory.h>
#include <assert.h>
#include <stdarg.h>
namespace stbi {
#ifndef _MSC_VER
#ifdef __cplusplus
#define stbi_inline inline
#else
#define stbi_inline
#endif
#else
#define stbi_inline __forceinline
#endif
// implementation:
typedef unsigned char uint8;
typedef u16 uint16;
typedef s16 int16;
typedef u32 uint32;
typedef s32 int32;
typedef unsigned int uint;
#if defined(STBI_NO_STDIO) && !defined(STBI_NO_WRITE)
#define STBI_NO_WRITE
#endif
#define STBI_NOTUSED(v) (void)sizeof(v)
#ifdef _MSC_VER
#define STBI_HAS_LROTL
#endif
#ifdef STBI_HAS_LROTL
#define stbi_lrot(x,y) _lrotl(x,y)
#else
#define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
#endif
#ifdef STBI_NO_CALLBACK
typedef const uint8 * bufptr;
#else
typedef uint8 * bufptr;
#endif
///////////////////////////////////////////////
//
// stbi struct and start_xxx functions
// stbi structure is our basic context used by all images, so it
// contains all the IO context, plus some basic image information
typedef struct
{
uint32 img_x, img_y;
int img_n, img_out_n;
#ifndef STBI_NO_CALLBACK
stbi_io_callbacks io;
void *io_user_data;
int read_from_callbacks;
int buflen;
uint8 buffer_start[128];
#endif // !STBI_NO_CALLBACK
bufptr img_buffer, img_buffer_end;
bufptr img_buffer_original;
} stbi;
#ifndef STBI_NO_CALLBACK
static void refill_buffer(stbi *s);
#endif // !STBI_NO_CALLBACK
// initialize a memory-decode context
static void start_mem(stbi *s, uint8 const *buffer, int len)
{
#ifndef STBI_NO_CALLBACK
s->io.read = NULL;
s->read_from_callbacks = 0;
#endif // !STBI_NO_CALLBACK
s->img_buffer = s->img_buffer_original = (bufptr) buffer;
s->img_buffer_end = (bufptr) buffer+len;
}
#ifndef STBI_NO_CALLBACK
// initialize a callback-based context
static void start_callbacks(stbi *s, stbi_io_callbacks *c, void *user)
{
s->io = *c;
s->io_user_data = user;
s->buflen = sizeof(s->buffer_start);
s->read_from_callbacks = 1;
s->img_buffer_original = s->buffer_start;
refill_buffer(s);
}
#ifndef STBI_NO_STDIO
static int stdio_read(void *user, char *data, int size)
{
return (int) fread(data,1,size,(FILE*) user);
}
static void stdio_skip(void *user, unsigned n)
{
fseek((FILE*) user, n, SEEK_CUR);
}
static int stdio_eof(void *user)
{<|fim▁hole|>}
static stbi_io_callbacks stbi_stdio_callbacks =
{
stdio_read,
stdio_skip,
stdio_eof,
};
static void start_file(stbi *s, FILE *f)
{
start_callbacks(s, &stbi_stdio_callbacks, (void *) f);
}
//static void stop_file(stbi *s) { }
#endif // !STBI_NO_STDIO
#endif // !STBI_NO_CALLBACK
static void stbi_rewind(stbi *s)
{
// conceptually rewind SHOULD rewind to the beginning of the stream,
// but we just rewind to the beginning of the initial buffer, because
// we only use it after doing 'test', which only ever looks at at most 92 bytes
s->img_buffer = s->img_buffer_original;
}
static int stbi_jpeg_test(stbi *s);
static stbi_uc *stbi_jpeg_load(stbi *s, int *x, int *y, int *comp, int req_comp);
static int stbi_jpeg_info(stbi *s, int *x, int *y, int *comp);
static int stbi_png_test(stbi *s);
static stbi_uc *stbi_png_load(stbi *s, int *x, int *y, int *comp, int req_comp);
static int stbi_png_info(stbi *s, int *x, int *y, int *comp);
static int stbi_bmp_test(stbi *s);
static stbi_uc *stbi_bmp_load(stbi *s, int *x, int *y, int *comp, int req_comp);
static int stbi_tga_test(stbi *s);
static stbi_uc *stbi_tga_load(stbi *s, int *x, int *y, int *comp, int req_comp);
static int stbi_tga_info(stbi *s, int *x, int *y, int *comp);
static int stbi_psd_test(stbi *s);
static stbi_uc *stbi_psd_load(stbi *s, int *x, int *y, int *comp, int req_comp);
#ifndef STBI_NO_HDR
static int stbi_hdr_test(stbi *s);
static float *stbi_hdr_load(stbi *s, int *x, int *y, int *comp, int req_comp);
#endif // !STBI_NO_HDR
static int stbi_pic_test(stbi *s);
static stbi_uc *stbi_pic_load(stbi *s, int *x, int *y, int *comp, int req_comp);
#ifndef STBI_NO_GIF
static int stbi_gif_test(stbi *s);
static stbi_uc *stbi_gif_load(stbi *s, int *x, int *y, int *comp, int req_comp);
static int stbi_gif_info(stbi *s, int *x, int *y, int *comp);
#endif // !STBI_NO_GIF
// this is not threadsafe
static const char *failure_reason;
const char *stbi_failure_reason(void)
{
return failure_reason;
}
static int stbi_error(const char *str)
{
failure_reason = str;
return 0;
}
// stbi_error - error
// stbi_error_pf - error returning pointer to float
// stbi_error_puc - error returning pointer to unsigned char
#ifdef STBI_NO_FAILURE_STRINGS
#define stbi_error(x,y) 0
#elif defined(STBI_FAILURE_USERMSG)
#define stbi_error(x,y) stbi_error(y)
#else
#define stbi_error(x,y) stbi_error(x)
#endif
#define stbi_error_pf(x,y) ((float *) (stbi_error(x,y)?NULL:NULL))
#define stbi_error_puc(x,y) ((unsigned char *) (stbi_error(x,y)?NULL:NULL))
void stbi_image_free(void *retval_from_stbi_load)
{
free(retval_from_stbi_load);
}
#ifndef STBI_NO_HDR
static float *ldr_to_hdr(stbi_uc *data, int x, int y, int comp);
static stbi_uc *hdr_to_ldr(float *data, int x, int y, int comp);
#endif
static unsigned char *stbi_load_main(stbi *s, int *x, int *y, int *comp, int req_comp)
{
if (stbi_jpeg_test(s)) return stbi_jpeg_load(s,x,y,comp,req_comp);
if (stbi_png_test(s)) return stbi_png_load(s,x,y,comp,req_comp);
if (stbi_bmp_test(s)) return stbi_bmp_load(s,x,y,comp,req_comp);
#ifndef STBI_NO_GIF
if (stbi_gif_test(s)) return stbi_gif_load(s,x,y,comp,req_comp);
#endif // !STBI_NO_GIF
if (stbi_psd_test(s)) return stbi_psd_load(s,x,y,comp,req_comp);
if (stbi_pic_test(s)) return stbi_pic_load(s,x,y,comp,req_comp);
#ifndef STBI_NO_HDR
if (stbi_hdr_test(s)) {
float *hdr = stbi_hdr_load(s, x,y,comp,req_comp);
return hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp);
}
#endif
// test tga last because it's a crappy test!
if (stbi_tga_test(s))
return stbi_tga_load(s,x,y,comp,req_comp);
return stbi_error_puc("unknown image type", "Image not of any known type, or corrupt");
}
#ifndef STBI_NO_STDIO
unsigned char *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
{
FILE *f = fopen(filename, "rb");
unsigned char *result;
if (!f) return stbi_error_puc("can't fopen", "Unable to open file");
result = stbi_load_from_file(f,x,y,comp,req_comp);
fclose(f);
return result;
}
unsigned char *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
{
stbi s;
start_file(&s,f);
return stbi_load_main(&s,x,y,comp,req_comp);
}
#endif //!STBI_NO_STDIO
unsigned char *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
{
stbi s;
start_mem(&s,buffer,len);
return stbi_load_main(&s,x,y,comp,req_comp);
}
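// Illustrative usage sketch of the decode API above (kept out of the build);
// `file_bytes` and `file_len` stand for caller-provided data and are not
// defined anywhere in this file.
#if 0
static void example_decode_rgba(const stbi_uc *file_bytes, int file_len)
{
    int w, h, n;
    // force 4 output channels (RGBA); n still reports the source channel count
    unsigned char *pixels = stbi_load_from_memory(file_bytes, file_len, &w, &h, &n, 4);
    if (pixels) {
        // ... consume the w*h RGBA pixels here ...
        stbi_image_free(pixels);
    }
}
#endif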
#ifndef STBI_NO_CALLBACK
unsigned char *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
{
stbi s;
start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
return stbi_load_main(&s,x,y,comp,req_comp);
}
#endif // !STBI_NO_CALLBACK
#ifndef STBI_NO_HDR
float *stbi_loadf_main(stbi *s, int *x, int *y, int *comp, int req_comp)
{
unsigned char *data;
#ifndef STBI_NO_HDR
if (stbi_hdr_test(s))
return stbi_hdr_load(s,x,y,comp,req_comp);
#endif
data = stbi_load_main(s, x, y, comp, req_comp);
if (data)
return ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
return stbi_error_pf("unknown image type", "Image not of any known type, or corrupt");
}
float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
{
stbi s;
start_mem(&s,buffer,len);
return stbi_loadf_main(&s,x,y,comp,req_comp);
}
#ifndef STBI_NO_CALLBACK
float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
{
stbi s;
start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
return stbi_loadf_main(&s,x,y,comp,req_comp);
}
#ifndef STBI_NO_STDIO
float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp)
{
FILE *f = fopen(filename, "rb");
float *result;
if (!f) return stbi_error_pf("can't fopen", "Unable to open file");
result = stbi_loadf_from_file(f,x,y,comp,req_comp);
fclose(f);
return result;
}
float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
{
stbi s;
start_file(&s,f);
return stbi_loadf_main(&s,x,y,comp,req_comp);
}
#endif // !STBI_NO_STDIO
#endif // !STBI_NO_CALLBACK
#endif // !STBI_NO_HDR
// this is-hdr-or-not test is defined independently of whether STBI_NO_HDR is
// defined, for API simplicity; if STBI_NO_HDR is defined, it always
// reports false!
int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len)
{
#ifndef STBI_NO_HDR
stbi s;
start_mem(&s,buffer,len);
return stbi_hdr_test(&s);
#else
STBI_NOTUSED(buffer);
STBI_NOTUSED(len);
return 0;
#endif
}
#ifndef STBI_NO_CALLBACK
#ifndef STBI_NO_STDIO
extern int stbi_is_hdr (char const *filename)
{
FILE *f = fopen(filename, "rb");
int result=0;
if (f) {
result = stbi_is_hdr_from_file(f);
fclose(f);
}
return result;
}
extern int stbi_is_hdr_from_file(FILE *f)
{
#ifndef STBI_NO_HDR
stbi s;
start_file(&s,f);
return stbi_hdr_test(&s);
#else
return 0;
#endif
}
#endif // !STBI_NO_STDIO
extern int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user)
{
#ifndef STBI_NO_HDR
stbi s;
start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
return stbi_hdr_test(&s);
#else
return 0;
#endif
}
#endif // !STBI_NO_CALLBACK
#ifndef STBI_NO_HDR
static float h2l_gamma_i=1.0f/2.2f, h2l_scale_i=1.0f;
static float l2h_gamma=2.2f, l2h_scale=1.0f;
void stbi_hdr_to_ldr_gamma(float gamma) { h2l_gamma_i = 1/gamma; }
void stbi_hdr_to_ldr_scale(float scale) { h2l_scale_i = 1/scale; }
void stbi_ldr_to_hdr_gamma(float gamma) { l2h_gamma = gamma; }
void stbi_ldr_to_hdr_scale(float scale) { l2h_scale = scale; }
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Common code used by all image loaders
//
enum
{
SCAN_load=0,
SCAN_type,
SCAN_header
};
#ifndef STBI_NO_CALLBACK
static void refill_buffer(stbi *s)
{
int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen);
if (n == 0) {
// at end of file, treat same as if from memory
s->read_from_callbacks = 0;
s->img_buffer = s->img_buffer_end-1;
*s->img_buffer = 0;
} else {
s->img_buffer = s->buffer_start;
s->img_buffer_end = s->buffer_start + n;
}
}
#endif // !STBI_NO_CALLBACK
stbi_inline static int get8(stbi *s)
{
if (s->img_buffer < s->img_buffer_end)
return *s->img_buffer++;
#ifndef STBI_NO_CALLBACK
if (s->read_from_callbacks) {
refill_buffer(s);
return *s->img_buffer++;
}
#endif // !STBI_NO_CALLBACK
return 0;
}
stbi_inline static int at_eof(stbi *s)
{
#ifndef STBI_NO_CALLBACK
if (s->io.read) {
if (!(s->io.eof)(s->io_user_data)) return 0;
// if feof() is true, check if buffer = end
// special case: we've only got the special 0 character at the end
if (s->read_from_callbacks == 0) return 1;
}
#endif // !STBI_NO_CALLBACK
return s->img_buffer >= s->img_buffer_end;
}
stbi_inline static uint8 get8u(stbi *s)
{
return (uint8) get8(s);
}
static void skip(stbi *s, int n)
{
#ifndef STBI_NO_CALLBACK
if (s->io.read) {
int blen = s->img_buffer_end - s->img_buffer;
if (blen < n) {
s->img_buffer = s->img_buffer_end;
(s->io.skip)(s->io_user_data, n - blen);
return;
}
}
#endif // !STBI_NO_CALLBACK
s->img_buffer += n;
}
static int getn(stbi *s, stbi_uc *buffer, int n)
{
#ifndef STBI_NO_CALLBACK
if (s->io.read) {
int blen = s->img_buffer_end - s->img_buffer;
if (blen < n) {
int res, count;
memcpy(buffer, s->img_buffer, blen);
count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen);
res = (count == (n-blen));
s->img_buffer = s->img_buffer_end;
return res;
}
}
#endif // !STBI_NO_CALLBACK
if (s->img_buffer+n <= s->img_buffer_end) {
memcpy(buffer, s->img_buffer, n);
s->img_buffer += n;
return 1;
} else {
return 0;
}
}
static int get16(stbi *s)
{
int z = get8(s);
return (z << 8) + get8(s);
}
static uint32 get32(stbi *s)
{
uint32 z = get16(s);
return (z << 16) + get16(s);
}
static int get16le(stbi *s)
{
int z = get8(s);
return z + (get8(s) << 8);
}
static uint32 get32le(stbi *s)
{
uint32 z = get16le(s);
return z + (get16le(s) << 16);
}
//////////////////////////////////////////////////////////////////////////////
//
// generic converter from built-in img_n to req_comp
// individual types do this automatically as much as possible (e.g. jpeg
// does all cases internally since it needs to colorspace convert anyway,
// and it never has alpha, so very few cases ). png can automatically
// interleave an alpha=255 channel, but falls back to this for other cases
//
// assume data buffer is malloced, so malloc a new one and free that one
// only failure mode is malloc failing
static uint8 compute_y(int r, int g, int b)
{
return (uint8) (((r*77) + (g*150) + (29*b)) >> 8);
}
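// The weights above are an integer approximation of the usual luma formula
// (0.299*R + 0.587*G + 0.114*B): 77/256, 150/256 and 29/256, and they sum to
// 256, so e.g. compute_y(255,255,255) == (255*256)>>8 == 255 and
// compute_y(255,0,0) == (255*77)>>8 == 76.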
static unsigned char *convert_format(unsigned char *data, int img_n, int req_comp, uint x, uint y)
{
int i,j;
unsigned char *good;
if (req_comp == img_n) return data;
assert(req_comp >= 1 && req_comp <= 4);
good = (unsigned char *) malloc(req_comp * x * y);
if (good == NULL) {
free(data);
return stbi_error_puc("outofmem", "Out of memory");
}
for (j=0; j < (int) y; ++j) {
unsigned char *src = data + j * x * img_n ;
unsigned char *dest = good + j * x * req_comp;
#define STBI_COMBO(a,b) ((a)*8+(b))
#define STBI_CASE(a,b) case STBI_COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
// convert source image with img_n components to one with req_comp components;
// avoid switch per pixel, so use switch per scanline and massive macros
switch (STBI_COMBO(img_n, req_comp)) {
STBI_CASE(1,2) dest[0]=src[0], dest[1]=255; break;
STBI_CASE(1,3) dest[0]=dest[1]=dest[2]=src[0]; break;
STBI_CASE(1,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; break;
STBI_CASE(2,1) dest[0]=src[0]; break;
STBI_CASE(2,3) dest[0]=dest[1]=dest[2]=src[0]; break;
STBI_CASE(2,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; break;
STBI_CASE(3,4) dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; break;
STBI_CASE(3,1) dest[0]=compute_y(src[0],src[1],src[2]); break;
STBI_CASE(3,2) dest[0]=compute_y(src[0],src[1],src[2]), dest[1] = 255; break;
STBI_CASE(4,1) dest[0]=compute_y(src[0],src[1],src[2]); break;
STBI_CASE(4,2) dest[0]=compute_y(src[0],src[1],src[2]), dest[1] = src[3]; break;
STBI_CASE(4,3) dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; break;
default: assert(0);
}
#undef STBI_CASE
#undef STBI_COMBO
}
free(data);
return good;
}
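// For example, expanding 3-channel RGB to 4-channel RGBA selects the
// STBI_COMBO(3,4) scanline loop above, which copies R,G,B and writes 255 into
// the new alpha byte; going the other way, (4,1) collapses each pixel to grey
// via compute_y and drops alpha.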
#ifndef STBI_NO_HDR
static float *ldr_to_hdr(stbi_uc *data, int x, int y, int comp)
{
int i,k,n;
float *output = (float *) malloc(x * y * comp * sizeof(*output));
if (output == NULL) { free(data); return stbi_error_pf("outofmem", "Out of memory"); }
// compute number of non-alpha components
if (comp & 1) n = comp; else n = comp-1;
for (i=0; i < x*y; ++i) {
for (k=0; k < n; ++k) {
output[i*comp + k] = (float) pow(data[i*comp+k]/255.0f, l2h_gamma) * l2h_scale;
}
if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f;
}
free(data);
return output;
}
#define stbi_float2int(x) ((int) (x))
static stbi_uc *hdr_to_ldr(float *data, int x, int y, int comp)
{
int i,k,n;
stbi_uc *output = (stbi_uc *) malloc(x * y * comp);
if (output == NULL) { free(data); return stbi_error_puc("outofmem", "Out of memory"); }
// compute number of non-alpha components
if (comp & 1) n = comp; else n = comp-1;
for (i=0; i < x*y; ++i) {
for (k=0; k < n; ++k) {
float z = (float) pow(data[i*comp+k]*h2l_scale_i, h2l_gamma_i) * 255 + 0.5f;
if (z < 0) z = 0;
if (z > 255) z = 255;
output[i*comp + k] = (uint8) stbi_float2int(z);
}
if (k < comp) {
float z = data[i*comp+k] * 255 + 0.5f;
if (z < 0) z = 0;
if (z > 255) z = 255;
output[i*comp + k] = (uint8) stbi_float2int(z);
}
}
free(data);
return output;
}
#undef stbi_float2int
#endif
//////////////////////////////////////////////////////////////////////////////
//
// "baseline" JPEG/JFIF decoder (not actually fully baseline implementation)
//
// simple implementation
// - channel subsampling of at most 2 in each dimension
// - doesn't support delayed output of y-dimension
// - simple interface (only one output format: 8-bit interleaved RGB)
// - doesn't try to recover corrupt jpegs
// - doesn't allow partial loading, loading multiple at once
// - still fast on x86 (copying globals into locals doesn't help x86)
// - allocates lots of intermediate memory (full size of all components)
// - non-interleaved case requires this anyway
// - allows good upsampling (see next)
// high-quality
// - upsampled channels are bilinearly interpolated, even across blocks
// - quality integer IDCT derived from IJG's 'slow'
// performance
// - fast huffman; reasonable integer IDCT
// - uses a lot of intermediate memory, could cache poorly
// - load http://nothings.org/remote/anemones.jpg 3 times on 2.8Ghz P4
// stb_jpeg: 1.34 seconds (MSVC6, default release build)
// stb_jpeg: 1.06 seconds (MSVC6, processor = Pentium Pro)
// IJL11.dll: 1.08 seconds (compiled by intel)
// IJG 1998: 0.98 seconds (MSVC6, makefile provided by IJG)
// IJG 1998: 0.95 seconds (MSVC6, makefile + proc=PPro)
// huffman decoding acceleration
#define STBI_FAST_BITS 9 // larger handles more cases; smaller stomps less cache
typedef struct
{
uint8 fast[1 << STBI_FAST_BITS];
// weirdly, repacking this into AoS is a 10% speed loss, instead of a win
uint16 code[256];
uint8 values[256];
uint8 size[257];
unsigned int maxcode[18];
int delta[17]; // old 'firstsymbol' - old 'firstcode'
} huffman;
typedef struct
{
#ifdef STBI_SIMD
unsigned short dequant2[4][64];
#endif
stbi *s;
huffman huff_dc[4];
huffman huff_ac[4];
uint8 dequant[4][64];
// sizes for components, interleaved MCUs
int img_h_max, img_v_max;
int img_mcu_x, img_mcu_y;
int img_mcu_w, img_mcu_h;
// definition of jpeg image component
struct
{
int id;
int h,v;
int tq;
int hd,ha;
int dc_pred;
int x,y,w2,h2;
uint8 *data;
void *raw_data;
uint8 *linebuf;
} img_comp[4];
uint32 code_buffer; // jpeg entropy-coded buffer
int code_bits; // number of valid bits
unsigned char marker; // marker seen while filling entropy buffer
int nomore; // flag if we saw a marker so must stop
int scan_n, order[4];
int restart_interval, todo;
} jpeg;
static int build_huffman(huffman *h, int *count)
{
int i,j,k=0,code;
// build size list for each symbol (from JPEG spec)
for (i=0; i < 16; ++i)
for (j=0; j < count[i]; ++j)
h->size[k++] = (uint8) (i+1);
h->size[k] = 0;
// compute actual symbols (from jpeg spec)
code = 0;
k = 0;
for(j=1; j <= 16; ++j) {
// compute delta to add to code to compute symbol id
h->delta[j] = k - code;
if (h->size[k] == j) {
while (h->size[k] == j)
h->code[k++] = (uint16) (code++);
if (code-1 >= (1 << j)) return stbi_error("bad code lengths","Corrupt JPEG");
}
// compute largest code + 1 for this size, preshifted as needed later
h->maxcode[j] = code << (16-j);
code <<= 1;
}
h->maxcode[j] = 0xffffffff;
// build non-spec acceleration table; 255 is flag for not-accelerated
memset(h->fast, 255, 1 << STBI_FAST_BITS);
for (i=0; i < k; ++i) {
int s = h->size[i];
if (s <= STBI_FAST_BITS) {
int c = h->code[i] << (STBI_FAST_BITS-s);
int m = 1 << (STBI_FAST_BITS-s);
for (j=0; j < m; ++j) {
h->fast[c+j] = (uint8) i;
}
}
}
return 1;
}
static void grow_buffer_unsafe(jpeg *j)
{
do {
int b = j->nomore ? 0 : get8(j->s);
if (b == 0xff) {
int c = get8(j->s);
if (c != 0) {
j->marker = (unsigned char) c;
j->nomore = 1;
return;
}
}
j->code_buffer |= b << (24 - j->code_bits);
j->code_bits += 8;
} while (j->code_bits <= 24);
}
// (1 << n) - 1
static uint32 bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535};
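// bmask[n] keeps the low n bits of a value, e.g. bmask[5] == 31 == 0b11111,
// so (x & bmask[5]) extracts a 5-bit field.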
// decode a jpeg huffman value from the bitstream
stbi_inline static int decode(jpeg *j, huffman *h)
{
unsigned int temp;
int c,k;
if (j->code_bits < 16) grow_buffer_unsafe(j);
// look at the top STBI_FAST_BITS and determine what symbol ID it is,
// if the code is <= STBI_FAST_BITS
c = (j->code_buffer >> (32 - STBI_FAST_BITS)) & ((1 << STBI_FAST_BITS)-1);
k = h->fast[c];
if (k < 255) {
int s = h->size[k];
if (s > j->code_bits)
return -1;
j->code_buffer <<= s;
j->code_bits -= s;
return h->values[k];
}
// naive test is to shift the code_buffer down so k bits are
// valid, then test against maxcode. To speed this up, we've
// preshifted maxcode left so that it has (16-k) 0s at the
// end; in other words, regardless of the number of bits, it
// wants to be compared against something shifted to have 16;
// that way we don't need to shift inside the loop.
temp = j->code_buffer >> 16;
for (k=STBI_FAST_BITS+1 ; ; ++k)
if (temp < h->maxcode[k])
break;
if (k == 17) {
// error! code not found
j->code_bits -= 16;
return -1;
}
if (k > j->code_bits)
return -1;
// convert the huffman code to the symbol id
c = ((j->code_buffer >> (32 - k)) & bmask[k]) + h->delta[k];
assert((((j->code_buffer) >> (32 - h->size[c])) & bmask[h->size[c]]) == h->code[c]);
// convert the id to a symbol
j->code_bits -= k;
j->code_buffer <<= k;
return h->values[c];
}
// combined JPEG 'receive' and JPEG 'extend', since baseline
// always extends everything it receives.
stbi_inline static int extend_receive(jpeg *j, int n)
{
unsigned int m = 1 << (n-1);
unsigned int k;
if (j->code_bits < n) grow_buffer_unsafe(j);
#if 1
k = stbi_lrot(j->code_buffer, n);
j->code_buffer = k & ~bmask[n];
k &= bmask[n];
j->code_bits -= n;
#else
k = (j->code_buffer >> (32 - n)) & bmask[n];
j->code_bits -= n;
j->code_buffer <<= n;
#endif
// the following test is probably a random branch that won't
// predict well. I tried to table accelerate it but failed.
// maybe it's compiling as a conditional move?
if (k < m)
return (-1 << n) + k + 1;
else
return k;
}
// given a value that's at position X in the zigzag stream,
// where does it appear in the 8x8 matrix coded as row-major?
static uint8 dezigzag[64+15] =
{
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63,
// let corrupt input sample past end
63, 63, 63, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63
};
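// Example of the mapping: coefficients 0..4 of the zigzag stream land at
// dezigzag[0..4] == 0, 1, 8, 16, 9, i.e. (row,col) = (0,0), (0,1), (1,0),
// (2,0), (1,1) in the row-major 8x8 block (index = row*8 + col).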
// decode one 64-entry block--
static int decode_block(jpeg *j, short data[64], huffman *hdc, huffman *hac, int b)
{
int diff,dc,k;
int t = decode(j, hdc);
if (t < 0) return stbi_error("bad huffman code","Corrupt JPEG");
// 0 all the ac values now so we can do it 32-bits at a time
memset(data,0,64*sizeof(data[0]));
diff = t ? extend_receive(j, t) : 0;
dc = j->img_comp[b].dc_pred + diff;
j->img_comp[b].dc_pred = dc;
data[0] = (short) dc;
// decode AC components, see JPEG spec
k = 1;
do {
int r,s;
int rs = decode(j, hac);
if (rs < 0) return stbi_error("bad huffman code","Corrupt JPEG");
s = rs & 15;
r = rs >> 4;
if (s == 0) {
if (rs != 0xf0) break; // end block
k += 16;
} else {
k += r;
// decode into unzigzag'd location
data[dezigzag[k++]] = (short) extend_receive(j,s);
}
} while (k < 64);
return 1;
}
// take a -128..127 value and clamp it and convert to 0..255
stbi_inline static uint8 clamp(int x)
{
// trick to use a single test to catch both cases
if ((unsigned int) x > 255) {
if (x < 0) return 0;
if (x > 255) return 255;
}
return (uint8) x;
}
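// The unsigned compare above folds both range checks into one branch:
// e.g. x = -3 becomes a huge unsigned value (> 255) and clamps to 0,
// x = 300 clamps to 255, and x = 100 passes straight through.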
#define stbi_f2f(x) (int) (((x) * 4096 + 0.5))
#define stbi_fsh(x) ((x) << 12)
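// Both macros work in 1.0 == 4096 fixed point (12 fractional bits):
// stbi_f2f turns a float constant into that scale (stbi_f2f(1.0f) == 4096,
// stbi_f2f(0.5411961f) == 2217) and stbi_fsh lifts an integer sample into the
// same scale so the IDCT below can mix them.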
// derived from jidctint -- DCT_ISLOW
#define STBI_IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \
int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \
p2 = s2; \
p3 = s6; \
p1 = (p2+p3) * stbi_f2f(0.5411961f); \
t2 = p1 + p3*stbi_f2f(-1.847759065f); \
t3 = p1 + p2*stbi_f2f( 0.765366865f); \
p2 = s0; \
p3 = s4; \
t0 = stbi_fsh(p2+p3); \
t1 = stbi_fsh(p2-p3); \
x0 = t0+t3; \
x3 = t0-t3; \
x1 = t1+t2; \
x2 = t1-t2; \
t0 = s7; \
t1 = s5; \
t2 = s3; \
t3 = s1; \
p3 = t0+t2; \
p4 = t1+t3; \
p1 = t0+t3; \
p2 = t1+t2; \
p5 = (p3+p4)*stbi_f2f( 1.175875602f); \
t0 = t0*stbi_f2f( 0.298631336f); \
t1 = t1*stbi_f2f( 2.053119869f); \
t2 = t2*stbi_f2f( 3.072711026f); \
t3 = t3*stbi_f2f( 1.501321110f); \
p1 = p5 + p1*stbi_f2f(-0.899976223f); \
p2 = p5 + p2*stbi_f2f(-2.562915447f); \
p3 = p3*stbi_f2f(-1.961570560f); \
p4 = p4*stbi_f2f(-0.390180644f); \
t3 += p1+p4; \
t2 += p2+p3; \
t1 += p2+p4; \
t0 += p1+p3;
#ifdef STBI_SIMD
typedef unsigned short stbi_dequantize_t;
#else
typedef uint8 stbi_dequantize_t;
#endif
// .344 seconds on 3*anemones.jpg
static void idct_block(uint8 *out, int out_stride, short data[64], stbi_dequantize_t *dequantize)
{
int i,val[64],*v=val;
stbi_dequantize_t *dq = dequantize;
uint8 *o;
short *d = data;
// columns
for (i=0; i < 8; ++i,++d,++dq, ++v) {
// if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing
if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0
&& d[40]==0 && d[48]==0 && d[56]==0) {
// no shortcut 0 seconds
// (1|2|3|4|5|6|7)==0 0 seconds
// all separate -0.047 seconds
// 1 && 2|3 && 4|5 && 6|7: -0.047 seconds
int dcterm = d[0] * dq[0] << 2;
v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
} else {
STBI_IDCT_1D(d[ 0]*dq[ 0],d[ 8]*dq[ 8],d[16]*dq[16],d[24]*dq[24],
d[32]*dq[32],d[40]*dq[40],d[48]*dq[48],d[56]*dq[56])
// constants scaled things up by 1<<12; let's bring them back
// down, but keep 2 extra bits of precision
x0 += 512; x1 += 512; x2 += 512; x3 += 512;
v[ 0] = (x0+t3) >> 10;
v[56] = (x0-t3) >> 10;
v[ 8] = (x1+t2) >> 10;
v[48] = (x1-t2) >> 10;
v[16] = (x2+t1) >> 10;
v[40] = (x2-t1) >> 10;
v[24] = (x3+t0) >> 10;
v[32] = (x3-t0) >> 10;
}
}
for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) {
// no fast case since the first 1D IDCT spread components out
STBI_IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7])
// constants scaled things up by 1<<12, plus we had 1<<2 from first
// loop, plus horizontal and vertical each scale by sqrt(8) so together
// we've got an extra 1<<3, so 1<<17 total we need to remove.
// so we want to round that, which means adding 0.5 * 1<<17,
// aka 65536. Also, we'll end up with -128 to 127 that we want
// to encode as 0..255 by adding 128, so we'll add that before the shift
x0 += 65536 + (128<<17);
x1 += 65536 + (128<<17);
x2 += 65536 + (128<<17);
x3 += 65536 + (128<<17);
// tried computing the shifts into temps, or'ing the temps to see
// if any were out of range, but that was slower
o[0] = clamp((x0+t3) >> 17);
o[7] = clamp((x0-t3) >> 17);
o[1] = clamp((x1+t2) >> 17);
o[6] = clamp((x1-t2) >> 17);
o[2] = clamp((x2+t1) >> 17);
o[5] = clamp((x2-t1) >> 17);
o[3] = clamp((x3+t0) >> 17);
o[4] = clamp((x3-t0) >> 17);
}
}
#ifdef STBI_SIMD
static stbi_idct_8x8 stbi_idct_installed = idct_block;
void stbi_install_idct(stbi_idct_8x8 func)
{
stbi_idct_installed = func;
}
#endif
#define STBI_MARKER_none 0xff
// if there's a pending marker from the entropy stream, return that
// otherwise, fetch from the stream and get a marker. if there's no
// marker, return 0xff, which is never a valid marker value
static uint8 get_marker(jpeg *j)
{
uint8 x;
if (j->marker != STBI_MARKER_none) { x = j->marker; j->marker = STBI_MARKER_none; return x; }
x = get8u(j->s);
if (x != 0xff) return STBI_MARKER_none;
while (x == 0xff)
x = get8u(j->s);
return x;
}
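// JPEG markers are a 0xff byte followed by a non-zero code byte, so e.g. the
// byte pair ff d8 read here yields 0xd8 (SOI); a marker already consumed by
// the entropy decoder is handed back through j->marker instead.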
// in each scan, we'll have scan_n components, and the order
// of the components is specified by order[]
#define STBI_RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)
// after a restart interval, reset the entropy decoder and
// the dc prediction
static void reset(jpeg *j)
{
j->code_bits = 0;
j->code_buffer = 0;
j->nomore = 0;
j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = 0;
j->marker = STBI_MARKER_none;
j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
   // no more than 1<<31 MCUs if no restart_interval? that's plenty safe,
// since we don't even allow 1<<30 pixels
}
static int parse_entropy_coded_data(jpeg *z)
{
reset(z);
if (z->scan_n == 1) {
int i,j;
#ifdef STBI_SIMD
__declspec(align(16))
#endif
short data[64];
int n = z->order[0];
// non-interleaved data, we just need to process one block at a time,
// in trivial scanline order
// number of blocks to do just depends on how many actual "pixels" this
// component has, independent of interleaved MCU blocking and such
int w = (z->img_comp[n].x+7) >> 3;
int h = (z->img_comp[n].y+7) >> 3;
for (j=0; j < h; ++j) {
for (i=0; i < w; ++i) {
if (!decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+z->img_comp[n].ha, n)) return 0;
#ifdef STBI_SIMD
stbi_idct_installed(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data, z->dequant2[z->img_comp[n].tq]);
#else
idct_block(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data, z->dequant[z->img_comp[n].tq]);
#endif
// every data block is an MCU, so countdown the restart interval
if (--z->todo <= 0) {
if (z->code_bits < 24) grow_buffer_unsafe(z);
// if it's NOT a restart, then just bail, so we get corrupt data
// rather than no data
if (!STBI_RESTART(z->marker)) return 1;
reset(z);
}
}
}
} else { // interleaved!
int i,j,k,x,y;
short data[64];
for (j=0; j < z->img_mcu_y; ++j) {
for (i=0; i < z->img_mcu_x; ++i) {
// scan an interleaved mcu... process scan_n components in order
for (k=0; k < z->scan_n; ++k) {
int n = z->order[k];
// scan out an mcu's worth of this component; that's just determined
// by the basic H and V specified for the component
for (y=0; y < z->img_comp[n].v; ++y) {
for (x=0; x < z->img_comp[n].h; ++x) {
int x2 = (i*z->img_comp[n].h + x)*8;
int y2 = (j*z->img_comp[n].v + y)*8;
if (!decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+z->img_comp[n].ha, n)) return 0;
#ifdef STBI_SIMD
stbi_idct_installed(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data, z->dequant2[z->img_comp[n].tq]);
#else
idct_block(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data, z->dequant[z->img_comp[n].tq]);
#endif
}
}
}
// after all interleaved components, that's an interleaved MCU,
// so now count down the restart interval
if (--z->todo <= 0) {
if (z->code_bits < 24) grow_buffer_unsafe(z);
// if it's NOT a restart, then just bail, so we get corrupt data
// rather than no data
if (!STBI_RESTART(z->marker)) return 1;
reset(z);
}
}
}
}
return 1;
}
static int process_marker(jpeg *z, int m)
{
int L;
switch (m) {
case STBI_MARKER_none: // no marker found
return stbi_error("expected marker","Corrupt JPEG");
case 0xC2: // SOF - progressive
return stbi_error("progressive jpeg","JPEG format not supported (progressive)");
case 0xDD: // DRI - specify restart interval
if (get16(z->s) != 4) return stbi_error("bad DRI len","Corrupt JPEG");
z->restart_interval = get16(z->s);
return 1;
case 0xDB: // DQT - define quantization table
L = get16(z->s)-2;
while (L > 0) {
int q = get8(z->s);
int p = q >> 4;
int t = q & 15,i;
if (p != 0) return stbi_error("bad DQT type","Corrupt JPEG");
if (t > 3) return stbi_error("bad DQT table","Corrupt JPEG");
for (i=0; i < 64; ++i)
z->dequant[t][dezigzag[i]] = get8u(z->s);
#ifdef STBI_SIMD
for (i=0; i < 64; ++i)
z->dequant2[t][i] = z->dequant[t][i];
#endif
L -= 65;
}
return L==0;
case 0xC4: // DHT - define huffman table
L = get16(z->s)-2;
while (L > 0) {
uint8 *v;
int sizes[16],i,m=0;
int q = get8(z->s);
int tc = q >> 4;
int th = q & 15;
if (tc > 1 || th > 3) return stbi_error("bad DHT header","Corrupt JPEG");
for (i=0; i < 16; ++i) {
sizes[i] = get8(z->s);
m += sizes[i];
}
L -= 17;
if (tc == 0) {
if (!build_huffman(z->huff_dc+th, sizes)) return 0;
v = z->huff_dc[th].values;
} else {
if (!build_huffman(z->huff_ac+th, sizes)) return 0;
v = z->huff_ac[th].values;
}
for (i=0; i < m; ++i)
v[i] = get8u(z->s);
L -= m;
}
return L==0;
}
// check for comment block or APP blocks
if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) {
skip(z->s, get16(z->s)-2);
return 1;
}
return 0;
}
// after we see SOS
static int process_scan_header(jpeg *z)
{
int i;
int Ls = get16(z->s);
z->scan_n = get8(z->s);
if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi_error("bad SOS component count","Corrupt JPEG");
if (Ls != 6+2*z->scan_n) return stbi_error("bad SOS len","Corrupt JPEG");
for (i=0; i < z->scan_n; ++i) {
int id = get8(z->s), which;
int q = get8(z->s);
for (which = 0; which < z->s->img_n; ++which)
if (z->img_comp[which].id == id)
break;
if (which == z->s->img_n) return 0;
z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi_error("bad DC huff","Corrupt JPEG");
z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi_error("bad AC huff","Corrupt JPEG");
z->order[i] = which;
}
if (get8(z->s) != 0) return stbi_error("bad SOS","Corrupt JPEG");
get8(z->s); // should be 63, but might be 0
if (get8(z->s) != 0) return stbi_error("bad SOS","Corrupt JPEG");
return 1;
}
static int process_frame_header(jpeg *z, int scan)
{
stbi *s = z->s;
int Lf,p,i,q, h_max=1,v_max=1,c;
Lf = get16(s); if (Lf < 11) return stbi_error("bad SOF len","Corrupt JPEG"); // JPEG
p = get8(s); if (p != 8) return stbi_error("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline
s->img_y = get16(s); if (s->img_y == 0) return stbi_error("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG
s->img_x = get16(s); if (s->img_x == 0) return stbi_error("0 width","Corrupt JPEG"); // JPEG requires
c = get8(s);
if (c != 3 && c != 1) return stbi_error("bad component count","Corrupt JPEG"); // JFIF requires
s->img_n = c;
for (i=0; i < c; ++i) {
z->img_comp[i].data = NULL;
z->img_comp[i].linebuf = NULL;
}
if (Lf != 8+3*s->img_n) return stbi_error("bad SOF len","Corrupt JPEG");
for (i=0; i < s->img_n; ++i) {
z->img_comp[i].id = get8(s);
if (z->img_comp[i].id != i+1) // JFIF requires
if (z->img_comp[i].id != i) // some version of jpegtran outputs non-JFIF-compliant files!
return stbi_error("bad component ID","Corrupt JPEG");
q = get8(s);
z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi_error("bad H","Corrupt JPEG");
z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi_error("bad V","Corrupt JPEG");
z->img_comp[i].tq = get8(s); if (z->img_comp[i].tq > 3) return stbi_error("bad TQ","Corrupt JPEG");
}
if (scan != SCAN_load) return 1;
if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi_error("too large", "Image too large to decode");
for (i=0; i < s->img_n; ++i) {
if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h;
if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v;
}
// compute interleaved mcu info
z->img_h_max = h_max;
z->img_v_max = v_max;
z->img_mcu_w = h_max * 8;
z->img_mcu_h = v_max * 8;
z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w;
z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h;
for (i=0; i < s->img_n; ++i) {
// number of effective pixels (e.g. for non-interleaved MCU)
z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max;
z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max;
// to simplify generation, we'll allocate enough memory to decode
// the bogus oversized data from using interleaved MCUs and their
// big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't
// discard the extra data until colorspace conversion
z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
z->img_comp[i].raw_data = malloc(z->img_comp[i].w2 * z->img_comp[i].h2+15);
if (z->img_comp[i].raw_data == NULL) {
for(--i; i >= 0; --i) {
free(z->img_comp[i].raw_data);
z->img_comp[i].data = NULL;
}
return stbi_error("outofmem", "Out of memory");
}
// align blocks for installable-idct using mmx/sse
z->img_comp[i].data = (uint8*) (((size_t) z->img_comp[i].raw_data + 15) & ~15);
z->img_comp[i].linebuf = NULL;
}
return 1;
}
// use comparisons since in some cases we handle more than one case (e.g. SOF)
#define STBI_DNL(x) ((x) == 0xdc)
#define STBI_SOI(x) ((x) == 0xd8)
#define STBI_EOI(x) ((x) == 0xd9)
#define STBI_SOF(x) ((x) == 0xc0 || (x) == 0xc1)
#define STBI_SOS(x) ((x) == 0xda)
static int decode_jpeg_header(jpeg *z, int scan)
{
int m;
z->marker = STBI_MARKER_none; // initialize cached marker to empty
m = get_marker(z);
if (!STBI_SOI(m)) return stbi_error("no SOI","Corrupt JPEG");
if (scan == SCAN_type) return 1;
m = get_marker(z);
while (!STBI_SOF(m)) {
if (!process_marker(z,m)) return 0;
m = get_marker(z);
while (m == STBI_MARKER_none) {
// some files have extra padding after their blocks, so ok, we'll scan
if (at_eof(z->s)) return stbi_error("no SOF", "Corrupt JPEG");
m = get_marker(z);
}
}
if (!process_frame_header(z, scan)) return 0;
return 1;
}
static int decode_jpeg_image(jpeg *j)
{
int m;
j->restart_interval = 0;
if (!decode_jpeg_header(j, SCAN_load)) return 0;
m = get_marker(j);
while (!STBI_EOI(m)) {
if (STBI_SOS(m)) {
if (!process_scan_header(j)) return 0;
if (!parse_entropy_coded_data(j)) return 0;
if (j->marker == STBI_MARKER_none ) {
// handle 0s at the end of image data from IP Kamera 9060
while (!at_eof(j->s)) {
int x = get8(j->s);
if (x == 255) {
j->marker = get8u(j->s);
break;
} else if (x != 0) {
return 0;
}
}
// if we reach eof without hitting a marker, get_marker() below will fail and we'll eventually return 0
}
} else {
if (!process_marker(j, m)) return 0;
}
m = get_marker(j);
}
return 1;
}
// static jfif-centered resampling (across block boundaries)
typedef uint8 *(*resample_row_func)(uint8 *out, uint8 *in0, uint8 *in1,
int w, int hs);
#define stbi_div4(x) ((uint8) ((x) >> 2))
static uint8 *resample_row_1(uint8 *out, uint8 *in_near, uint8 *in_far, int w, int hs)
{
STBI_NOTUSED(out);
STBI_NOTUSED(in_far);
STBI_NOTUSED(w);
STBI_NOTUSED(hs);
return in_near;
}
static uint8* resample_row_v_2(uint8 *out, uint8 *in_near, uint8 *in_far, int w, int hs)
{
// need to generate two samples vertically for every one in input
int i;
STBI_NOTUSED(hs);
for (i=0; i < w; ++i)
out[i] = stbi_div4(3*in_near[i] + in_far[i] + 2);
return out;
}
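// The 3:1 weighting is a rounded linear interpolation a quarter of the way
// toward the far row: e.g. in_near=100, in_far=200 gives (300+200+2)>>2 == 125,
// i.e. 0.75*100 + 0.25*200.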
static uint8* resample_row_h_2(uint8 *out, uint8 *in_near, uint8 *in_far, int w, int hs)
{
// need to generate two samples horizontally for every one in input
int i;
uint8 *input = in_near;
if (w == 1) {
// if only one sample, can't do any interpolation
out[0] = out[1] = input[0];
return out;
}
out[0] = input[0];
out[1] = stbi_div4(input[0]*3 + input[1] + 2);
for (i=1; i < w-1; ++i) {
int n = 3*input[i]+2;
out[i*2+0] = stbi_div4(n+input[i-1]);
out[i*2+1] = stbi_div4(n+input[i+1]);
}
out[i*2+0] = stbi_div4(input[w-2]*3 + input[w-1] + 2);
out[i*2+1] = input[w-1];
STBI_NOTUSED(in_far);
STBI_NOTUSED(hs);
return out;
}
#define stbi_div16(x) ((uint8) ((x) >> 4))
static uint8 *resample_row_hv_2(uint8 *out, uint8 *in_near, uint8 *in_far, int w, int hs)
{
// need to generate 2x2 samples for every one in input
int i,t0,t1;
if (w == 1) {
out[0] = out[1] = stbi_div4(3*in_near[0] + in_far[0] + 2);
return out;
}
t1 = 3*in_near[0] + in_far[0];
out[0] = stbi_div4(t1+2);
for (i=1; i < w; ++i) {
t0 = t1;
t1 = 3*in_near[i]+in_far[i];
out[i*2-1] = stbi_div16(3*t0 + t1 + 8);
out[i*2 ] = stbi_div16(3*t1 + t0 + 8);
}
out[w*2-1] = stbi_div4(t1+2);
STBI_NOTUSED(hs);
return out;
}
static uint8 *resample_row_generic(uint8 *out, uint8 *in_near, uint8 *in_far, int w, int hs)
{
STBI_NOTUSED(in_far);
// resample with nearest-neighbor
int i,j;
for (i=0; i < w; ++i)
for (j=0; j < hs; ++j)
out[i*hs+j] = in_near[i];
return out;
}
#define stbi_float2fixed(x) ((int) ((x) * 65536 + 0.5))
// 0.38 seconds on 3*anemones.jpg (0.25 with processor = Pro)
// VC6 without processor=Pro is generating multiple LEAs per multiply!
static void YCbCr_to_RGB_row(uint8 *out, const uint8 *y, const uint8 *pcb, const uint8 *pcr, int count, int step)
{
int i;
for (i=0; i < count; ++i) {
int y_fixed = (y[i] << 16) + 32768; // rounding
int r,g,b;
int cr = pcr[i] - 128;
int cb = pcb[i] - 128;
r = y_fixed + cr*stbi_float2fixed(1.40200f);
g = y_fixed - cr*stbi_float2fixed(0.71414f) - cb*stbi_float2fixed(0.34414f);
b = y_fixed + cb*stbi_float2fixed(1.77200f);
r >>= 16;
g >>= 16;
b >>= 16;
if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
out[0] = (uint8)r;
out[1] = (uint8)g;
out[2] = (uint8)b;
out[3] = 255;
out += step;
}
}
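// The conversion runs in 16.16 fixed point: stbi_float2fixed(1.40200f) is
// 91881, and the +32768 on y_fixed is the 0.5 rounding term. A neutral pixel
// (y=128, cb=cr=128) therefore comes out as plain grey r=g=b=128.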
#ifdef STBI_SIMD
static stbi_YCbCr_to_RGB_run stbi_YCbCr_installed = YCbCr_to_RGB_row;
void stbi_install_YCbCr_to_RGB(stbi_YCbCr_to_RGB_run func)
{
stbi_YCbCr_installed = func;
}
#endif
// clean up the temporary component buffers
static void cleanup_jpeg(jpeg *j)
{
int i;
for (i=0; i < j->s->img_n; ++i) {
if (j->img_comp[i].data) {
free(j->img_comp[i].raw_data);
j->img_comp[i].data = NULL;
}
free(j->img_comp[i].linebuf), j->img_comp[i].linebuf = NULL;
}
}
typedef struct
{
resample_row_func resample;
uint8 *line0,*line1;
int hs,vs; // expansion factor in each axis
int w_lores; // horizontal pixels pre-expansion
int ystep; // how far through vertical expansion we are
int ypos; // which pre-expansion row we're on
} stbi_resample;
static uint8 *load_jpeg_image(jpeg *z, int *out_x, int *out_y, int *comp, int req_comp)
{
int n, decode_n;
// validate req_comp
if (req_comp < 0 || req_comp > 4) return stbi_error_puc("bad req_comp", "Internal error");
z->s->img_n = 0;
// load a jpeg image from whichever source
if (!decode_jpeg_image(z)) { cleanup_jpeg(z); return NULL; }
// determine actual number of components to generate
n = req_comp ? req_comp : z->s->img_n;
if (z->s->img_n == 3 && n < 3)
decode_n = 1;
else
decode_n = z->s->img_n;
// resample and color-convert
{
int k;
uint i,j;
uint8 *output;
uint8 *coutput[4];
stbi_resample res_comp[4];
for (k=0; k < decode_n; ++k) {
stbi_resample *r = &res_comp[k];
// allocate line buffer big enough for upsampling off the edges
// with upsample factor of 4
z->img_comp[k].linebuf = (uint8 *) malloc(z->s->img_x + 3);
if (!z->img_comp[k].linebuf) { cleanup_jpeg(z); return stbi_error_puc("outofmem", "Out of memory"); }
r->hs = z->img_h_max / z->img_comp[k].h;
r->vs = z->img_v_max / z->img_comp[k].v;
r->ystep = r->vs >> 1;
r->w_lores = (z->s->img_x + r->hs-1) / r->hs;
r->ypos = 0;
r->line0 = r->line1 = z->img_comp[k].data;
if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1;
else if (r->hs == 1 && r->vs == 2) r->resample = resample_row_v_2;
else if (r->hs == 2 && r->vs == 1) r->resample = resample_row_h_2;
else if (r->hs == 2 && r->vs == 2) r->resample = resample_row_hv_2;
else r->resample = resample_row_generic;
}
// can't error after this, so this is safe
output = (uint8 *) malloc(n * z->s->img_x * z->s->img_y + 1);
if (!output) { cleanup_jpeg(z); return stbi_error_puc("outofmem", "Out of memory"); }
// now go ahead and resample
for (j=0; j < z->s->img_y; ++j) {
uint8 *out = output + n * z->s->img_x * j;
for (k=0; k < decode_n; ++k) {
stbi_resample *r = &res_comp[k];
int y_bot = r->ystep >= (r->vs >> 1);
coutput[k] = r->resample(z->img_comp[k].linebuf,
y_bot ? r->line1 : r->line0,
y_bot ? r->line0 : r->line1,
r->w_lores, r->hs);
if (++r->ystep >= r->vs) {
r->ystep = 0;
r->line0 = r->line1;
if (++r->ypos < z->img_comp[k].y)
r->line1 += z->img_comp[k].w2;
}
}
if (n >= 3) {
uint8 *y = coutput[0];
if (z->s->img_n == 3) {
#ifdef STBI_SIMD
stbi_YCbCr_installed(out, y, coutput[1], coutput[2], z->s->img_x, n);
#else
YCbCr_to_RGB_row(out, y, coutput[1], coutput[2], z->s->img_x, n);
#endif
} else {
for (i=0; i < z->s->img_x; ++i) {
out[0] = out[1] = out[2] = y[i];
out[3] = 255; // not used if n==3
out += n;
}
}
} else {
uint8 *y = coutput[0];
if (n == 1)
for (i=0; i < z->s->img_x; ++i) out[i] = y[i];
else
for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255;
}
}
cleanup_jpeg(z);
*out_x = z->s->img_x;
*out_y = z->s->img_y;
if (comp) *comp = z->s->img_n; // report original components, not output
return output;
}
}
static unsigned char *stbi_jpeg_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
jpeg j;
j.s = s;
return load_jpeg_image(&j, x,y,comp,req_comp);
}
static int stbi_jpeg_test(stbi *s)
{
int r;
jpeg j;
j.s = s;
r = decode_jpeg_header(&j, SCAN_type);
stbi_rewind(s);
return r;
}
static int stbi_jpeg_info_raw(jpeg *j, int *x, int *y, int *comp)
{
if (!decode_jpeg_header(j, SCAN_header)) {
stbi_rewind( j->s );
return 0;
}
if (x) *x = j->s->img_x;
if (y) *y = j->s->img_y;
if (comp) *comp = j->s->img_n;
return 1;
}
static int stbi_jpeg_info(stbi *s, int *x, int *y, int *comp)
{
jpeg j;
j.s = s;
return stbi_jpeg_info_raw(&j, x, y, comp);
}
// public domain zlib decode v0.2 Sean Barrett 2006-11-18
// simple implementation
// - all input must be provided in an upfront buffer
// - all output is written to a single output buffer (can malloc/realloc)
// performance
// - fast huffman
// fast-way is faster to check than jpeg huffman, but slow way is slower
#define STBI_ZFAST_BITS 9 // accelerate all cases in default tables
#define STBI_ZFAST_MASK ((1 << STBI_ZFAST_BITS) - 1)
// zlib-style huffman encoding
// (jpegs packs from left, zlib from right, so can't share code)
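// (since zlib codes are read LSB-first, zbuild_huffman below stores each code
// bit-reversed, so the fast table can be indexed directly with the low bits of
// the bit buffer)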
typedef struct
{
uint16 fast[1 << STBI_ZFAST_BITS];
uint16 firstcode[16];
int maxcode[17];
uint16 firstsymbol[16];
uint8 size[288];
uint16 value[288];
} zhuffman;
stbi_inline static int bitreverse16(int n)
{
n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
return n;
}
stbi_inline static int bit_reverse(int v, int bits)
{
assert(bits <= 16);
// to bit reverse n bits, reverse 16 and shift
// e.g. 11 bits, bit reverse and shift away 5
return bitreverse16(v) >> (16-bits);
}
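// build a canonical huffman tree from a list of code lengths; codes of length
// <= STBI_ZFAST_BITS also get entered into the 512-entry fast table (a code of
// length s fills every 2^s-th slot, 2^(STBI_ZFAST_BITS-s) slots in total)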
static int zbuild_huffman(zhuffman *z, uint8 *sizelist, int num)
{
int i,k=0;
int code, next_code[16], sizes[17];
// DEFLATE spec for generating codes
memset(sizes, 0, sizeof(sizes));
memset(z->fast, 255, sizeof(z->fast));
for (i=0; i < num; ++i)
++sizes[sizelist[i]];
sizes[0] = 0;
for (i=1; i < 16; ++i)
assert(sizes[i] <= (1 << i));
code = 0;
for (i=1; i < 16; ++i) {
next_code[i] = code;
z->firstcode[i] = (uint16) code;
z->firstsymbol[i] = (uint16) k;
code = (code + sizes[i]);
if (sizes[i])
if (code-1 >= (1 << i)) return stbi_error("bad codelengths","Corrupt PNG");
z->maxcode[i] = code << (16-i); // preshift for inner loop
code <<= 1;
k += sizes[i];
}
z->maxcode[16] = 0x10000; // sentinel
for (i=0; i < num; ++i) {
int s = sizelist[i];
if (s) {
int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
z->size[c] = (uint8)s;
z->value[c] = (uint16)i;
if (s <= STBI_ZFAST_BITS) {
int k = bit_reverse(next_code[s],s);
while (k < (1 << STBI_ZFAST_BITS)) {
z->fast[k] = (uint16) c;
k += (1 << s);
}
}
++next_code[s];
}
}
return 1;
}
// zlib-from-memory implementation for PNG reading
// because PNG allows splitting the zlib stream arbitrarily,
// and it's annoying structurally to have PNG call ZLIB call PNG,
// we require PNG read all the IDATs and combine them into a single
// memory buffer
typedef struct
{
const uint8 *zbuffer, *zbuffer_end;
int num_bits;
uint32 code_buffer;
char *zout;
char *zout_start;
char *zout_end;
int z_expandable;
zhuffman z_length, z_distance;
} zbuf;
stbi_inline static int zget8(zbuf *z)
{
if (z->zbuffer >= z->zbuffer_end) return 0;
return *z->zbuffer++;
}
static void fill_bits(zbuf *z)
{
do {
assert(z->code_buffer < (1U << z->num_bits));
z->code_buffer |= zget8(z) << z->num_bits;
z->num_bits += 8;
} while (z->num_bits <= 24);
}
stbi_inline static unsigned int zreceive(zbuf *z, int n)
{
unsigned int k;
if (z->num_bits < n) fill_bits(z);
k = z->code_buffer & ((1 << n) - 1);
z->code_buffer >>= n;
z->num_bits -= n;
return k;
}
stbi_inline static int zhuffman_decode(zbuf *a, zhuffman *z)
{
int b,s,k;
if (a->num_bits < 16) fill_bits(a);
b = z->fast[a->code_buffer & STBI_ZFAST_MASK];
if (b < 0xffff) {
s = z->size[b];
a->code_buffer >>= s;
a->num_bits -= s;
return z->value[b];
}
// not resolved by fast table, so compute it the slow way
// use jpeg approach, which requires MSbits at top
k = bit_reverse(a->code_buffer, 16);
for (s=STBI_ZFAST_BITS+1; ; ++s)
if (k < z->maxcode[s])
break;
if (s == 16) return -1; // invalid code!
// code size is s, so:
b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s];
assert(z->size[b] == s);
a->code_buffer >>= s;
a->num_bits -= s;
return z->value[b];
}
static int expand(zbuf *z, int n) // need to make room for n bytes
{
char *q;
int cur, limit;
if (!z->z_expandable) return stbi_error("output buffer limit","Corrupt PNG");
cur = (int) (z->zout - z->zout_start);
limit = (int) (z->zout_end - z->zout_start);
while (cur + n > limit)
limit *= 2;
q = (char *) realloc(z->zout_start, limit);
if (q == NULL) return stbi_error("outofmem", "Out of memory");
z->zout_start = q;
z->zout = q + cur;
z->zout_end = q + limit;
return 1;
}
static int length_base[31] = {
3,4,5,6,7,8,9,10,11,13,
15,17,19,23,27,31,35,43,51,59,
67,83,99,115,131,163,195,227,258,0,0
};
static int length_extra[31] = {
0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0
};
static int dist_base[32] = {
1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0
};
static int dist_extra[32] = {
0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13
};
static int parse_huffman_block(zbuf *a)
{
for(;;) {
int z = zhuffman_decode(a, &a->z_length);
if (z < 256) {
if (z < 0) return stbi_error("bad huffman code","Corrupt PNG"); // error in huffman codes
if (a->zout >= a->zout_end) if (!expand(a, 1)) return 0;
*a->zout++ = (char) z;
} else {
uint8 *p;
int len,dist;
if (z == 256) return 1;
z -= 257;
len = length_base[z];
if (length_extra[z]) len += zreceive(a, length_extra[z]);
z = zhuffman_decode(a, &a->z_distance);
if (z < 0) return stbi_error("bad huffman code","Corrupt PNG");
dist = dist_base[z];
if (dist_extra[z]) dist += zreceive(a, dist_extra[z]);
if (a->zout - a->zout_start < dist) return stbi_error("bad dist","Corrupt PNG");
if (a->zout + len > a->zout_end) if (!expand(a, len)) return 0;
p = (uint8 *) (a->zout - dist);
while (len--)
*a->zout++ = *p++;
}
}
}
static int compute_huffman_codes(zbuf *a)
{
static uint8 length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
zhuffman z_codelength;
uint8 lencodes[286+32+137];//padding for maximum single op
uint8 codelength_sizes[19];
int i,n;
int hlit = zreceive(a,5) + 257;
int hdist = zreceive(a,5) + 1;
int hclen = zreceive(a,4) + 4;
memset(codelength_sizes, 0, sizeof(codelength_sizes));
for (i=0; i < hclen; ++i) {
int s = zreceive(a,3);
codelength_sizes[length_dezigzag[i]] = (uint8) s;
}
if (!zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0;
n = 0;
while (n < hlit + hdist) {
int c = zhuffman_decode(a, &z_codelength);
assert(c >= 0 && c < 19);
if (c < 16)
lencodes[n++] = (uint8) c;
else if (c == 16) {
c = zreceive(a,2)+3;
memset(lencodes+n, lencodes[n-1], c);
n += c;
} else if (c == 17) {
c = zreceive(a,3)+3;
memset(lencodes+n, 0, c);
n += c;
} else {
assert(c == 18);
c = zreceive(a,7)+11;
memset(lencodes+n, 0, c);
n += c;
}
}
if (n != hlit+hdist) return stbi_error("bad codelengths","Corrupt PNG");
if (!zbuild_huffman(&a->z_length, lencodes, hlit)) return 0;
if (!zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0;
return 1;
}
static int parse_uncompressed_block(zbuf *a)
{
uint8 header[4];
int len,nlen,k;
if (a->num_bits & 7)
zreceive(a, a->num_bits & 7); // discard
// drain the bit-packed data into header
k = 0;
while (a->num_bits > 0) {
header[k++] = (uint8) (a->code_buffer & 255); // wtf this warns?
a->code_buffer >>= 8;
a->num_bits -= 8;
}
assert(a->num_bits == 0);
// now fill header the normal way
while (k < 4)
header[k++] = (uint8) zget8(a);
len = header[1] * 256 + header[0];
nlen = header[3] * 256 + header[2];
if (nlen != (len ^ 0xffff)) return stbi_error("zlib corrupt","Corrupt PNG");
if (a->zbuffer + len > a->zbuffer_end) return stbi_error("read past buffer","Corrupt PNG");
if (a->zout + len > a->zout_end)
if (!expand(a, len)) return 0;
memcpy(a->zout, a->zbuffer, len);
a->zbuffer += len;
a->zout += len;
return 1;
}
static int parse_zlib_header(zbuf *a)
{
int cmf = zget8(a);
int cm = cmf & 15;
/* int cinfo = cmf >> 4; */
int flg = zget8(a);
if ((cmf*256+flg) % 31 != 0) return stbi_error("bad zlib header","Corrupt PNG"); // zlib spec
if (flg & 32) return stbi_error("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png
if (cm != 8) return stbi_error("bad compression","Corrupt PNG"); // DEFLATE required for png
// window = 1 << (8 + cinfo)... but who cares, we fully buffer output
return 1;
}
// @TODO: should statically initialize these for optimal thread safety
static uint8 default_length[288], default_distance[32];
static void init_defaults(void)
{
int i; // use <= to match clearly with spec
for (i=0; i <= 143; ++i) default_length[i] = 8;
for ( ; i <= 255; ++i) default_length[i] = 9;
for ( ; i <= 279; ++i) default_length[i] = 7;
for ( ; i <= 287; ++i) default_length[i] = 8;
for (i=0; i <= 31; ++i) default_distance[i] = 5;
}
int stbi_png_partial; // a quick hack to only allow decoding some of a PNG... I should implement real streaming support instead
static int parse_zlib(zbuf *a, int parse_header)
{
int final, type;
if (parse_header)
if (!parse_zlib_header(a)) return 0;
a->num_bits = 0;
a->code_buffer = 0;
do {
final = zreceive(a,1);
type = zreceive(a,2);
if (type == 0) {
if (!parse_uncompressed_block(a)) return 0;
} else if (type == 3) {
return 0;
} else {
if (type == 1) {
// use fixed code lengths
if (!default_distance[31]) init_defaults();
if (!zbuild_huffman(&a->z_length , default_length , 288)) return 0;
if (!zbuild_huffman(&a->z_distance, default_distance, 32)) return 0;
} else {
if (!compute_huffman_codes(a)) return 0;
}
if (!parse_huffman_block(a)) return 0;
}
if (stbi_png_partial && a->zout - a->zout_start > 65536)
break;
} while (!final);
return 1;
}
static int do_zlib(zbuf *a, char *obuf, int olen, int exp, int parse_header)
{
a->zout_start = obuf;
a->zout = obuf;
a->zout_end = obuf + olen;
a->z_expandable = exp;
return parse_zlib(a, parse_header);
}
char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen)
{
zbuf a;
char *p = (char *) malloc(initial_size);
if (p == NULL) return NULL;
a.zbuffer = (const uint8 *) buffer;
a.zbuffer_end = (const uint8 *) buffer + len;
if (do_zlib(&a, p, initial_size, 1, 1)) {
if (outlen) *outlen = (int) (a.zout - a.zout_start);
return a.zout_start;
} else {
free(a.zout_start);
return NULL;
}
}
char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen)
{
return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen);
}
char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header)
{
zbuf a;
char *p = (char *) malloc(initial_size);
if (p == NULL) return NULL;
a.zbuffer = (const uint8 *) buffer;
a.zbuffer_end = (const uint8 *) buffer + len;
if (do_zlib(&a, p, initial_size, 1, parse_header)) {
if (outlen) *outlen = (int) (a.zout - a.zout_start);
return a.zout_start;
} else {
free(a.zout_start);
return NULL;
}
}
int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen)
{
zbuf a;
a.zbuffer = (const uint8 *) ibuffer;
a.zbuffer_end = (const uint8 *) ibuffer + ilen;
if (do_zlib(&a, obuffer, olen, 0, 1))
return (int) (a.zout - a.zout_start);
else
return -1;
}
char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen)
{
zbuf a;
char *p = (char *) malloc(16384);
if (p == NULL) return NULL;
a.zbuffer = (const uint8 *) buffer;
a.zbuffer_end = (const uint8 *) buffer+len;
if (do_zlib(&a, p, 16384, 1, 0)) {
if (outlen) *outlen = (int) (a.zout - a.zout_start);
return a.zout_start;
} else {
free(a.zout_start);
return NULL;
}
}
int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen)
{
zbuf a;
a.zbuffer = (const uint8 *) ibuffer;
a.zbuffer_end = (const uint8 *) ibuffer + ilen;
if (do_zlib(&a, obuffer, olen, 0, 0))
return (int) (a.zout - a.zout_start);
else
return -1;
}
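// Usage sketch for the decoders above (illustrative only; `cdata' and `clen'
// stand in for a caller-supplied compressed buffer and its length):
//    int rawlen;
//    char *raw = stbi_zlib_decode_malloc(cdata, clen, &rawlen);
//    if (raw) { /* ... use rawlen bytes at raw ... */ free(raw); }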
// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18
// simple implementation
// - only 8-bit samples
// - no CRC checking
// - allocates lots of intermediate memory
// - avoids problem of streaming data between subsystems
// - avoids explicit window management
// performance
// - uses stb_zlib, a PD zlib implementation with fast huffman decoding
typedef struct
{
uint32 length;
uint32 type;
} chunk;
#define STBI_PNG_TYPE(a,b,c,d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
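// e.g. STBI_PNG_TYPE('I','H','D','R') == 0x49484452, matching the big-endian
// chunk type read by get32() below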
static chunk get_chunk_header(stbi *s)
{
chunk c;
c.length = get32(s);
c.type = get32(s);
return c;
}
static int check_png_header(stbi *s)
{
static uint8 png_sig[8] = { 137,80,78,71,13,10,26,10 };
int i;
for (i=0; i < 8; ++i)
if (get8u(s) != png_sig[i]) return stbi_error("bad png sig","Not a PNG");
return 1;
}
typedef struct
{
stbi *s;
uint8 *idata, *expanded, *out;
} png;
enum {
F_none=0, F_sub=1, F_up=2, F_avg=3, F_paeth=4,
F_avg_first, F_paeth_first
};
static uint8 first_row_filter[5] =
{
F_none, F_sub, F_none, F_avg_first, F_paeth_first
};
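// Paeth predictor from the PNG spec: predict a+b-c, then return whichever of
// left (a), above (b), or upper-left (c) is closest to that prediction.
// For example, paeth(50,60,40) predicts 70; the distances are 20, 10 and 30,
// so the above pixel (60) is chosen.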
static int paeth(int a, int b, int c)
{
int p = a + b - c;
int pa = abs(p-a);
int pb = abs(p-b);
int pc = abs(p-c);
if (pa <= pb && pa <= pc) return a;
if (pb <= pc) return b;
return c;
}
// create the png data from post-deflated data
static int create_png_image_raw(png *a, uint8 *raw, uint32 raw_len, int out_n, uint32 x, uint32 y)
{
stbi *s = a->s;
uint32 i,j,stride = x*out_n;
int k;
int img_n = s->img_n; // copy it into a local for later
assert(out_n == s->img_n || out_n == s->img_n+1);
if (stbi_png_partial) y = 1;
a->out = (uint8 *) malloc(x * y * out_n);
if (!a->out) return stbi_error("outofmem", "Out of memory");
if (!stbi_png_partial) {
if (s->img_x == x && s->img_y == y) {
if (raw_len != (img_n * x + 1) * y) return stbi_error("not enough pixels","Corrupt PNG");
} else { // interlaced:
if (raw_len < (img_n * x + 1) * y) return stbi_error("not enough pixels","Corrupt PNG");
}
}
for (j=0; j < y; ++j) {
uint8 *cur = a->out + stride*j;
uint8 *prior = cur - stride;
int filter = *raw++;
if (filter > 4) return stbi_error("invalid filter","Corrupt PNG");
// if first row, use special filter that doesn't sample previous row
if (j == 0) filter = first_row_filter[filter];
// handle first pixel explicitly
for (k=0; k < img_n; ++k) {
switch (filter) {
case F_none : cur[k] = raw[k]; break;
case F_sub : cur[k] = raw[k]; break;
case F_up : cur[k] = raw[k] + prior[k]; break;
case F_avg : cur[k] = raw[k] + (prior[k]>>1); break;
case F_paeth : cur[k] = (uint8) (raw[k] + paeth(0,prior[k],0)); break;
case F_avg_first : cur[k] = raw[k]; break;
case F_paeth_first: cur[k] = raw[k]; break;
}
}
if (img_n != out_n) cur[img_n] = 255;
raw += img_n;
cur += out_n;
prior += out_n;
// this is a little gross, so that we don't switch per-pixel or per-component
if (img_n == out_n) {
#define STBI_CASE(f) \
case f: \
for (i=x-1; i >= 1; --i, raw+=img_n,cur+=img_n,prior+=img_n) \
for (k=0; k < img_n; ++k)
switch (filter) {
STBI_CASE(F_none) cur[k] = raw[k]; break;
STBI_CASE(F_sub) cur[k] = raw[k] + cur[k-img_n]; break;
STBI_CASE(F_up) cur[k] = raw[k] + prior[k]; break;
STBI_CASE(F_avg) cur[k] = raw[k] + ((prior[k] + cur[k-img_n])>>1); break;
STBI_CASE(F_paeth) cur[k] = (uint8) (raw[k] + paeth(cur[k-img_n],prior[k],prior[k-img_n])); break;
STBI_CASE(F_avg_first) cur[k] = raw[k] + (cur[k-img_n] >> 1); break;
STBI_CASE(F_paeth_first) cur[k] = (uint8) (raw[k] + paeth(cur[k-img_n],0,0)); break;
}
#undef STBI_CASE
} else {
assert(img_n+1 == out_n);
#define STBI_CASE(f) \
case f: \
for (i=x-1; i >= 1; --i, cur[img_n]=255,raw+=img_n,cur+=out_n,prior+=out_n) \
for (k=0; k < img_n; ++k)
switch (filter) {
STBI_CASE(F_none) cur[k] = raw[k]; break;
STBI_CASE(F_sub) cur[k] = raw[k] + cur[k-out_n]; break;
STBI_CASE(F_up) cur[k] = raw[k] + prior[k]; break;
STBI_CASE(F_avg) cur[k] = raw[k] + ((prior[k] + cur[k-out_n])>>1); break;
STBI_CASE(F_paeth) cur[k] = (uint8) (raw[k] + paeth(cur[k-out_n],prior[k],prior[k-out_n])); break;
STBI_CASE(F_avg_first) cur[k] = raw[k] + (cur[k-out_n] >> 1); break;
STBI_CASE(F_paeth_first) cur[k] = (uint8) (raw[k] + paeth(cur[k-out_n],0,0)); break;
}
#undef STBI_CASE
}
}
return 1;
}
static int create_png_image(png *a, uint8 *raw, uint32 raw_len, int out_n, int interlaced)
{
uint8 *final;
int p;
int save;
if (!interlaced)
return create_png_image_raw(a, raw, raw_len, out_n, a->s->img_x, a->s->img_y);
save = stbi_png_partial;
stbi_png_partial = 0;
// de-interlacing
final = (uint8 *) malloc(a->s->img_x * a->s->img_y * out_n);
for (p=0; p < 7; ++p) {
int xorig[] = { 0,4,0,2,0,1,0 };
int yorig[] = { 0,0,4,0,2,0,1 };
int xspc[] = { 8,8,4,4,2,2,1 };
int yspc[] = { 8,8,8,4,4,2,2 };
int i,j,x,y;
// pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1
x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p];
y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p];
if (x && y) {
if (!create_png_image_raw(a, raw, raw_len, out_n, x, y)) {
free(final);
return 0;
}
for (j=0; j < y; ++j)
for (i=0; i < x; ++i)
memcpy(final + (j*yspc[p]+yorig[p])*a->s->img_x*out_n + (i*xspc[p]+xorig[p])*out_n,
a->out + (j*x+i)*out_n, out_n);
free(a->out);
raw += (x*out_n+1)*y;
raw_len -= (x*out_n+1)*y;
}
}
a->out = final;
stbi_png_partial = save;
return 1;
}
static int compute_transparency(png *z, uint8 tc[3], int out_n)
{
stbi *s = z->s;
uint32 i, pixel_count = s->img_x * s->img_y;
uint8 *p = z->out;
// compute color-based transparency, assuming we've
// already got 255 as the alpha value in the output
assert(out_n == 2 || out_n == 4);
if (out_n == 2) {
for (i=0; i < pixel_count; ++i) {
p[1] = (p[0] == tc[0] ? 0 : 255);
p += 2;
}
} else {
for (i=0; i < pixel_count; ++i) {
if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
p[3] = 0;
p += 4;
}
}
return 1;
}
static int expand_palette(png *a, uint8 *palette, int len, int pal_img_n)
{
uint32 i, pixel_count = a->s->img_x * a->s->img_y;
uint8 *p, *temp_out, *orig = a->out;
p = (uint8 *) malloc(pixel_count * pal_img_n);
if (p == NULL) return stbi_error("outofmem", "Out of memory");
// between here and free(out) below, exiting would leak
temp_out = p;
if (pal_img_n == 3) {
for (i=0; i < pixel_count; ++i) {
int n = orig[i]*4;
p[0] = palette[n ];
p[1] = palette[n+1];
p[2] = palette[n+2];
p += 3;
}
} else {
for (i=0; i < pixel_count; ++i) {
int n = orig[i]*4;
p[0] = palette[n ];
p[1] = palette[n+1];
p[2] = palette[n+2];
p[3] = palette[n+3];
p += 4;
}
}
free(a->out);
a->out = temp_out;
STBI_NOTUSED(len);
return 1;
}
static int stbi_unpremultiply_on_load = 0;
static int stbi_de_iphone_flag = 0;
void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply)
{
stbi_unpremultiply_on_load = flag_true_if_should_unpremultiply;
}
void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert)
{
stbi_de_iphone_flag = flag_true_if_should_convert;
}
static void stbi_de_iphone(png *z)
{
stbi *s = z->s;
uint32 i, pixel_count = s->img_x * s->img_y;
uint8 *p = z->out;
if (s->img_out_n == 3) { // convert bgr to rgb
for (i=0; i < pixel_count; ++i) {
uint8 t = p[0];
p[0] = p[2];
p[2] = t;
p += 3;
}
} else {
assert(s->img_out_n == 4);
if (stbi_unpremultiply_on_load) {
// convert bgr to rgb and unpremultiply
for (i=0; i < pixel_count; ++i) {
uint8 a = p[3];
uint8 t = p[0];
if (a) {
p[0] = p[2] * 255 / a;
p[1] = p[1] * 255 / a;
p[2] = t * 255 / a;
} else {
p[0] = p[2];
p[2] = t;
}
p += 4;
}
} else {
// convert bgr to rgb
for (i=0; i < pixel_count; ++i) {
uint8 t = p[0];
p[0] = p[2];
p[2] = t;
p += 4;
}
}
}
}
static int parse_png_file(png *z, int scan, int req_comp)
{
uint8 palette[1024], pal_img_n=0;
uint8 has_trans=0, tc[3];
uint32 ioff=0, idata_limit=0, i, pal_len=0;
int first=1,k,interlace=0, iphone=0;
stbi *s = z->s;
z->expanded = NULL;
z->idata = NULL;
z->out = NULL;
if (!check_png_header(s)) return 0;
if (scan == SCAN_type) return 1;
for (;;) {
chunk c = get_chunk_header(s);
switch (c.type) {
case STBI_PNG_TYPE('C','g','B','I'):
iphone = stbi_de_iphone_flag;
skip(s, c.length);
break;
case STBI_PNG_TYPE('I','H','D','R'): {
int depth,color,comp,filter;
if (!first) return stbi_error("multiple IHDR","Corrupt PNG");
first = 0;
if (c.length != 13) return stbi_error("bad IHDR len","Corrupt PNG");
s->img_x = get32(s); if (s->img_x > (1 << 24)) return stbi_error("too large","Very large image (corrupt?)");
s->img_y = get32(s); if (s->img_y > (1 << 24)) return stbi_error("too large","Very large image (corrupt?)");
depth = get8(s); if (depth != 8) return stbi_error("8bit only","PNG not supported: 8-bit only");
color = get8(s); if (color > 6) return stbi_error("bad ctype","Corrupt PNG");
if (color == 3) pal_img_n = 3; else if (color & 1) return stbi_error("bad ctype","Corrupt PNG");
comp = get8(s); if (comp) return stbi_error("bad comp method","Corrupt PNG");
filter= get8(s); if (filter) return stbi_error("bad filter method","Corrupt PNG");
interlace = get8(s); if (interlace>1) return stbi_error("bad interlace method","Corrupt PNG");
if (!s->img_x || !s->img_y) return stbi_error("0-pixel image","Corrupt PNG");
if (!pal_img_n) {
s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi_error("too large", "Image too large to decode");
if (scan == SCAN_header) return 1;
} else {
// if paletted, then pal_img_n is our final component count, and
// img_n is the # of components to decompress/filter.
s->img_n = 1;
if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi_error("too large","Corrupt PNG");
// if SCAN_header, have to scan to see if we have a tRNS
}
break;
}
case STBI_PNG_TYPE('P','L','T','E'): {
if (first) return stbi_error("first not IHDR", "Corrupt PNG");
if (c.length > 256*3) return stbi_error("invalid PLTE","Corrupt PNG");
pal_len = c.length / 3;
if (pal_len * 3 != c.length) return stbi_error("invalid PLTE","Corrupt PNG");
for (i=0; i < pal_len; ++i) {
palette[i*4+0] = get8u(s);
palette[i*4+1] = get8u(s);
palette[i*4+2] = get8u(s);
palette[i*4+3] = 255;
}
break;
}
case STBI_PNG_TYPE('t','R','N','S'): {
if (first) return stbi_error("first not IHDR", "Corrupt PNG");
if (z->idata) return stbi_error("tRNS after IDAT","Corrupt PNG");
if (pal_img_n) {
if (scan == SCAN_header) { s->img_n = 4; return 1; }
if (pal_len == 0) return stbi_error("tRNS before PLTE","Corrupt PNG");
if (c.length > pal_len) return stbi_error("bad tRNS len","Corrupt PNG");
pal_img_n = 4;
for (i=0; i < c.length; ++i)
palette[i*4+3] = get8u(s);
} else {
if (!(s->img_n & 1)) return stbi_error("tRNS with alpha","Corrupt PNG");
if (c.length != (uint32) s->img_n*2) return stbi_error("bad tRNS len","Corrupt PNG");
has_trans = 1;
for (k=0; k < s->img_n; ++k)
tc[k] = (uint8) get16(s); // non 8-bit images will be larger
}
break;
}
case STBI_PNG_TYPE('I','D','A','T'): {
if (first) return stbi_error("first not IHDR", "Corrupt PNG");
if (pal_img_n && !pal_len) return stbi_error("no PLTE","Corrupt PNG");
if (scan == SCAN_header) { s->img_n = pal_img_n; return 1; }
if (ioff + c.length > idata_limit) {
uint8 *p;
if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
while (ioff + c.length > idata_limit)
idata_limit *= 2;
p = (uint8 *) realloc(z->idata, idata_limit); if (p == NULL) return stbi_error("outofmem", "Out of memory");
z->idata = p;
}
if (!getn(s, z->idata+ioff,c.length)) return stbi_error("outofdata","Corrupt PNG");
ioff += c.length;
break;
}
case STBI_PNG_TYPE('I','E','N','D'): {
uint32 raw_len;
if (first) return stbi_error("first not IHDR", "Corrupt PNG");
if (scan != SCAN_load) return 1;
if (z->idata == NULL) return stbi_error("no IDAT","Corrupt PNG");
z->expanded = (uint8 *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, 16384, (int *) &raw_len, !iphone);
if (z->expanded == NULL) return 0; // zlib should set error
free(z->idata); z->idata = NULL;
if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
s->img_out_n = s->img_n+1;
else
s->img_out_n = s->img_n;
if (!create_png_image(z, z->expanded, raw_len, s->img_out_n, interlace)) return 0;
if (has_trans)
if (!compute_transparency(z, tc, s->img_out_n)) return 0;
if (iphone && s->img_out_n > 2)
stbi_de_iphone(z);
if (pal_img_n) {
// pal_img_n == 3 or 4
s->img_n = pal_img_n; // record the actual colors we had
s->img_out_n = pal_img_n;
if (req_comp >= 3) s->img_out_n = req_comp;
if (!expand_palette(z, palette, pal_len, s->img_out_n))
return 0;
}
free(z->expanded); z->expanded = NULL;
return 1;
}
default:
// if critical, fail
if (first) return stbi_error("first not IHDR", "Corrupt PNG");
if ((c.type & (1 << 29)) == 0) {
#ifndef STBI_NO_FAILURE_STRINGS
// not threadsafe
static char invalid_chunk[] = "XXXX chunk not known";
invalid_chunk[0] = (uint8) (c.type >> 24);
invalid_chunk[1] = (uint8) (c.type >> 16);
invalid_chunk[2] = (uint8) (c.type >> 8);
invalid_chunk[3] = (uint8) (c.type >> 0);
#endif
return stbi_error(invalid_chunk, "PNG not supported: unknown chunk type");
}
skip(s, c.length);
break;
}
// end of chunk, read and skip CRC
get32(s);
}
}
static unsigned char *do_png(png *p, int *x, int *y, int *n, int req_comp)
{
unsigned char *result=NULL;
if (req_comp < 0 || req_comp > 4) return stbi_error_puc("bad req_comp", "Internal error");
if (parse_png_file(p, SCAN_load, req_comp)) {
result = p->out;
p->out = NULL;
if (req_comp && req_comp != p->s->img_out_n) {
result = convert_format(result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
p->s->img_out_n = req_comp;
if (result == NULL) return result;
}
*x = p->s->img_x;
*y = p->s->img_y;
if (n) *n = p->s->img_n;
}
free(p->out); p->out = NULL;
free(p->expanded); p->expanded = NULL;
free(p->idata); p->idata = NULL;
return result;
}
static unsigned char *stbi_png_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
png p;
p.s = s;
return do_png(&p, x,y,comp,req_comp);
}
static int stbi_png_test(stbi *s)
{
int r;
r = check_png_header(s);
stbi_rewind(s);
return r;
}
static int stbi_png_info_raw(png *p, int *x, int *y, int *comp)
{
if (!parse_png_file(p, SCAN_header, 0)) {
stbi_rewind( p->s );
return 0;
}
if (x) *x = p->s->img_x;
if (y) *y = p->s->img_y;
if (comp) *comp = p->s->img_n;
return 1;
}
static int stbi_png_info(stbi *s, int *x, int *y, int *comp)
{
png p;
p.s = s;
return stbi_png_info_raw(&p, x, y, comp);
}
// Microsoft/Windows BMP image
static int bmp_test(stbi *s)
{
int sz;
if (get8(s) != 'B') return 0;
if (get8(s) != 'M') return 0;
get32le(s); // discard filesize
get16le(s); // discard reserved
get16le(s); // discard reserved
get32le(s); // discard data offset
sz = get32le(s);
if (sz == 12 || sz == 40 || sz == 56 || sz == 108) return 1;
return 0;
}
static int stbi_bmp_test(stbi *s)
{
int r = bmp_test(s);
stbi_rewind(s);
return r;
}
// returns 0..31 for the highest set bit
static int high_bit(unsigned int z)
{
int n=0;
if (z == 0) return -1;
if (z >= 0x10000) n += 16, z >>= 16;
if (z >= 0x00100) n += 8, z >>= 8;
if (z >= 0x00010) n += 4, z >>= 4;
if (z >= 0x00004) n += 2, z >>= 2;
if (z >= 0x00002) n += 1, z >>= 1;
return n;
}
static int bitcount(unsigned int a)
{
a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2
a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4
a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
a = (a + (a >> 8)); // max 16 per 8 bits
a = (a + (a >> 16)); // max 32 per 8 bits
return a & 0xff;
}
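// scale a mask-isolated field so its high bit lands in bit 7, then replicate
// the field into the remaining low bits; e.g. the 5-bit red field of a 16-bit
// 555 BMP (mask 31<<10) maps 0x7C00 -> 0xF8 -> 0xF8 + (0xF8>>5) = 255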
static int shiftsigned(int v, int shift, int bits)
{
int result;
int z=0;
if (shift < 0) v <<= -shift;
else v >>= shift;
result = v;
z = bits;
while (z < 8) {
result += v >> z;
z += bits;
}
return result;
}
static stbi_uc *bmp_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
uint8 *out;
unsigned int mr=0,mg=0,mb=0,ma=0;
stbi_uc pal[256][4];
int psize=0,i,j,compress=0,width;
int bpp, flip_vertically, pad, target, offset, hsz;
if (get8(s) != 'B' || get8(s) != 'M') return stbi_error_puc("not BMP", "Corrupt BMP");
get32le(s); // discard filesize
get16le(s); // discard reserved
get16le(s); // discard reserved
offset = get32le(s);
hsz = get32le(s);
if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108) return stbi_error_puc("unknown BMP", "BMP type not supported: unknown");
if (hsz == 12) {
s->img_x = get16le(s);
s->img_y = get16le(s);
} else {
s->img_x = get32le(s);
s->img_y = get32le(s);
}
if (get16le(s) != 1) return stbi_error_puc("bad BMP", "bad BMP");
bpp = get16le(s);
if (bpp == 1) return stbi_error_puc("monochrome", "BMP type not supported: 1-bit");
flip_vertically = ((int) s->img_y) > 0;
s->img_y = abs((int) s->img_y);
if (hsz == 12) {
if (bpp < 24)
psize = (offset - 14 - 24) / 3;
} else {
compress = get32le(s);
if (compress == 1 || compress == 2) return stbi_error_puc("BMP RLE", "BMP type not supported: RLE");
get32le(s); // discard sizeof
get32le(s); // discard hres
get32le(s); // discard vres
get32le(s); // discard colorsused
get32le(s); // discard max important
if (hsz == 40 || hsz == 56) {
if (hsz == 56) {
get32le(s);
get32le(s);
get32le(s);
get32le(s);
}
if (bpp == 16 || bpp == 32) {
if (compress == 0) {
if (bpp == 32) {
mr = 0xffu << 16;
mg = 0xffu << 8;
mb = 0xffu << 0;
ma = 0xffu << 24;
// @TODO: check for cases like alpha value is all 0 and switch it to 255
} else {
mr = 31u << 10;
mg = 31u << 5;
mb = 31u << 0;
}
} else if (compress == 3) {
mr = get32le(s);
mg = get32le(s);
mb = get32le(s);
// not documented, but generated by photoshop and handled by mspaint
if (mr == mg && mg == mb) {
// ?!?!?
return stbi_error_puc("bad BMP", "bad BMP");
}
} else {
return stbi_error_puc("bad BMP", "bad BMP");
}
}
} else {
assert(hsz == 108);
mr = get32le(s);
mg = get32le(s);
mb = get32le(s);
ma = get32le(s);
get32le(s); // discard color space
for (i=0; i < 12; ++i)
get32le(s); // discard color space parameters
}
if (bpp < 16)
psize = (offset - 14 - hsz) >> 2;
}
s->img_n = ma ? 4 : 3;
if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
target = req_comp;
else
target = s->img_n; // if they want monochrome, we'll post-convert
out = (stbi_uc *) malloc(target * s->img_x * s->img_y);
if (!out) return stbi_error_puc("outofmem", "Out of memory");
if (bpp < 16) {
int z=0;
if (psize == 0 || psize > 256) { free(out); return stbi_error_puc("invalid", "Corrupt BMP"); }
for (i=0; i < psize; ++i) {
pal[i][2] = get8u(s);
pal[i][1] = get8u(s);
pal[i][0] = get8u(s);
if (hsz != 12) get8(s);
pal[i][3] = 255;
}
skip(s, offset - 14 - hsz - psize * (hsz == 12 ? 3 : 4));
if (bpp == 4) width = (s->img_x + 1) >> 1;
else if (bpp == 8) width = s->img_x;
else
{ free(out); return stbi_error_puc("bad bpp", "Corrupt BMP"); }
pad = (-width)&3;
for (j=0; j < (int) s->img_y; ++j) {
for (i=0; i < (int) s->img_x; i += 2) {
int v=get8(s),v2=0;
if (bpp == 4) {
v2 = v & 15;
v >>= 4;
}
out[z++] = pal[v][0];
out[z++] = pal[v][1];
out[z++] = pal[v][2];
if (target == 4) out[z++] = 255;
if (i+1 == (int) s->img_x) break;
v = (bpp == 8) ? get8(s) : v2;
out[z++] = pal[v][0];
out[z++] = pal[v][1];
out[z++] = pal[v][2];
if (target == 4) out[z++] = 255;
}
skip(s, pad);
}
} else {
int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
int z = 0;
int easy=0;
skip(s, offset - 14 - hsz);
if (bpp == 24) width = 3 * s->img_x;
else if (bpp == 16) width = 2*s->img_x;
else /* bpp = 32 and pad = 0 */ width=0;
pad = (-width) & 3;
if (bpp == 24) {
easy = 1;
} else if (bpp == 32) {
if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
easy = 2;
}
if (!easy) {
if (!mr || !mg || !mb) { free(out); return stbi_error_puc("bad masks", "Corrupt BMP"); }
// right shift amt to put high bit in position #7
rshift = high_bit(mr)-7; rcount = bitcount(mr);
gshift = high_bit(mg)-7; gcount = bitcount(mg);
bshift = high_bit(mb)-7; bcount = bitcount(mb);
ashift = high_bit(ma)-7; acount = bitcount(ma);
}
for (j=0; j < (int) s->img_y; ++j) {
if (easy) {
for (i=0; i < (int) s->img_x; ++i) {
int a;
out[z+2] = get8u(s);
out[z+1] = get8u(s);
out[z+0] = get8u(s);
z += 3;
a = (easy == 2 ? get8(s) : 255);
if (target == 4) out[z++] = (uint8) a;
}
} else {
for (i=0; i < (int) s->img_x; ++i) {
uint32 v = (bpp == 16 ? get16le(s) : get32le(s));
int a;
out[z++] = (uint8) shiftsigned(v & mr, rshift, rcount);
out[z++] = (uint8) shiftsigned(v & mg, gshift, gcount);
out[z++] = (uint8) shiftsigned(v & mb, bshift, bcount);
a = (ma ? shiftsigned(v & ma, ashift, acount) : 255);
if (target == 4) out[z++] = (uint8) a;
}
}
skip(s, pad);
}
}
if (flip_vertically) {
stbi_uc t;
for (j=0; j < (int) s->img_y>>1; ++j) {
stbi_uc *p1 = out + j *s->img_x*target;
stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target;
for (i=0; i < (int) s->img_x*target; ++i) {
t = p1[i], p1[i] = p2[i], p2[i] = t;
}
}
}
if (req_comp && req_comp != target) {
out = convert_format(out, target, req_comp, s->img_x, s->img_y);
if (out == NULL) return out; // convert_format frees input on failure
}
*x = s->img_x;
*y = s->img_y;
if (comp) *comp = s->img_n;
return out;
}
static stbi_uc *stbi_bmp_load(stbi *s,int *x, int *y, int *comp, int req_comp)
{
return bmp_load(s, x,y,comp,req_comp);
}
// Targa Truevision - TGA
// by Jonathan Dummer
static int tga_info(stbi *s, int *x, int *y, int *comp)
{
int tga_w, tga_h, tga_comp;
int sz;
get8u(s); // discard Offset
sz = get8u(s); // color type
if( sz > 1 ) {
stbi_rewind(s);
return 0; // only RGB or indexed allowed
}
sz = get8u(s); // image type
// only RGB or grey allowed, +/- RLE
if ((sz != 1) && (sz != 2) && (sz != 3) && (sz != 9) && (sz != 10) && (sz != 11)) return 0;
skip(s,9);
tga_w = get16le(s);
if( tga_w < 1 ) {
stbi_rewind(s);
return 0; // test width
}
tga_h = get16le(s);
if( tga_h < 1 ) {
stbi_rewind(s);
return 0; // test height
}
sz = get8(s); // bits per pixel
// only RGB or RGBA or grey allowed
if ((sz != 8) && (sz != 16) && (sz != 24) && (sz != 32)) {
stbi_rewind(s);
return 0;
}
tga_comp = sz;
if (x) *x = tga_w;
if (y) *y = tga_h;
if (comp) *comp = tga_comp / 8;
return 1; // seems to have passed everything
}
int stbi_tga_info(stbi *s, int *x, int *y, int *comp)
{
return tga_info(s, x, y, comp);
}
static int tga_test(stbi *s)
{
int sz;
get8u(s); // discard Offset
sz = get8u(s); // color type
if ( sz > 1 ) return 0; // only RGB or indexed allowed
sz = get8u(s); // image type
if ( (sz != 1) && (sz != 2) && (sz != 3) && (sz != 9) && (sz != 10) && (sz != 11) ) return 0; // only RGB or grey allowed, +/- RLE
get16(s); // discard palette start
get16(s); // discard palette length
get8(s); // discard bits per palette color entry
get16(s); // discard x origin
get16(s); // discard y origin
if ( get16(s) < 1 ) return 0; // test width
if ( get16(s) < 1 ) return 0; // test height
sz = get8(s); // bits per pixel
if ( (sz != 8) && (sz != 16) && (sz != 24) && (sz != 32) ) return 0; // only RGB or RGBA or grey allowed
return 1; // seems to have passed everything
}
static int stbi_tga_test(stbi *s)
{
int res = tga_test(s);
stbi_rewind(s);
return res;
}
static stbi_uc *tga_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
// read in the TGA header stuff
int tga_offset = get8u(s);
int tga_indexed = get8u(s);
int tga_image_type = get8u(s);
int tga_is_RLE = 0;
int tga_palette_start = get16le(s);
int tga_palette_len = get16le(s);
int tga_palette_bits = get8u(s);
/* tga_x_origin = */ (void)get16le(s);
/* tga_y_origin = */ (void)get16le(s);
int tga_width = get16le(s);
int tga_height = get16le(s);
int tga_bits_per_pixel = get8u(s);
int tga_inverted = get8u(s);
// image data
unsigned char *tga_data;
unsigned char *tga_palette = NULL;
int i, j;
unsigned char raw_data[4];
unsigned char trans_data[4];
int RLE_count = 0;
int RLE_repeating = 0;
int read_next_pixel = 1;
// do a tiny bit of preprocessing
if ( tga_image_type >= 8 )
{
tga_image_type -= 8;
tga_is_RLE = 1;
}
/* int tga_alpha_bits = tga_inverted & 15; */
tga_inverted = 1 - ((tga_inverted >> 5) & 1);
// error check
if ( //(tga_indexed) ||
(tga_width < 1) || (tga_height < 1) ||
(tga_image_type < 1) || (tga_image_type > 3) ||
((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16) &&
(tga_bits_per_pixel != 24) && (tga_bits_per_pixel != 32))
)
{
return NULL; // we don't report this as a bad TGA because we don't even know if it's TGA
}
// If I'm paletted, then I'll use the number of bits from the palette
if ( tga_indexed )
{
tga_bits_per_pixel = tga_palette_bits;
}
// tga info
*x = tga_width;
*y = tga_height;
if ( (req_comp < 1) || (req_comp > 4) )
{
// just use whatever the file was
req_comp = tga_bits_per_pixel / 8;
*comp = req_comp;
}
else
{
// force a new number of components
*comp = tga_bits_per_pixel/8;
}
tga_data = (unsigned char*)malloc( tga_width * tga_height * req_comp );
if (!tga_data) return stbi_error_puc("outofmem", "Out of memory");
// skip to the data's starting position (offset usually = 0)
skip(s, tga_offset );
// do I need to load a palette?
if ( tga_indexed )
{
// any data to skip? (offset usually = 0)
skip(s, tga_palette_start );
// load the palette
tga_palette = (unsigned char*)malloc( tga_palette_len * tga_palette_bits / 8 );
if (!tga_palette) return stbi_error_puc("outofmem", "Out of memory");
if (!getn(s, tga_palette, tga_palette_len * tga_palette_bits / 8 )) {
free(tga_data);
free(tga_palette);
return stbi_error_puc("bad palette", "Corrupt TGA");
}
}
// load the data
trans_data[0] = trans_data[1] = trans_data[2] = trans_data[3] = 0;
for (i=0; i < tga_width * tga_height; ++i)
{
// if I'm in RLE mode, do I need to get a RLE chunk?
if ( tga_is_RLE )
{
if ( RLE_count == 0 )
{
// yep, get the next byte as a RLE command
int RLE_cmd = get8u(s);
RLE_count = 1 + (RLE_cmd & 127);
RLE_repeating = RLE_cmd >> 7;
read_next_pixel = 1;
}
else if ( !RLE_repeating )
{
read_next_pixel = 1;
}
}
else
{
read_next_pixel = 1;
}
// OK, if I need to read a pixel, do it now
if ( read_next_pixel )
{
// load however much data we did have
if ( tga_indexed )
{
// read in 1 byte, then perform the lookup
int pal_idx = get8u(s);
if ( pal_idx >= tga_palette_len )
{
// invalid index
pal_idx = 0;
}
pal_idx *= tga_bits_per_pixel / 8;
for (j = 0; j*8 < tga_bits_per_pixel; ++j)
{
raw_data[j] = tga_palette[pal_idx+j];
}
}
else
{
// read in the data raw
for (j = 0; j*8 < tga_bits_per_pixel; ++j)
{
raw_data[j] = get8u(s);
}
}
// convert raw to the intermediate format
switch (tga_bits_per_pixel)
{
case 8:
// Luminance => RGBA
trans_data[0] = raw_data[0];
trans_data[1] = raw_data[0];
trans_data[2] = raw_data[0];
trans_data[3] = 255;
break;
case 16: // B5G5R5A1 => RGBA
trans_data[3] = 255 * ((raw_data[1] & 0x80) >> 7);
case 15: // B5G5R5 => RGBA
trans_data[0] = (raw_data[1] >> 2) & 0x1F;
trans_data[1] = ((raw_data[1] << 3) & 0x1C) | ((raw_data[0] >> 5) & 0x07);
trans_data[2] = (raw_data[0] & 0x1F);
// Convert 5-bit channels to 8-bit channels by left shifting by three
// and repeating the first three bits to cover the range [0,255] with
// even spacing. For example:
// channel input bits: 4 3 2 1 0
// channel output bits: 4 3 2 1 0 4 3 2
trans_data[0] = (trans_data[0] << 3) | (trans_data[0] >> 2);
trans_data[1] = (trans_data[1] << 3) | (trans_data[1] >> 2);
trans_data[2] = (trans_data[2] << 3) | (trans_data[2] >> 2);
break;
case 24:
// BGR => RGBA
trans_data[0] = raw_data[2];
trans_data[1] = raw_data[1];
trans_data[2] = raw_data[0];
trans_data[3] = 255;
break;
case 32:
// BGRA => RGBA
trans_data[0] = raw_data[2];
trans_data[1] = raw_data[1];
trans_data[2] = raw_data[0];
trans_data[3] = raw_data[3];
break;
}
// clear the reading flag for the next pixel
read_next_pixel = 0;
} // end of reading a pixel
// convert to final format
switch (req_comp)
{
case 1:
// RGBA => Luminance
tga_data[i*req_comp+0] = compute_y(trans_data[0],trans_data[1],trans_data[2]);
break;
case 2:
// RGBA => Luminance,Alpha
tga_data[i*req_comp+0] = compute_y(trans_data[0],trans_data[1],trans_data[2]);
tga_data[i*req_comp+1] = trans_data[3];
break;
case 3:
// RGBA => RGB
tga_data[i*req_comp+0] = trans_data[0];
tga_data[i*req_comp+1] = trans_data[1];
tga_data[i*req_comp+2] = trans_data[2];
break;
case 4:
// RGBA => RGBA
tga_data[i*req_comp+0] = trans_data[0];
tga_data[i*req_comp+1] = trans_data[1];
tga_data[i*req_comp+2] = trans_data[2];
tga_data[i*req_comp+3] = trans_data[3];
break;
}
// in case we're in RLE mode, keep counting down
--RLE_count;
}
// do I need to invert the image?
if ( tga_inverted )
{
for (j = 0; j*2 < tga_height; ++j)
{
#ifdef PANDORA
const int taille = tga_width * req_comp;
unsigned char* index1 = tga_data + (j * taille);
unsigned char* index2 = tga_data + ((tga_height - 1 - j) * taille);
unsigned char temp[taille];
memcpy(temp, index1, taille);
memcpy(index1, index2, taille);
memcpy(index2, temp, taille);
#else
int index1 = j * tga_width * req_comp;
int index2 = (tga_height - 1 - j) * tga_width * req_comp;
for (i = tga_width * req_comp; i > 0; --i)
{
unsigned char temp = tga_data[index1];
tga_data[index1] = tga_data[index2];
tga_data[index2] = temp;
++index1;
++index2;
}
#endif
}
}
// clear my palette, if I had one
free(tga_palette);
// OK, done
return tga_data;
}
static stbi_uc *stbi_tga_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
return tga_load(s,x,y,comp,req_comp);
}
// *************************************************************************************************
// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB
static int psd_test(stbi *s)
{
if (get32(s) != 0x38425053) return 0; // "8BPS"
else return 1;
}
static int stbi_psd_test(stbi *s)
{
int r = psd_test(s);
stbi_rewind(s);
return r;
}
static stbi_uc *psd_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
int pixelCount;
int channelCount, compression;
int channel, i, count, len;
int w,h;
uint8 *out;
// Check identifier
if (get32(s) != 0x38425053) // "8BPS"
return stbi_error_puc("not PSD", "Corrupt PSD image");
// Check file type version.
if (get16(s) != 1)
return stbi_error_puc("wrong version", "Unsupported version of PSD image");
// Skip 6 reserved bytes.
skip(s, 6 );
// Read the number of channels (R, G, B, A, etc).
channelCount = get16(s);
if (channelCount < 0 || channelCount > 16)
return stbi_error_puc("wrong channel count", "Unsupported number of channels in PSD image");
// Read the rows and columns of the image.
h = get32(s);
w = get32(s);
// Make sure the depth is 8 bits.
if (get16(s) != 8)
return stbi_error_puc("unsupported bit depth", "PSD bit depth is not 8 bit");
// Make sure the color mode is RGB.
// Valid options are:
// 0: Bitmap
// 1: Grayscale
// 2: Indexed color
// 3: RGB color
// 4: CMYK color
// 7: Multichannel
// 8: Duotone
// 9: Lab color
if (get16(s) != 3)
return stbi_error_puc("wrong color format", "PSD is not in RGB color format");
// Skip the Mode Data. (It's the palette for indexed color; other info for other modes.)
skip(s,get32(s) );
// Skip the image resources. (resolution, pen tool paths, etc)
skip(s, get32(s) );
// Skip the reserved data.
skip(s, get32(s) );
// Find out if the data is compressed.
// Known values:
// 0: no compression
// 1: RLE compressed
compression = get16(s);
if (compression > 1)
return stbi_error_puc("bad compression", "PSD has an unknown compression format");
// Create the destination image.
out = (stbi_uc *) malloc(4 * w*h);
if (!out) return stbi_error_puc("outofmem", "Out of memory");
pixelCount = w*h;
// Initialize the data to zero.
//memset( out, 0, pixelCount * 4 );
// Finally, the image data.
if (compression) {
// RLE as used by .PSD and .TIFF
// Loop until you get the number of unpacked bytes you are expecting:
// Read the next source byte into n.
// If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
// Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
// Else if n is 128, noop.
// Endloop
// The RLE-compressed data is preceded by a 2-byte data count for each row in the data,
// which we're going to just skip.
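// For example, under these rules the byte run 02 'A' 'B' 'C' FE 'D'
// unpacks to A B C D D D.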
skip(s, h * channelCount * 2 );
// Read the RLE data by channel.
for (channel = 0; channel < 4; channel++) {
uint8 *p;
p = out+channel;
if (channel >= channelCount) {
// Fill this channel with default data.
for (i = 0; i < pixelCount; i++) *p = (channel == 3 ? 255 : 0), p += 4;
} else {
// Read the RLE data.
count = 0;
while (count < pixelCount) {
len = get8(s);
if (len == 128) {
// No-op.
} else if (len < 128) {
// Copy next len+1 bytes literally.
len++;
count += len;
while (len) {
*p = get8u(s);
p += 4;
len--;
}
} else if (len > 128) {
uint8 val;
// Next -len+1 bytes in the dest are replicated from next source byte.
// (Interpret len as a negative 8-bit int.)
len ^= 0x0FF;
len += 2;
val = get8u(s);
count += len;
while (len) {
*p = val;
p += 4;
len--;
}
}
}
}
}
} else {
// We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...)
// where each channel consists of an 8-bit value for each pixel in the image.
// Read the data by channel.
for (channel = 0; channel < 4; channel++) {
uint8 *p;
p = out + channel;
if (channel >= channelCount) {
// Fill this channel with default data.
for (i = 0; i < pixelCount; i++) *p = channel == 3 ? 255 : 0, p += 4;
} else {
// Read the data.
for (i = 0; i < pixelCount; i++)
*p = get8u(s), p += 4;
}
}
}
if (req_comp && req_comp != 4) {
out = convert_format(out, 4, req_comp, w, h);
if (out == NULL) return out; // convert_format frees input on failure
}
if (comp) *comp = channelCount;
*y = h;
*x = w;
return out;
}
static stbi_uc *stbi_psd_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
return psd_load(s,x,y,comp,req_comp);
}
// *************************************************************************************************
// Softimage PIC loader
// by Tom Seddon
//
// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format
// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/
static int pic_is4(stbi *s,const char *str)
{
int i;
for (i=0; i<4; ++i)
if (get8(s) != (stbi_uc)str[i])
return 0;
return 1;
}
static int pic_test(stbi *s)
{
int i;
if (!pic_is4(s,"\x53\x80\xF6\x34"))
return 0;
for(i=0;i<84;++i)
get8(s);
if (!pic_is4(s,"PICT"))
return 0;
return 1;
}
typedef struct
{
stbi_uc size,type,channel;
} pic_packet_t;
static stbi_uc *pic_readval(stbi *s, int channel, stbi_uc *dest)
{
int mask=0x80, i;
for (i=0; i<4; ++i, mask>>=1) {
if (channel & mask) {
if (at_eof(s)) return stbi_error_puc("bad file","PIC file too short");
dest[i]=get8u(s);
}
}
return dest;
}
static void pic_copyval(int channel,stbi_uc *dest,const stbi_uc *src)
{
int mask=0x80,i;
for (i=0;i<4; ++i, mask>>=1)
if (channel&mask)
dest[i]=src[i];
}
static stbi_uc *pic_load2(stbi *s,int width,int height,int *comp, stbi_uc *result)
{
int act_comp=0,num_packets=0,y,chained;
pic_packet_t packets[10];
// this will (should...) cater for even some bizarre stuff like having data
// for the same channel in multiple packets.
do {
pic_packet_t *packet;
if (num_packets==sizeof(packets)/sizeof(packets[0]))
return stbi_error_puc("bad format","too many packets");
packet = &packets[num_packets++];
chained = get8(s);
packet->size = get8u(s);
packet->type = get8u(s);
packet->channel = get8u(s);
act_comp |= packet->channel;
if (at_eof(s)) return stbi_error_puc("bad file","file too short (reading packets)");
if (packet->size != 8) return stbi_error_puc("bad format","packet isn't 8bpp");
} while (chained);
*comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
for(y=0; y<height; ++y) {
int packet_idx;
for(packet_idx=0; packet_idx < num_packets; ++packet_idx) {
pic_packet_t *packet = &packets[packet_idx];
stbi_uc *dest = result+y*width*4;
switch (packet->type) {
default:
return stbi_error_puc("bad format","packet has bad compression type");
case 0: {//uncompressed
int x;
for(x=0;x<width;++x, dest+=4)
if (!pic_readval(s,packet->channel,dest))
return 0;
break;
}
case 1://Pure RLE
{
int left=width, i;
while (left>0) {
stbi_uc count,value[4];
count=get8u(s);
if (at_eof(s)) return stbi_error_puc("bad file","file too short (pure read count)");
if (count > left)
count = (uint8) left;
if (!pic_readval(s,packet->channel,value)) return 0;
for(i=0; i<count; ++i,dest+=4)
pic_copyval(packet->channel,dest,value);
left -= count;
}
}
break;
case 2: {//Mixed RLE
int left=width;
while (left>0) {
int count = get8(s), i;
if (at_eof(s)) return stbi_error_puc("bad file","file too short (mixed read count)");
if (count >= 128) { // Repeated
stbi_uc value[4];
int i;
if (count==128)
count = get16(s);
else
count -= 127;
if (count > left)
return stbi_error_puc("bad file","scanline overrun");
if (!pic_readval(s,packet->channel,value))
return 0;
for(i=0;i<count;++i, dest += 4)
pic_copyval(packet->channel,dest,value);
} else { // Raw
++count;
if (count>left) return stbi_error_puc("bad file","scanline overrun");
for(i=0;i<count;++i, dest+=4)
if (!pic_readval(s,packet->channel,dest))
return 0;
}
left-=count;
}
break;
}
}
}
}
return result;
}
static stbi_uc *pic_load(stbi *s,int *px,int *py,int *comp,int req_comp)
{
stbi_uc *result;
int i, x,y;
for (i=0; i<92; ++i)
get8(s);
x = get16(s);
y = get16(s);
if (at_eof(s)) return stbi_error_puc("bad file","file too short (pic header)");
if ((1 << 28) / x < y) return stbi_error_puc("too large", "Image too large to decode");
get32(s); //skip `ratio'
get16(s); //skip `fields'
get16(s); //skip `pad'
// intermediate buffer is RGBA
result = (stbi_uc *) malloc(x*y*4);
memset(result, 0xff, x*y*4);
if (!pic_load2(s,x,y,comp, result)) {
free(result);
result=0;
}
*px = x;
*py = y;
if (req_comp == 0) req_comp = *comp;
result=convert_format(result,4,req_comp,x,y);
return result;
}
static int stbi_pic_test(stbi *s)
{
int r = pic_test(s);
stbi_rewind(s);
return r;
}
static stbi_uc *stbi_pic_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
return pic_load(s,x,y,comp,req_comp);
}
#ifndef STBI_NO_GIF
// *************************************************************************************************
// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb
typedef struct stbi_gif_lzw_struct {
int16 prefix;
uint8 first;
uint8 suffix;
} stbi_gif_lzw;
typedef struct stbi_gif_struct
{
int w,h;
stbi_uc *out; // output buffer (always 4 components)
int flags, bgindex, ratio, transparent, eflags;
uint8 pal[256][4];
uint8 lpal[256][4];
stbi_gif_lzw codes[4096];
uint8 *color_table;
int parse, step;
int lflags;
int start_x, start_y;
int max_x, max_y;
int cur_x, cur_y;
int line_size;
} stbi_gif;
static int gif_test(stbi *s)
{
int sz;
if (get8(s) != 'G' || get8(s) != 'I' || get8(s) != 'F' || get8(s) != '8') return 0;
sz = get8(s);
if (sz != '9' && sz != '7') return 0;
if (get8(s) != 'a') return 0;
return 1;
}
static int stbi_gif_test(stbi *s)
{
int r = gif_test(s);
stbi_rewind(s);
return r;
}
static void stbi_gif_parse_colortable(stbi *s, uint8 pal[256][4], int num_entries, int transp)
{
int i;
for (i=0; i < num_entries; ++i) {
pal[i][2] = get8u(s);
pal[i][1] = get8u(s);
pal[i][0] = get8u(s);
pal[i][3] = transp ? 0 : 255;
}
}
static int stbi_gif_header(stbi *s, stbi_gif *g, int *comp, int is_info)
{
uint8 version;
if (get8(s) != 'G' || get8(s) != 'I' || get8(s) != 'F' || get8(s) != '8')
return stbi_error("not GIF", "Corrupt GIF");
version = get8u(s);
if (version != '7' && version != '9') return stbi_error("not GIF", "Corrupt GIF");
if (get8(s) != 'a') return stbi_error("not GIF", "Corrupt GIF");
failure_reason = "";
g->w = get16le(s);
g->h = get16le(s);
g->flags = get8(s);
g->bgindex = get8(s);
g->ratio = get8(s);
g->transparent = -1;
if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments
if (is_info) return 1;
if (g->flags & 0x80)
stbi_gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1);
return 1;
}
static int stbi_gif_info_raw(stbi *s, int *x, int *y, int *comp)
{
stbi_gif g;
if (!stbi_gif_header(s, &g, comp, 1)) {
stbi_rewind( s );
return 0;
}
if (x) *x = g.w;
if (y) *y = g.h;
return 1;
}
static void stbi_out_gif_code(stbi_gif *g, uint16 code)
{
uint8 *p, *c;
// recurse to decode the prefixes, since the linked-list is backwards,
// and working backwards through an interleaved image would be nasty
if (g->codes[code].prefix >= 0)
stbi_out_gif_code(g, g->codes[code].prefix);
if (g->cur_y >= g->max_y) return;
p = &g->out[g->cur_x + g->cur_y];
c = &g->color_table[g->codes[code].suffix * 4];
if (c[3] >= 128) {
p[0] = c[2];
p[1] = c[1];
p[2] = c[0];
p[3] = c[3];
}
g->cur_x += 4;
if (g->cur_x >= g->max_x) {
g->cur_x = g->start_x;
g->cur_y += g->step;
while (g->cur_y >= g->max_y && g->parse > 0) {
g->step = (1 << g->parse) * g->line_size;
g->cur_y = g->start_y + (g->step >> 1);
--g->parse;
}
}
}
static uint8 *stbi_process_gif_raster(stbi *s, stbi_gif *g)
{
uint8 lzw_cs;
int32 len, code;
uint32 first;
int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear;
stbi_gif_lzw *p;
lzw_cs = get8u(s);
clear = 1 << lzw_cs;
first = 1;
codesize = lzw_cs + 1;
codemask = (1 << codesize) - 1;
bits = 0;
valid_bits = 0;
for (code = 0; code < clear; code++) {
g->codes[code].prefix = -1;
g->codes[code].first = (uint8) code;
g->codes[code].suffix = (uint8) code;
}
// support no starting clear code
avail = clear+2;
oldcode = -1;
len = 0;
for(;;) {
if (valid_bits < codesize) {
if (len == 0) {
len = get8(s); // start new block
if (len == 0)
return g->out;
}
--len;
bits |= (int32) get8(s) << valid_bits;
valid_bits += 8;
} else {
int32 code = bits & codemask;
bits >>= codesize;
valid_bits -= codesize;
// @OPTIMIZE: is there some way we can accelerate the non-clear path?
if (code == clear) { // clear code
codesize = lzw_cs + 1;
codemask = (1 << codesize) - 1;
avail = clear + 2;
oldcode = -1;
first = 0;
} else if (code == clear + 1) { // end of stream code
skip(s, len);
while ((len = get8(s)) > 0)
skip(s,len);
return g->out;
} else if (code <= avail) {
if (first) return stbi_error_puc("no clear code", "Corrupt GIF");
if (oldcode >= 0) {
p = &g->codes[avail++];
if (avail > 4096) return stbi_error_puc("too many codes", "Corrupt GIF");
p->prefix = (int16) oldcode;
p->first = g->codes[oldcode].first;
p->suffix = (code == avail) ? p->first : g->codes[code].first;
} else if (code == avail) {
return stbi_error_puc("illegal code in raster", "Corrupt GIF");
}
stbi_out_gif_code(g, (uint16) code);
if ((avail & codemask) == 0 && avail <= 0x0FFF) {
codesize++;
codemask = (1 << codesize) - 1;
}
oldcode = code;
} else {
return stbi_error_puc("illegal code in raster", "Corrupt GIF");
}
}
}
}
static void stbi_fill_gif_background(stbi_gif *g)
{
int i;
uint8 *c = g->pal[g->bgindex];
// @OPTIMIZE: write a dword at a time
for (i = 0; i < g->w * g->h * 4; i += 4) {
uint8 *p = &g->out[i];
p[0] = c[2];
p[1] = c[1];
p[2] = c[0];
p[3] = c[3];
}
}
// this function is designed to support animated gifs, although stb_image doesn't support it
static uint8 *stbi_gif_load_next(stbi *s, stbi_gif *g, int *comp, int req_comp)
{
int i;
uint8 *old_out = 0;
if (g->out == 0) {
if (!stbi_gif_header(s, g, comp,0)) return 0; // failure_reason set by stbi_gif_header
g->out = (uint8 *) malloc(4 * g->w * g->h);
if (g->out == 0) return stbi_error_puc("outofmem", "Out of memory");
stbi_fill_gif_background(g);
} else {
// animated-gif-only path
if (((g->eflags & 0x1C) >> 2) == 3) {
old_out = g->out;
g->out = (uint8 *) malloc(4 * g->w * g->h);
if (g->out == 0) return stbi_error_puc("outofmem", "Out of memory");
memcpy(g->out, old_out, g->w*g->h*4);
}
}
for (;;) {
switch (get8(s)) {
case 0x2C: /* Image Descriptor */
{
int32 x, y, w, h;
uint8 *o;
x = get16le(s);
y = get16le(s);
w = get16le(s);
h = get16le(s);
if (((x + w) > (g->w)) || ((y + h) > (g->h)))
return stbi_error_puc("bad Image Descriptor", "Corrupt GIF");
g->line_size = g->w * 4;
g->start_x = x * 4;
g->start_y = y * g->line_size;
g->max_x = g->start_x + w * 4;
g->max_y = g->start_y + h * g->line_size;
g->cur_x = g->start_x;
g->cur_y = g->start_y;
g->lflags = get8(s);
if (g->lflags & 0x40) {
g->step = 8 * g->line_size; // first interlaced spacing
g->parse = 3;
} else {
g->step = g->line_size;
g->parse = 0;
}
if (g->lflags & 0x80) {
stbi_gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1);
g->color_table = (uint8 *) g->lpal;
} else if (g->flags & 0x80) {
for (i=0; i < 256; ++i) // @OPTIMIZE: reset only the previous transparent
g->pal[i][3] = 255;
if (g->transparent >= 0 && (g->eflags & 0x01))
g->pal[g->transparent][3] = 0;
g->color_table = (uint8 *) g->pal;
} else {
return stbi_error_puc("missing color table", "Corrupt GIF");
}
o = stbi_process_gif_raster(s, g);
if (o == NULL) return NULL;
if (req_comp && req_comp != 4)
o = convert_format(o, 4, req_comp, g->w, g->h);
return o;
}
case 0x21: // Comment Extension.
{
int len;
if (get8(s) == 0xF9) { // Graphic Control Extension.
len = get8(s);
if (len == 4) {
g->eflags = get8(s);
get16le(s); // delay
g->transparent = get8(s);
} else {
skip(s, len);
break;
}
}
while ((len = get8(s)) != 0)
skip(s, len);
break;
}
case 0x3B: // gif stream termination code
return (uint8 *) 1;
default:
return stbi_error_puc("unknown code", "Corrupt GIF");
}
}
}
static stbi_uc *stbi_gif_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
uint8 *u = 0;
stbi_gif g={0};
u = stbi_gif_load_next(s, &g, comp, req_comp);
if (u == (void *) 1) u = 0; // end of animated gif marker
if (u) {
*x = g.w;
*y = g.h;
}
return u;
}
static int stbi_gif_info(stbi *s, int *x, int *y, int *comp)
{
return stbi_gif_info_raw(s,x,y,comp);
}
#endif // ! STBI_NO_GIF
// *************************************************************************************************
// Radiance RGBE HDR loader
// originally by Nicolas Schulz
#ifndef STBI_NO_HDR
static int hdr_test(stbi *s)
{
const char *signature = "#?RADIANCE\n";
int i;
for (i=0; signature[i]; ++i)
if (get8(s) != signature[i])
return 0;
return 1;
}
static int stbi_hdr_test(stbi* s)
{
int r = hdr_test(s);
stbi_rewind(s);
return r;
}
#define STBI_HDR_BUFLEN 1024
static char *hdr_gettoken(stbi *z, char *buffer)
{
int len=0;
char c = '\0';
c = (char) get8(z);
while (!at_eof(z) && c != '\n') {
buffer[len++] = c;
if (len == STBI_HDR_BUFLEN-1) {
// flush to end of line
while (!at_eof(z) && get8(z) != '\n')
;
break;
}
c = (char) get8(z);
}
buffer[len] = 0;
return buffer;
}
static void hdr_convert(float *output, stbi_uc *input, int req_comp)
{
if ( input[3] != 0 ) {
float f1;
// Exponent
f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8));
if (req_comp <= 2) {
output[0] = (input[0] + input[1] + input[2]) * f1 / 3;
} else {
output[0] = input[0] * f1;
output[1] = input[1] * f1;
output[2] = input[2] * f1;
}
if (req_comp == 2) output[1] = 1;
if (req_comp == 4) output[3] = 1;
} else {
switch (req_comp) {
case 4: output[3] = 1; /* fallthrough */
case 3: output[0] = output[1] = output[2] = 0;
break;
case 2: output[1] = 1; /* fallthrough */
case 1: output[0] = 0;
break;
}
}
}
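// In effect hdr_convert decodes a shared-exponent RGBE pixel as
// value = mantissa_byte * 2^(exponent_byte - 136), which is the same as
// (mantissa / 256) * 2^(exponent - 128), i.e. the usual Radiance RGBE decode
// without the +0.5 rounding bias; an exponent byte of zero is treated as black.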
static float *hdr_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
char buffer[STBI_HDR_BUFLEN];
char *token;
int valid = 0;
int width, height;
stbi_uc *scanline;
float *hdr_data;
int len;
unsigned char count, value;
int i, j, k, c1,c2, z;
// Check identifier
if (strcmp(hdr_gettoken(s,buffer), "#?RADIANCE") != 0)
return stbi_error_pf("not HDR", "Corrupt HDR image");
// Parse header
for(;;) {
token = hdr_gettoken(s,buffer);
if (token[0] == 0) break;
if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
}
if (!valid) return stbi_error_pf("unsupported format", "Unsupported HDR format");
// Parse width and height
// can't use sscanf() if we're not using stdio!
token = hdr_gettoken(s,buffer);
if (strncmp(token, "-Y ", 3)) return stbi_error_pf("unsupported data layout", "Unsupported HDR format");
token += 3;
height = strtol(token, &token, 10);
while (*token == ' ') ++token;
if (strncmp(token, "+X ", 3)) return stbi_error_pf("unsupported data layout", "Unsupported HDR format");
token += 3;
width = strtol(token, NULL, 10);
*x = width;
*y = height;
*comp = 3;
if (req_comp == 0) req_comp = 3;
// Read data
hdr_data = (float *) malloc(height * width * req_comp * sizeof(*hdr_data));
// Load image data
   // image data is stored as some number of scan lines
if ( width < 8 || width >= 32768) {
// Read flat data
for (j=0; j < height; ++j) {
for (i=0; i < width; ++i) {
stbi_uc rgbe[4];
main_decode_loop:
getn(s, rgbe, 4);
hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp);
}
}
} else {
// Read RLE-encoded data
scanline = NULL;
for (j = 0; j < height; ++j) {
c1 = get8(s);
c2 = get8(s);
len = get8(s);
if (c1 != 2 || c2 != 2 || (len & 0x80)) {
// not run-length encoded, so we have to actually use THIS data as a decoded
// pixel (note this can't be a valid pixel--one of RGB must be >= 128)
uint8 rgbe[4];
rgbe[0] = (uint8) c1;
rgbe[1] = (uint8) c2;
rgbe[2] = (uint8) len;
rgbe[3] = (uint8) get8u(s);
hdr_convert(hdr_data, rgbe, req_comp);
i = 1;
j = 0;
free(scanline);
goto main_decode_loop; // yes, this makes no sense
}
len <<= 8;
len |= get8(s);
if (len != width) { free(hdr_data); free(scanline); return stbi_error_pf("invalid decoded scanline length", "corrupt HDR"); }
if (scanline == NULL) scanline = (stbi_uc *) malloc(width * 4);
for (k = 0; k < 4; ++k) {
i = 0;
while (i < width) {
count = get8u(s);
if (count > 128) {
// Run
value = get8u(s);
count -= 128;
for (z = 0; z < count; ++z)
scanline[i++ * 4 + k] = value;
} else {
// Dump
for (z = 0; z < count; ++z)
scanline[i++ * 4 + k] = get8u(s);
}
}
}
for (i=0; i < width; ++i)
hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp);
}
free(scanline);
}
return hdr_data;
}
static float *stbi_hdr_load(stbi *s, int *x, int *y, int *comp, int req_comp)
{
return hdr_load(s,x,y,comp,req_comp);
}
static int stbi_hdr_info(stbi *s, int *x, int *y, int *comp)
{
char buffer[STBI_HDR_BUFLEN];
char *token;
int valid = 0;
if (strcmp(hdr_gettoken(s,buffer), "#?RADIANCE") != 0) {
stbi_rewind( s );
return 0;
}
for(;;) {
token = hdr_gettoken(s,buffer);
if (token[0] == 0) break;
if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
}
if (!valid) {
stbi_rewind( s );
return 0;
}
token = hdr_gettoken(s,buffer);
if (strncmp(token, "-Y ", 3)) {
stbi_rewind( s );
return 0;
}
token += 3;
*y = strtol(token, &token, 10);
while (*token == ' ') ++token;
if (strncmp(token, "+X ", 3)) {
stbi_rewind( s );
return 0;
}
token += 3;
*x = strtol(token, NULL, 10);
*comp = 3;
return 1;
}
#endif // STBI_NO_HDR
static int stbi_bmp_info(stbi *s, int *x, int *y, int *comp)
{
int hsz;
if (get8(s) != 'B' || get8(s) != 'M') {
stbi_rewind( s );
return 0;
}
skip(s,12);
hsz = get32le(s);
if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108) {
stbi_rewind( s );
return 0;
}
if (hsz == 12) {
*x = get16le(s);
*y = get16le(s);
} else {
*x = get32le(s);
*y = get32le(s);
}
if (get16le(s) != 1) {
stbi_rewind( s );
return 0;
}
*comp = get16le(s) / 8;
return 1;
}
static int stbi_psd_info(stbi *s, int *x, int *y, int *comp)
{
int channelCount;
if (get32(s) != 0x38425053) {
stbi_rewind( s );
return 0;
}
if (get16(s) != 1) {
stbi_rewind( s );
return 0;
}
skip(s, 6);
channelCount = get16(s);
if (channelCount < 0 || channelCount > 16) {
stbi_rewind( s );
return 0;
}
*y = get32(s);
*x = get32(s);
if (get16(s) != 8) {
stbi_rewind( s );
return 0;
}
if (get16(s) != 3) {
stbi_rewind( s );
return 0;
}
*comp = 4;
return 1;
}
static int stbi_pic_info(stbi *s, int *x, int *y, int *comp)
{
int act_comp=0,num_packets=0,chained;
pic_packet_t packets[10];
skip(s, 92);
*x = get16(s);
*y = get16(s);
if (at_eof(s)) return 0;
if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) {
stbi_rewind( s );
return 0;
}
skip(s, 8);
do {
pic_packet_t *packet;
if (num_packets==sizeof(packets)/sizeof(packets[0]))
return 0;
packet = &packets[num_packets++];
chained = get8(s);
packet->size = get8u(s);
packet->type = get8u(s);
packet->channel = get8u(s);
act_comp |= packet->channel;
if (at_eof(s)) {
stbi_rewind( s );
return 0;
}
if (packet->size != 8) {
stbi_rewind( s );
return 0;
}
} while (chained);
*comp = (act_comp & 0x10 ? 4 : 3);
return 1;
}
static int stbi_info_main(stbi *s, int *x, int *y, int *comp, int *fmt)
{
if (stbi_jpeg_test(s)) { *fmt = STBI_jpeg; return stbi_jpeg_info(s, x, y, comp); }
if (stbi_png_test(s)) { *fmt = STBI_png; return stbi_png_info(s, x, y, comp); }
#ifndef STBI_NO_GIF
if (stbi_gif_test(s)) { *fmt = STBI_gif; return stbi_gif_info(s, x, y, comp); }
#endif // !STBI_NO_GIF
if (stbi_bmp_test(s)) { *fmt = STBI_bmp; return stbi_bmp_info(s, x, y, comp); }
if (stbi_psd_test(s)) { *fmt = STBI_psd; return stbi_psd_info(s, x, y, comp); }
if (stbi_pic_test(s)) { *fmt = STBI_pic; return stbi_pic_info(s, x, y, comp); }
#ifndef STBI_NO_HDR
if (stbi_hdr_test(s)) { *fmt = STBI_hdr; return stbi_hdr_info(s, x, y, comp); }
#endif
// test tga last because it's a crappy test!
if (stbi_tga_test(s)) { *fmt = STBI_tga; return stbi_tga_info(s, x, y, comp); }
*fmt = STBI_unknown;
return stbi_error("unknown image type", "Image not of any known type, or corrupt");
}
#ifndef STBI_NO_STDIO
int stbi_info(char const *filename, int *x, int *y, int *comp, int *fmt)
{
FILE *f = fopen(filename, "rb");
int result;
if (!f) return stbi_error("can't fopen", "Unable to open file");
result = stbi_info_from_file(f, x, y, comp, fmt);
fclose(f);
return result;
}
int stbi_info_from_file(FILE *f, int *x, int *y, int *comp, int *fmt)
{
int r;
stbi s;
long pos = ftell(f);
start_file(&s, f);
r = stbi_info_main(&s,x,y,comp,fmt);
fseek(f,pos,SEEK_SET);
return r;
}
#endif // !STBI_NO_STDIO
int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int *fmt)
{
stbi s;
start_mem(&s,buffer,len);
return stbi_info_main(&s,x,y,comp,fmt);
}
#ifndef STBI_NO_CALLBACK
int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp, int *fmt)
{
stbi s;
start_callbacks(&s, (stbi_io_callbacks *) c, user);
return stbi_info_main(&s,x,y,comp,fmt);
}
#endif // !STBI_NO_CALLBACK
} // namespace stbi<|fim▁end|> | return feof((FILE*) user); |
<|file_name|>TestErfcRelaxed.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
#include "TestErfc.rs"
#pragma rs_fp_relaxed<|fim▁end|> | /*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); |
<|file_name|>gulpfile.js<|end_file_name|><|fim▁begin|>var gulp = require('gulp')
var mocha = require('gulp-mocha')
var nodemon = require('gulp-nodemon')
var env = require('gulp-env');
gulp.task('API-Server', (cb) => {
let started = false
env({
vars: {
httpPort: 8080
}
});
<|fim▁hole|> })
.on('start', () => {
if (!started) {
started = true
return cb()
}
})
.on('restart', () => {
console.log('restarting')
})
})
gulp.task('test', ['API-Server'], function() {
return gulp.src('./test/index.js')
.pipe(mocha())
.once('error', function() {
process.exit(1)
})
.once('end', function() {
process.exit()
})
})<|fim▁end|> | return nodemon({
script: 'index.js' |
<|file_name|>Controller.java<|end_file_name|><|fim▁begin|>package uk.co.lucelle;
import org.springframework.web.bind.annotation.*;
@RestController
public class Controller {
@RequestMapping("/")
public @ResponseBody String index(@RequestBody String data) {
// echo<|fim▁hole|>
}<|fim▁end|> | return data;
} |
<|file_name|>Crafting.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
'''
Copyright (c) 2012 Jeremy Parks ( xanthic.9478 )
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Purpose: Generates a crafting guide based on current market prices
'''
import urllib, json, time, threading, datetime, math
import items_l, name_list
from Queue import Queue
from collections import defaultdict
from ftplib import FTP
# FTP Login
ftp_url = "text"
ftp_user = "goes"
ftp_pass = "here"
# Dictionary of all the items we need prices of
recipeDict = items_l.items
# list of items we compute the cost of that is used by every craft
'''
Dictionary structure
Key: tier(t1,t2,t3,t4,t5)
Key: ore, wood, cloth, leather, bone, claw, fang, scale, totem, venom, blood, ingot, plank, bolt, dowel, plated_dowel, thread, small_haft, large_haft, leather_section, string, lump(sometimes)
name: full name
cost: buy cost or computed make cost
recipe: items to build, or None for base items
'''
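# For illustration, a populated entry might look like the following (names and
# prices are hypothetical; the real recipe quantities come from items_l.itec):
#
#   items['t2']['ore'] = {'name': 'Iron Ore', 'cost': 45, 'recipe': None}
#   items['t2']['ingot'] = {'name': 't2_ingot', 'cost': 135, 'recipe': {'ore': 3}}
#
# Base materials bought off the trading post keep 'recipe': None, while crafted
# parts get their 'cost' filled in by appendCosts() from their recipe contents.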
items = {}
insigs = {}
total_buy = defaultdict(int)
total_cost = 0
# Store our xp needed to reach each level of crafting
xp_to_level = [0]
#threaded function to get info about an item
class getItem(threading.Thread):
def __init__(self,itemid,tier,sub_type,level,name):
self.url1 = "http://www.gw2spidy.com/api/v0.9/json/item/"
self.url2 = "http://www.guildwarstrade.com/api/public/item?id="
self.itemid = itemid
self.tier = tier
self.sub_type = sub_type
self.level = level
self.nm = name
self.result = None
threading.Thread.__init__(self)
def get_result(self):
self.result['result']['tier'] = self.tier
self.result['result']['sub_type'] = self.sub_type
if not self.level == None:
self.result['result']['level'] = self.level
self.result['result']['sname'] = self.nm
return self.result
# Function for Guildwarstrade prices
def gwt(self,result,item):
f = json.load(result)
self.result = {}
self.result['result'] = {}
self.result['result']['min_sale_unit_price'] = f['sell']
self.result['result']['name'] = name_list.names[item]
self.result['result']['data_id'] = item
def run(self):
while(1):
try:
f = urllib.urlopen(self.url1+self.itemid)
self.result = json.load(f)
break
except Exception, err:
print 'ERROR: %s. Trying backup website.\n' % str(err)#"Error getting url, trying again "+ self.url + self.item
try:
f = urllib.urlopen(self.url2+self.itemid)
self.gwt(f,self.itemid)
break
except Exception, err:
print 'ERROR: %s. Backup website failed.\n' % str(err)#"Error getting url, trying again "+ self.url + self.item
time.sleep(1)
# Get our item data using threads and a Queue
def getItemDict():
def producer(q):
for tier in recipeDict:
for sub_type in recipeDict[tier]:
thread = getItem(recipeDict[tier][sub_type],tier,sub_type,None,None)
thread.start()
q.put(thread,True)
def consumer(q):
num = 1
den = 0
for tier in recipeDict:
den += len(recipeDict[tier])
while num <= den:
thread = q.get(True)
thread.join()
tmp = thread.get_result()
items.setdefault(tmp['result']['tier'],{})
items[tmp['result']['tier']].setdefault(tmp['result']['sub_type'],{})
items[tmp['result']['tier']][tmp['result']['sub_type']]['name'] = tmp['result']['name']
items[tmp['result']['tier']][tmp['result']['sub_type']]['cost'] = tmp['result']['min_sale_unit_price']
items[tmp['result']['tier']][tmp['result']['sub_type']]['recipe'] = None
print str(num) +' of '+ str(den)
num += 1
q = Queue(3)
p_thread = threading.Thread(target=producer, args=(q,))
c_thread = threading.Thread(target=consumer, args=(q,))
p_thread.start()
c_thread.start()
p_thread.join()
c_thread.join()
# Get our insignia price data using threads and a Queue
def getInsigDict():
def producer2(q):
for tier in items_l.insig_list:
for sub_type in items_l.insig_list[tier]:
for level in items_l.insig_list[tier][sub_type]:
for name in items_l.insig_list[tier][sub_type][level]:
thread = getItem(items_l.insig_list[tier][sub_type][level][name],tier,sub_type,level,name)
thread.start()
q.put(thread,True)
def consumer2(q):
num = 1
den = 0
for tier in items_l.insig_list:
for sub_type in items_l.insig_list[tier]:
for level in items_l.insig_list[tier][sub_type]:
den += len(items_l.insig_list[tier][sub_type][level])
while num <= den:
thread = q.get(True)
thread.join()
tmp = thread.get_result()
tier = tmp['result']['tier']
sub_type = tmp['result']['sub_type']
level = tmp['result']['level']
name = tmp['result']['sname']
insigs.setdefault(tier,{})
insigs[tier].setdefault(sub_type,{})
insigs[tier][sub_type].setdefault(level,{})
insigs[tier][sub_type][level].setdefault(name,{})
insigs[tier][sub_type][level][name]['name'] = tmp['result']['name']
insigs[tier][sub_type][level][name]['cost'] = tmp['result']['min_sale_unit_price']
insigs[tier][sub_type][level][name]['recipe'] = None
print str(num) +' of '+ str(den)
num += 1
q = Queue(3)
p_thread = threading.Thread(target=producer2, args=(q,))
c_thread = threading.Thread(target=consumer2, args=(q,))
p_thread.start()
c_thread.start()
p_thread.join()
c_thread.join()
# add some costs to the dict
def appendCosts():
items['t1']['thread'] = {'name':'Spool of Jute Thread','cost':8,'recipe':None}
items['t1']['lump'] = {'name':'Lump of Tin','cost':8,'recipe':None}
items['t2']['thread'] = {'name':'Spool of Wool Thread','cost':16,'recipe':None}
items['t3']['thread'] = {'name':'Spool of Cotton Thread','cost':24,'recipe':None}
items['t3']['lump'] = {'name':'Lump of Coal','cost':16,'recipe':None}
items['t4']['thread'] = {'name':'Spool of Linen Thread','cost':32,'recipe':None}
items['t4']['lump'] = {'name':'Lump of Primordium','cost':48,'recipe':None}
items['t5']['thread'] = {'name':'Spool of Silk Thread','cost':48,'recipe':None}
# generate placeholders in items for parts
for tier in items:
for o in ['ingot','plank','bolt','dowel','plated_dowel','leather_section','small_haft','large_haft','string']:
items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.itec[tier][o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
<|fim▁hole|> items[tier]['insig'] = {'fine1':{},'fine2':{},'master':{}}
if tier == 't5':
items[tier]['insc']['rare'] = {}
items[tier]['insig']['rare'] = {}
for typ in ['insc','insig']:
for stier in items_l.itec[tier][typ]:
for keyv in items_l.itec[tier][typ][stier]:
items[tier][typ][stier][keyv] = {'name':tier+'_'+keyv,'cost':0,'recipe':items_l.itec[tier][typ][stier][keyv]}
for o in items[tier][typ][stier][keyv]['recipe']:
items[tier][typ][stier][keyv]['cost'] += items[tier][typ][stier][keyv]['recipe'][o]*items[tier][o]['cost']
for o in items_l.wc:
items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.wc[o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
for o in items_l.ht:
items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.ht[o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
for o in items_l.ac:
items[tier][o] = {'name':tier+'_ac_'+o,'cost':0,'recipe':items_l.ac[o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
for o in items_l.lw:
items[tier][o] = {'name':tier+'_lw_'+o,'cost':0,'recipe':items_l.lw[o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
for o in items_l.tl:
items[tier][o] = {'name':tier+'_tl_'+o,'cost':0,'recipe':items_l.tl[o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
# Format copper values so they are easier to read
def mFormat(line):
line = int(line)
tmp = ''
rStr = ''
if line < 0:
tmp = '-'
line *= -1
mStr = str(line)
mLen = len(mStr)
if mLen > 4:
rStr += "%2dg" % int(mStr[0:mLen-4])
if mLen > 3:
rStr += '%2ds' % int(mStr[mLen-4:mLen-2])
elif mLen == 3:
rStr += '%2ds' % int(mStr[mLen-3:mLen-2])
if mLen == 1:
rStr += '%2dc' % int(mStr)
else:
rStr += '%2dc' % int(mStr[mLen-2:])
return tmp + rStr
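# A few worked examples of mFormat (amounts are in copper coins):
#   mFormat(123456) -> "12g34s56c" (12 gold, 34 silver, 56 copper)
#   mFormat(205) -> " 2s 5c"
#   mFormat(-42) -> "-42c"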
def xpreq(level):
tmp = 500
for i in range(1,level):
tmp = math.floor(tmp * 1.01)
return tmp
def xpgain(_level,_type,_min):
span = 0.0
gain = 0.0
if _type == 1: # refinement
span = 25.0
mult = .3
if _type == 2: # part
span = 25.0
mult = .6
if _type == 3: # item
span = 40.0
mult = 1.4
# xp_gain(N) = xp_req(N+1) * multiplier * (1.0 - (N - N_min) / span)
gain = xpreq(_level+1) * mult * (1.0 - (_level - _min) / span)
return math.ceil(gain)
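# At the bottom of a tier (_level == _min) the decay factor is 1.0, so a
# refinement (_type 1) grants ceil(xpreq(_level + 1) * 0.3) xp, a crafted part
# (_type 2) uses the 0.6 multiplier, and a discovery (_type 3) uses 1.4; the
# gain then falls off linearly over the next 'span' levels above _min.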
base_level = {'t1':0,'t2':75,'t3':150,'t4':225,'t5':300}
mod_level = {'fine1':0,'fine2':25,'master':50,'rare':75}
# Helper class to compute a tier of crafting
class craftTier:
# takes current level and xp as input
def __init__(self,xp):
self.xp = xp
self.cost = 0
self.log = []
# returns cost and make log for this tier
def getResult(self):
return self.cost, self.log, self.xp
# compute what level would be after crafting items, assume order is refine > p_discovery > parts > discovery
def compute_level(self,_xp, refine, part, discovery,tier_mult,tlvl):
level = tlvl
while xp_to_level[level] < _xp:
level += 1
for i in range(0,int(refine)):
_xp += tier_mult*xpgain(level,1,tlvl)
while xp_to_level[level] < _xp:
level += 1
for i in range(0,part):
_xp += tier_mult*xpgain(level,2,tlvl)
while xp_to_level[level] < _xp:
level += 1
for i in range(0,discovery):
_xp += (tier_mult+1)*xpgain(level,3,tlvl)
while xp_to_level[level] < _xp:
level += 1
return _xp
# calculate the xp per gold given the following items
    # cost || num refines/level, num parts/level1, num parts/level2, num insig/level (discovery is assumed same level)
def xp_calc(self,refines,parts,plated,base_level,insig_level,mod):
weight = 0.0
weight += xpgain(base_level,1,base_level)*refines
weight += xpgain(base_level,1,base_level)*parts
weight += xpgain(base_level,1,base_level+50)*plated*2
weight += xpgain(base_level,1,insig_level)*mod # insignia
weight += xpgain(base_level,1,insig_level)*(1+mod) # discovery
return weight
# recursively compute the number of refinements in a recipe
def calcRefine(self,recipe,count,tier):
if items[tier][recipe]['recipe'] == None:
return 0.0
if recipe == 'ingot' and tier == 't1':
return count/5.0
if recipe in ['ingot','plank','bolt','leather_section']:
return count
r = 0.0
for item in items[tier][recipe]['recipe']:
r += self.calcRefine(item,items[tier][recipe]['recipe'][item],tier)*count
return r
# compute the costs for our current tier
# requires flag, tier and base item list
def generateTier(self,pList, flag, tier):
# things to buy
buy = defaultdict(int)
# parts to make
make = defaultdict(int)
# what tiers we are crafing
lvls = ['fine1','fine2','master']
if tier == 't5':
lvls.append('rare')
# dictionary to hold all our costs
costs = {}
# Dict to hold craft queue while we compute what to make
craft_queue = {}
#figure out the cost lists for the tiers we are working with
for lvl in lvls:
scost = {}
for item in pList:
if not (('helm' in item and tier == 't1' and lvl == 'fine1') or ('shoulder' in item and tier == 't1' and lvl in ['fine1','fine2'])):
mod = 1.0
refines = 1 # default for insignia
parts = 0
plated = 0
if 'fine' in lvl:
if flag == 'insc':
parts = 1
elif lvl == 'master':
mod = 2.0
if flag == 'insc':
refines = 5
plated = 1
else: # lvl == 'rare':
mod = 3.25
if flag == 'insc':
plated = 2
refines = 10
#adjust for copper ore refines
if tier == 't1' and plated > 0:
refines = math.ceil(plated*2.6)
make_w = self.xp_calc(refines,parts,plated,base_level[tier],base_level[tier]+mod_level[lvl],mod)
buy_w = self.xp_calc(0,0,0,base_level[tier],base_level[tier]+mod_level[lvl],mod)
for insig in items[tier][flag][lvl]:
mcost = items[tier][item]['cost']+items[tier][flag][lvl][insig]['cost']
bcost = items[tier][item]['cost']+insigs[tier][flag][lvl][insig]['cost']
if make_w/float(mcost) > buy_w/float(bcost):
scost[tier+'_'+lvl+'_'+insig+'{craft)_'+item] = {'cost':mcost,'part':items[tier][item]['recipe'],'insig':items[tier][flag][lvl][insig]['recipe'],'weight':make_w/float(mcost),'part_name':item,'insig_name':lvl+'_'+insig}
else:
scost[tier+'_'+lvl+'_'+insig+'(buy)_'+item] = {'cost':bcost,'part':items[tier][item]['recipe'],'insig':None,'weight':buy_w/float(bcost),'part_name':item,'insig_name':insig}
costs[lvl] = scost
craft_queue[lvl] = sorted(costs[lvl], key=lambda k: costs[lvl][k]['weight'], reverse=True)
# queue for printing make order
make_q = {}
cqc = {}
# copy craft_queue
for lvl in lvls:
make_q[lvl] = []
cqc[lvl] = craft_queue[lvl][:]
_xp = xp_to_level[base_level[tier]+mod_level['rare']]
refine = 0.0
part = 0
plated = 0
insc = 0
discovery = 0
# fill rare(if needed)
if tier == 't5':
_xp = xp_to_level[375]
while self.compute_level(_xp, 0, insc, discovery,3.25,base_level[tier]+mod_level['rare']) < xp_to_level[base_level[tier]+mod_level['rare'] + 25]:
item = cqc['rare'].pop(0)
make_q['rare'].append(item)
for sitem in costs['rare'][item]['part']:
refine += self.calcRefine(sitem,costs['rare'][item]['part'][sitem],tier)
discovery += 1
part += 2
if not costs['rare'][item]['insig'] == None:
for sitem in costs['rare'][item]['insig']:
refine += self.calcRefine(sitem,costs['rare'][item]['insig'][sitem],tier)
if flag == 'insc':
plated += 2
else:
part += 2
insc += 1
insc = 0
discovery = 0
spart = 0 # shoulders are 50 skill in tier 1
hpart = 0 # helmets are 25 points in tier 1
_xp = xp_to_level[base_level[tier]+mod_level['master']]
# fill master
while self.compute_level(_xp, 0, spart+insc+plated, discovery,2.0,base_level[tier]+mod_level['master']) < xp_to_level[base_level[tier]+mod_level['master'] + 25]:
item = cqc['master'].pop(0)
make_q['master'].append(item)
if tier == 't1' and 'shoulder' in item:
spart += 2
part -= 2
elif tier == 't1' and 'helm' in item:
hpart += 2
part -= 2
discovery += 1
part += 2
for sitem in costs['master'][item]['part']:
refine += self.calcRefine(sitem,costs['master'][item]['part'][sitem],tier)
if not costs['master'][item]['insig'] == None:
for sitem in costs['master'][item]['insig']:
refine += self.calcRefine(sitem,costs['master'][item]['insig'][sitem],tier)
if flag == 'insc':
plated += 1
else:
part += 1
insc += 1
insc = 0
discovery = 0
_xp = xp_to_level[base_level[tier]+mod_level['fine2']]
# fill fine2
while self.compute_level(_xp, 0, hpart+insc, discovery,1.0,base_level[tier]+mod_level['fine2']) < xp_to_level[base_level[tier]+mod_level['fine2'] + 25]:
item = cqc['fine2'].pop(0)
make_q['fine2'].append(item)
if tier == 't1' and 'helm' in item:
hpart += 2
part -= 2
discovery += 1
part += 2
for sitem in costs['fine2'][item]['part']:
refine += self.calcRefine(sitem,costs['fine2'][item]['part'][sitem],tier)
if not costs['fine2'][item]['insig'] == None:
for sitem in costs['fine2'][item]['insig']:
refine += self.calcRefine(sitem,costs['fine2'][item]['insig'][sitem],tier)
part += 1
insc += 1
insc = 0
discovery = 0
_xp = xp_to_level[base_level[tier]+mod_level['fine1']]
# fill fine1
while self.compute_level(_xp, math.ceil(refine), part+insc, discovery,1.0,base_level[tier]) < xp_to_level[base_level[tier] + 25]:
item = cqc['fine1'].pop(0)
make_q['fine1'].append(item)
part += 2
discovery += 1
for sitem in costs['fine1'][item]['part']:
refine += self.calcRefine(sitem,costs['fine1'][item]['part'][sitem],tier)
if not costs['fine1'][item]['insig'] == None:
for sitem in costs['fine1'][item]['insig']:
refine += self.calcRefine(sitem,costs['fine1'][item]['insig'][sitem],tier)
part += 1
insc += 1
# start loop
# recopy queue, empty make_q
# fill fine1 (assume fine2, master, rare unchanged) : refine > parts/dowels > fine1_insc > fine1_discovery
# fill fine2 (assume nothing) : fine2_fine1_discovery > fine2_insc > fine2_discovery
# fill master (assume rare is unchanged) master_fine2_discovery > plated_dowels > master_insc > master_discovery
# fill rare rare_master_discovery > rare_insc > rare_discovery
# end loop if same result as last time(check first item in each tier of copied queue)
t_buff = []
for ll in lvls:
t_buff.append('\nLevel: %i' % (base_level[tier]+mod_level[ll]))
l_queue = []
for o in sorted(make_q[ll]):
t_buff.append(str(o))
self.cost += costs[ll][o]['cost']
make[costs[ll][o]['part_name']] += 1
for item in costs[ll][o]['part']:
if items[tier][item]['recipe'] == None:
buy[item] += costs[ll][o]['part'][item]
else:
make[item] += costs[ll][o]['part'][item]
l_queue.append((item,costs[ll][o]['part'][item]))
if costs[ll][o]['insig'] == None:
buy[ll+'_'+costs[ll][o]['insig_name']] += 1
else:
make[costs[ll][o]['insig_name']] += 1
for item in costs[ll][o]['insig']:
if items[tier][item]['recipe'] == None:
buy[item] += costs[ll][o]['insig'][item]
else:
make[item] += costs[ll][o]['insig'][item]
l_queue.append((item,costs[ll][o]['insig'][item]))
while l_queue:
item, count = l_queue.pop()
if items[tier][item]['recipe'] == None:
buy[item] += count
else:
for sitem in items[tier][item]['recipe']:
if items[tier][sitem]['recipe'] == None:
buy[sitem] += count*items[tier][item]['recipe'][sitem]
else:
make[sitem] += items[tier][item]['recipe'][sitem]*count
l_queue.append((sitem,items[tier][item]['recipe'][sitem]*count))
self.log.append('\n***BUY***\n')
bcost = 0
for line in sorted(buy, key=lambda k: buy[k], reverse=True):
if line in items[tier]:
total_buy[items[tier][line]['name']] += buy[line]
self.log.append("%3i - %s (%s per)\n"% (buy[line],items[tier][line]['name'],mFormat(items[tier][line]['cost'])))
bcost += items[tier][line]['cost']*buy[line]
else:
t = line.split('_')
total_buy[insigs[tier][flag][t[0]][t[1]]['name']] += buy[line]
self.log.append("%3i - %s (%s per)\n"% (buy[line],insigs[tier][flag][t[0]][t[1]]['name'],mFormat(insigs[tier][flag][t[0]][t[1]]['cost'])))
bcost += insigs[tier][flag][t[0]][t[1]]['cost']*buy[line]
global total_cost
total_cost += bcost
self.log.append('Cost for this tier: %s\n' % mFormat(bcost))
self.log.append('\n***MAKE***'+'\n')
sub = {}
for line in sorted(make, key=lambda k: make[k], reverse=True):
if 'fine1_' not in line and 'fine2_' not in line and 'master_' not in line and 'rare_' not in line:
self.log.append("%3i - %s\n"% (make[line],line))
else:
sub[line] = make[line]
for line in sorted(sub):
self.log.append("%3i - %s (%s per)\n"% (sub[line],line, mFormat(items[tier][flag][line.split('_')[0]][line.split('_')[1]] ['cost'])))
for line in t_buff:
self.log.append(line+'\n')
# Compute the costs for leveling a craft
def computeCraft(pList,fname,flag):
with open(fname,'wb') as f:
f.write('Last updated: '+datetime.datetime.now().strftime('%H:%M:%S %m-%d-%Y')+' PST\n')
totalcost = 0
tnum = 0
xp = 0
for tier in ['t1','t2','t3','t4','t5']:
c_tier = craftTier(xp)
c_tier.generateTier(pList,flag,tier)
cost, log, xp = c_tier.getResult()
totalcost += cost
for line in log:
f.write(line)
f.write(fname+':'+mFormat(totalcost))
def main():
# populate the xp chart
for i in range(1,441):
xp_to_level.append(xpreq(i)+xp_to_level[i-1])
getItemDict()
appendCosts()
getInsigDict()
computeCraft(items_l.wc,"Weapon2.txt",'insc')
computeCraft(items_l.ht,"Huntsman2.txt",'insc')
computeCraft(items_l.ac,"Armorcraft2.txt",'insig')
computeCraft(items_l.lw,"Leatherwork2.txt",'insig')
computeCraft(items_l.tl,"Tailor2.txt",'insig')
with open("Totals.txt", 'wb') as f:
for line in sorted(total_buy):
f.write("%4i - %s \n"% (total_buy[line],line))
f.write("Total cost: %s" % mFormat(total_cost))
# myFtp = FTP(ftp_url)
# myFtp.login(ftp_user,ftp_pass)
# for item in ["Weapon2.txt","Huntsman2.txt","Armorcraft2.txt","Leatherwork2.txt","Tailor2.txt","Totals.txt"]:
# with open(item,'rb') as f:
# myFtp.storbinary('STOR '+item,f)
# myFtp.close()
# If ran directly, call main
if __name__ == '__main__':
main()<|fim▁end|> | items[tier]['insc'] = {'fine1':{},'fine2':{},'master':{}} |
<|file_name|>GetText.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
class GetText():
_file_path = None
_body_list = None
_target = None
def __init__(self, file_path):
#self._file_path = open(file_path, "r+").read().replace("<br","\n<br")
self._file_path = file_path.replace("<br />", "<br />\n")
#self._file_path = (self._file_path.replace("\n",";;")).split(";;")
#self._file_path = file_path
#print(self._file_path)
self._file_path = ((self._file_path.replace("\n", "123")).replace(" ", "")).replace("> <", "")
self._file_path = (self._file_path).replace("<p", "\n<p")
#print (self._file_path)
self._body_list = self.get_body().split("123")
self.set_target_content(self.get_body())
self.get_beginning()
self.remove_tags()
#super(leGetText, self).__init__()
def get_html(self):
return self._file_path
def get_body(self):
return self.get_html().split("</head>", -1)[1]
def get_first_br_line(self):
br_index = 0
for i in self._body_list:
if(i.find("<br") > -1):
return (self._body_list.index(i))
else:
                br_index += 1
return br_index
def get_since_first_br(self):
since_first_br = self._body_list
del since_first_br[0:self.get_first_br_line()]
self.set_target_content(since_first_br)<|fim▁hole|>
return since_first_br
def set_target_content(self, content):
self._target = content
def get_target(self):
return self._target
    def br_check(self, info):
        # True only for a bare <br> tag variant, with no other content on the line
        return info in ("<br>", "<br />", "</ br>", "< br>")
def get_beginning(self):
# verifying whether there's any br in the next index
since_last_br = self.get_since_first_br()
#getting beginning of the lyrics
#print(since_last_br)
i = 0
for j in since_last_br:
if (
j.find("<br") > -1 and
since_last_br[i+1].find("<br") > -1 and
since_last_br[i+2].find("<br") > -1 and
since_last_br[i+3].find("<br") > -1 and
self.br_check(since_last_br[i]) == False and
self.br_check(since_last_br[i+1]) == False and
self.br_check(since_last_br[i+2]) == False and
self.br_check(since_last_br[i+3]) == False
):
del since_last_br[0:i]
break
else:
i = i +1
if (since_last_br[i].find("<br") > -1 and i+3< len(since_last_br) and self.br_check(since_last_br[i+3]) == False):
#print("i + 1 contains <br>")
#print(since_last_br[i])
del since_last_br[0:i]
# print (since_last_br)
break
self.set_target_content(since_last_br[0:200])
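    # In short, get_beginning() walks the body looking for the first run of
    # four consecutive lines that each contain a <br> tag without being bare
    # <br> elements, treats that run as the start of the lyrics block, and then
    # keeps only the next 200 lines as the working text.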
def remove_tags(self):
#removing tags
#removing b
html_file = "ç\n".join(self.get_target())
while(html_file.find("<b>") > -1):
#print("occur")
op = html_file.index("<b>")
cl = html_file.index("/b>")+3
html_file = list(html_file)
#for i in range(op, cl):
del html_file[op:cl]
html_file = "".join(html_file)
#removing [] (brackets) => from "[" to "\n"
while(html_file.find("[") > -1):
op = html_file.index("[")
cl = html_file.find("]")+1
bracket_line = html_file.split("ç")
l = 0
for k in bracket_line:
if(k.find("[") > -1):
break
l = l +1
del bracket_line[l]
html_file = ""
for k in bracket_line:
html_file = html_file + k+"ç"
'''
html_file = list(html_file)
#for i in range(op, cl):
del html_file[op:cl]
html_file = "".join(html_file)'''
self.set_target_content(html_file.split("ç"))
def get_end(self):
#getting the end of the lyrics (check if the next tag
        #being opened is the same as the one being closed
broken_since = "".join(self.get_target())
broken_since = broken_since.split("\n")
new_broken_since = [] #turning <p> into <br>
for x in broken_since:
la = x.replace("<p", "<br")
la = la.replace("</p>", "")
new_broken_since.append(la)
broken_since = new_broken_since
#checking if we are still in the lyrics block
until_the_end = []
l = 0
for k in broken_since:
kk = list(k)
if len(k) > 0:
'''
print("\n\n")
print(broken_since[l+1].find("<br"))
print(broken_since[l+1])
print("< l1 \n l2 >")
print(broken_since[l + 2].find("<br"))
print("\n\n")'''
if(l < 3 or k[0] != "<" or k[1] == "b"
or (broken_since[l+1].find("<br") > -1 and broken_since[l+2].find("<br"))
):
if (k.find("><br") == -1):
#print(k)
until_the_end.append("\n"+k)
else:
break
else:
#print("\n\n")
break
l = l + 1
#removing tags
final = ""
block = False
for j in until_the_end:
i = 0
moral = list(j)
for i in range(0, len(moral)):
if(moral[i] == "<"):
block = True
elif(moral[i] == ">"):
block = False
if(block==False and moral[i]!="<" and moral[i]!=">"):
final=final+moral[i]
return final
'''
oo = open("../../tmp/lrc", "r").read()
#print(oo)
get_text = _GetText(oo)
#print(get_text.get_target())
final = get_text.get_end()
final = final.encode("latin1").decode("utf-8")
'''
#print(final)
'''
#rs = open("../../tmp/lrc", "w+")
#rs.write(final)'
'''<|fim▁end|> | |
<|file_name|>iam.py<|end_file_name|><|fim▁begin|># Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import csv
import datetime
import functools
import json
import io
from datetime import timedelta
import itertools
import time
from concurrent.futures import as_completed
from dateutil.tz import tzutc
from dateutil.parser import parse as parse_date
import six
from botocore.exceptions import ClientError
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import ValueFilter, Filter
from c7n.filters.multiattr import MultiAttrFilter
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, TypeInfo
from c7n.resolver import ValuesFrom
from c7n.tags import TagActionFilter, TagDelayedAction, Tag, RemoveTag
from c7n.utils import local_session, type_schema, chunks, filter_empty, QueryParser
from c7n.resources.aws import Arn
from c7n.resources.securityhub import OtherResourcePostFinding
@resources.register('iam-group')
class Group(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'group'
enum_spec = ('list_groups', 'Groups', None)
id = name = 'GroupName'
date = 'CreateDate'
config_type = "AWS::IAM::Group"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
def get_resources(self, resource_ids, cache=True):
"""For IAM Groups on events, resource ids are Group Names."""
client = local_session(self.session_factory).client('iam')
resources = []
for rid in resource_ids:
try:
result = client.get_group(GroupName=rid)
except client.exceptions.NoSuchEntityException:
continue
group = result.pop('Group')
group['c7n:Users'] = result['Users']
resources.append(group)
return resources
@resources.register('iam-role')
class Role(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'role'
enum_spec = ('list_roles', 'Roles', None)
detail_spec = ('get_role', 'RoleName', 'RoleName', 'Role')
id = name = 'RoleName'
date = 'CreateDate'
config_type = "AWS::IAM::Role"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
@Role.action_registry.register('tag')
class RoleTag(Tag):
"""Tag an iam role."""
permissions = ('iam:TagRole',)
def process_resource_set(self, client, roles, tags):
for role in roles:
try:
self.manager.retry(
client.tag_role, RoleName=role['RoleName'], Tags=tags)
except client.exceptions.NoSuchEntityException:
continue
@Role.action_registry.register('remove-tag')
class RoleRemoveTag(RemoveTag):
"""Remove tags from an iam role."""
permissions = ('iam:UntagRole',)
def process_resource_set(self, client, roles, tags):
for role in roles:
try:
self.manager.retry(
client.untag_role, RoleName=role['RoleName'], TagKeys=tags)
except client.exceptions.NoSuchEntityException:
continue
@resources.register('iam-user')
class User(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'user'
detail_spec = ('get_user', 'UserName', 'UserName', 'User')
enum_spec = ('list_users', 'Users', None)
id = name = 'UserName'
date = 'CreateDate'
config_type = "AWS::IAM::User"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
def get_source(self, source_type):
if source_type == 'describe':
return DescribeUser(self)
return super(User, self).get_source(source_type)
class DescribeUser(DescribeSource):
def get_resources(self, resource_ids, cache=True):
client = local_session(self.manager.session_factory).client('iam')
results = []
for r in resource_ids:
try:
results.append(client.get_user(UserName=r)['User'])
except client.exceptions.NoSuchEntityException:
continue
return results
@User.action_registry.register('tag')
class UserTag(Tag):
"""Tag an iam user."""
permissions = ('iam:TagUser',)
def process_resource_set(self, client, users, tags):
for u in users:
try:
self.manager.retry(
client.tag_user, UserName=u['UserName'], Tags=tags)
except client.exceptions.NoSuchEntityException:
continue
@User.action_registry.register('remove-tag')
class UserRemoveTag(RemoveTag):
"""Remove tags from an iam user."""
permissions = ('iam:UntagUser',)
def process_resource_set(self, client, users, tags):
for u in users:
try:
self.manager.retry(
client.untag_user, UserName=u['UserName'], TagKeys=tags)
except client.exceptions.NoSuchEntityException:
continue
User.action_registry.register('mark-for-op', TagDelayedAction)
User.filter_registry.register('marked-for-op', TagActionFilter)
@User.action_registry.register('set-groups')
class SetGroups(BaseAction):
"""Set a specific IAM user as added/removed from a group
:example:
.. code-block:: yaml
- name: iam-user-add-remove
resource: iam-user
filters:
- type: value
key: UserName
value: Bob
actions:
- type: set-groups
state: remove
group: Admin
"""
schema = type_schema(
'set-groups',
state={'enum': ['add', 'remove']},
group={'type': 'string'},
required=['state', 'group']
)
permissions = ('iam:AddUserToGroup', 'iam:RemoveUserFromGroup',)
def validate(self):
if self.data.get('group') == '':
raise PolicyValidationError('group cannot be empty on %s'
% (self.manager.data))
def process(self, resources):
group_name = self.data['group']
state = self.data['state']
client = local_session(self.manager.session_factory).client('iam')
op_map = {
'add': client.add_user_to_group,
'remove': client.remove_user_from_group
}
for r in resources:
try:
op_map[state](GroupName=group_name, UserName=r['UserName'])
except client.exceptions.NoSuchEntityException:
continue
@resources.register('iam-policy')
class Policy(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'policy'
enum_spec = ('list_policies', 'Policies', None)
id = 'PolicyId'
name = 'PolicyName'
date = 'CreateDate'
config_type = "AWS::IAM::Policy"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
def get_source(self, source_type):
if source_type == 'describe':
return DescribePolicy(self)
return super(Policy, self).get_source(source_type)
class DescribePolicy(DescribeSource):
def resources(self, query=None):
qfilters = PolicyQueryParser.parse(self.manager.data.get('query', []))
query = query or {}
if qfilters:
query = {t['Name']: t['Value'] for t in qfilters}
return super(DescribePolicy, self).resources(query=query)
def get_resources(self, resource_ids, cache=True):
client = local_session(self.manager.session_factory).client('iam')
results = []
for r in resource_ids:
try:
results.append(client.get_policy(PolicyArn=r)['Policy'])
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntityException':
continue
return results
class PolicyQueryParser(QueryParser):
QuerySchema = {
'Scope': ('All', 'AWS', 'Local'),
'PolicyUsageFilter': ('PermissionsPolicy', 'PermissionsBoundary'),
'PathPrefix': six.string_types,
'OnlyAttached': bool
}
multi_value = False
value_key = 'Value'
@resources.register('iam-profile')
class InstanceProfile(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'instance-profile'
enum_spec = ('list_instance_profiles', 'InstanceProfiles', None)
id = 'InstanceProfileId'
name = 'InstanceProfileId'
date = 'CreateDate'
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
@resources.register('iam-certificate')
class ServerCertificate(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'server-certificate'
enum_spec = ('list_server_certificates',
'ServerCertificateMetadataList',
None)
id = 'ServerCertificateId'
name = 'ServerCertificateName'
date = 'Expiration'
# Denotes this resource type exists across regions
global_resource = True
@User.filter_registry.register('usage')
@Role.filter_registry.register('usage')
@Group.filter_registry.register('usage')
@Policy.filter_registry.register('usage')
class ServiceUsage(Filter):
"""Filter iam resources by their api/service usage.
Note recent activity (last 4hrs) may not be shown, evaluation
is against the last 365 days of data.
Each service access record is evaluated against all specified
attributes. Attribute filters can be specified in short form k:v
pairs or in long form as a value type filter.
match-operator allows to specify how a resource is treated across
service access record matches. 'any' means a single matching
service record will return the policy resource as matching. 'all'
means all service access records have to match.
Find iam users that have not used any services in the last year
:example:
.. code-block:: yaml
- name: unused-users
resource: iam-user
filters:
- type: usage
match-operator: all
LastAuthenticated: null
Find iam users that have used dynamodb in last 30 days
:example:
.. code-block:: yaml
- name: unused-users
resource: iam-user
filters:
- type: usage
ServiceNamespace: dynamodb
TotalAuthenticatedEntities: 1
LastAuthenticated:
type: value
value_type: age
op: less-than
value: 30
match-operator: any
https://aws.amazon.com/blogs/security/automate-analyzing-permissions-using-iam-access-advisor/
"""
JOB_COMPLETE = 'COMPLETED'
SERVICE_ATTR = set((
'ServiceName', 'ServiceNamespace', 'TotalAuthenticatedEntities',
'LastAuthenticated', 'LastAuthenticatedEntity'))
schema_alias = True
schema_attr = {
sa: {'oneOf': [
{'type': 'string'},
{'type': 'boolean'},
{'type': 'number'},
{'type': 'null'},
{'$ref': '#/definitions/filters/value'}]}
for sa in SERVICE_ATTR}
schema_attr['match-operator'] = {'enum': ['all', 'any']}
schema_attr['poll-delay'] = {'type': 'number'}
schema = type_schema(
'usage',
required=('match-operator',),
**schema_attr)
permissions = ('iam:GenerateServiceLastAccessedDetails',
'iam:GetServiceLastAccessedDetails')
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('iam')
job_resource_map = {}
for arn, r in zip(self.manager.get_arns(resources), resources):
try:
jid = self.manager.retry(
client.generate_service_last_accessed_details,
Arn=arn)['JobId']
job_resource_map[jid] = r
except client.exceptions.NoSuchEntityException:
continue
conf = dict(self.data)
conf.pop('match-operator')
saf = MultiAttrFilter(conf)
saf.multi_attrs = self.SERVICE_ATTR
results = []
match_operator = self.data.get('match-operator', 'all')
while job_resource_map:
job_results_map = {}
for jid, r in job_resource_map.items():
result = self.manager.retry(
client.get_service_last_accessed_details, JobId=jid)
if result['JobStatus'] != self.JOB_COMPLETE:
continue
job_results_map[jid] = result['ServicesLastAccessed']
for jid, saf_results in job_results_map.items():
r = job_resource_map.pop(jid)
saf_matches = saf.process(saf_results)
if match_operator == 'all' and len(saf_matches) == len(saf_results):
results.append(r)
elif saf_matches:
results.append(r)
time.sleep(self.data.get('poll-delay', 2))
return results
@User.filter_registry.register('check-permissions')
@Group.filter_registry.register('check-permissions')
@Role.filter_registry.register('check-permissions')
@Policy.filter_registry.register('check-permissions')
class CheckPermissions(Filter):
"""Check IAM permissions associated with a resource.
:example:
Find users that can create other users
.. code-block:: yaml
policies:
- name: super-users
resource: iam-user
filters:
- type: check-permissions
match: allowed
actions:
- iam:CreateUser
"""
schema = type_schema(
'check-permissions', **{
'match': {'oneOf': [
{'enum': ['allowed', 'denied']},
{'$ref': '#/definitions/filters/valuekv'},
{'$ref': '#/definitions/filters/value'}]},
'match-operator': {'enum': ['and', 'or']},
'actions': {'type': 'array', 'items': {'type': 'string'}},
'required': ('actions', 'match')})
schema_alias = True
policy_annotation = 'c7n:policy'
eval_annotation = 'c7n:perm-matches'
def get_permissions(self):
if self.manager.type == 'iam-policy':
return ('iam:SimulateCustomPolicy',)
return ('iam:SimulatePrincipalPolicy',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('iam')
actions = self.data['actions']<|fim▁hole|> results = []
eval_cache = {}
for arn, r in zip(self.get_iam_arns(resources), resources):
if arn is None:
continue
if arn in eval_cache:
evaluations = eval_cache[arn]
else:
evaluations = self.get_evaluations(client, arn, r, actions)
eval_cache[arn] = evaluations
matches = []
matched = []
for e in evaluations:
match = matcher(e)
if match:
matched.append(e)
matches.append(match)
if operator(matches):
r[self.eval_annotation] = matched
results.append(r)
return results
def get_iam_arns(self, resources):
return self.manager.get_arns(resources)
def get_evaluations(self, client, arn, r, actions):
if self.manager.type == 'iam-policy':
policy = r.get(self.policy_annotation)
if policy is None:
r['c7n:policy'] = policy = client.get_policy_version(
PolicyArn=r['Arn'],
VersionId=r['DefaultVersionId']).get('PolicyVersion', {})
evaluations = self.manager.retry(
client.simulate_custom_policy,
PolicyInputList=[json.dumps(policy['Document'])],
ActionNames=actions).get('EvaluationResults', ())
else:
evaluations = self.manager.retry(
client.simulate_principal_policy,
PolicySourceArn=arn,
ActionNames=actions).get('EvaluationResults', ())
return evaluations
def get_eval_matcher(self):
if isinstance(self.data['match'], six.string_types):
if self.data['match'] == 'denied':
values = ['explicitDeny', 'implicitDeny']
else:
values = ['allowed']
vf = ValueFilter({'type': 'value', 'key':
'EvalDecision', 'value': values,
'op': 'in'})
else:
vf = ValueFilter(self.data['match'])
vf.annotate = False
return vf
class IamRoleUsage(Filter):
def get_permissions(self):
perms = list(itertools.chain(*[
self.manager.get_resource_manager(m).get_permissions()
for m in ['lambda', 'launch-config', 'ec2']]))
perms.extend(['ecs:DescribeClusters', 'ecs:DescribeServices'])
return perms
def service_role_usage(self):
results = set()
results.update(self.scan_lambda_roles())
results.update(self.scan_ecs_roles())
results.update(self.collect_profile_roles())
return results
def instance_profile_usage(self):
results = set()
results.update(self.scan_asg_roles())
results.update(self.scan_ec2_roles())
return results
def scan_lambda_roles(self):
manager = self.manager.get_resource_manager('lambda')
return [r['Role'] for r in manager.resources() if 'Role' in r]
def scan_ecs_roles(self):
results = []
client = local_session(self.manager.session_factory).client('ecs')
for cluster in client.describe_clusters()['clusters']:
services = client.list_services(
cluster=cluster['clusterName'])['serviceArns']
if services:
for service in client.describe_services(
cluster=cluster['clusterName'],
services=services)['services']:
if 'roleArn' in service:
results.append(service['roleArn'])
return results
def collect_profile_roles(self):
# Collect iam roles attached to instance profiles of EC2/ASG resources
profiles = set()
profiles.update(self.scan_asg_roles())
profiles.update(self.scan_ec2_roles())
manager = self.manager.get_resource_manager('iam-profile')
iprofiles = manager.resources()
results = []
for p in iprofiles:
if p['InstanceProfileName'] not in profiles:
continue
for role in p.get('Roles', []):
results.append(role['RoleName'])
return results
def scan_asg_roles(self):
manager = self.manager.get_resource_manager('launch-config')
return [r['IamInstanceProfile'] for r in manager.resources() if (
'IamInstanceProfile' in r)]
def scan_ec2_roles(self):
manager = self.manager.get_resource_manager('ec2')
results = []
for e in manager.resources():
# do not include instances that have been recently terminated
if e['State']['Name'] == 'terminated':
continue
profile_arn = e.get('IamInstanceProfile', {}).get('Arn', None)
if not profile_arn:
continue
# split arn to get the profile name
results.append(profile_arn.split('/')[-1])
return results
###################
# IAM Roles #
###################
@Role.filter_registry.register('used')
class UsedIamRole(IamRoleUsage):
"""Filter IAM roles that are either being used or not
Checks for usage on EC2, Lambda, ECS only
:example:
.. code-block:: yaml
policies:
- name: iam-role-in-use
resource: iam-role
filters:
- type: used
state: true
"""
schema = type_schema(
'used',
state={'type': 'boolean'})
def process(self, resources, event=None):
roles = self.service_role_usage()
if self.data.get('state', True):
return [r for r in resources if (
r['Arn'] in roles or r['RoleName'] in roles)]
return [r for r in resources if (
r['Arn'] not in roles and r['RoleName'] not in roles)]
@Role.filter_registry.register('unused')
class UnusedIamRole(IamRoleUsage):
"""Filter IAM roles that are either being used or not
This filter has been deprecated. Please use the 'used' filter
with the 'state' attribute to get unused iam roles
Checks for usage on EC2, Lambda, ECS only
:example:
.. code-block:: yaml
policies:
- name: iam-roles-not-in-use
resource: iam-role
filters:
- type: used
state: false
"""
schema = type_schema('unused')
def process(self, resources, event=None):
return UsedIamRole({'state': False}, self.manager).process(resources)
@Role.filter_registry.register('cross-account')
class RoleCrossAccountAccess(CrossAccountAccessFilter):
policy_attribute = 'AssumeRolePolicyDocument'
permissions = ('iam:ListRoles',)
schema = type_schema(
'cross-account',
# white list accounts
whitelist_from=ValuesFrom.schema,
whitelist={'type': 'array', 'items': {'type': 'string'}})
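# A sketch of how this filter might be used (account id and policy name are
# illustrative only):
#
# policies:
#   - name: iam-roles-with-cross-account-trust
#     resource: iam-role
#     filters:
#       - type: cross-account
#         whitelist:
#           - "123456789012"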
@Role.filter_registry.register('has-inline-policy')
class IamRoleInlinePolicy(Filter):
"""Filter IAM roles that have an inline-policy attached
True: Filter roles that have an inline-policy
False: Filter roles that do not have an inline-policy
:example:
.. code-block:: yaml
policies:
- name: iam-roles-with-inline-policies
resource: iam-role
filters:
- type: has-inline-policy
value: True
"""
schema = type_schema('has-inline-policy', value={'type': 'boolean'})
permissions = ('iam:ListRolePolicies',)
def _inline_policies(self, client, resource):
policies = client.list_role_policies(
RoleName=resource['RoleName'])['PolicyNames']
resource['c7n:InlinePolicies'] = policies
return resource
def process(self, resources, event=None):
c = local_session(self.manager.session_factory).client('iam')
res = []
value = self.data.get('value', True)
for r in resources:
r = self._inline_policies(c, r)
if len(r['c7n:InlinePolicies']) > 0 and value:
res.append(r)
if len(r['c7n:InlinePolicies']) == 0 and not value:
res.append(r)
return res
@Role.filter_registry.register('has-specific-managed-policy')
class SpecificIamRoleManagedPolicy(Filter):
"""Filter IAM roles that has a specific policy attached
For example, if the user wants to check all roles with 'admin-policy':
:example:
.. code-block:: yaml
policies:
- name: iam-roles-have-admin
resource: iam-role
filters:
- type: has-specific-managed-policy
value: admin-policy
"""
schema = type_schema('has-specific-managed-policy', value={'type': 'string'})
permissions = ('iam:ListAttachedRolePolicies',)
def _managed_policies(self, client, resource):
return [r['PolicyName'] for r in client.list_attached_role_policies(
RoleName=resource['RoleName'])['AttachedPolicies']]
def process(self, resources, event=None):
c = local_session(self.manager.session_factory).client('iam')
if self.data.get('value'):
return [r for r in resources if self.data.get('value') in self._managed_policies(c, r)]
return []
@Role.filter_registry.register('no-specific-managed-policy')
class NoSpecificIamRoleManagedPolicy(Filter):
"""Filter IAM roles that do not have a specific policy attached
For example, if the user wants to check all roles without 'ip-restriction':
:example:
.. code-block:: yaml
policies:
- name: iam-roles-no-ip-restriction
resource: iam-role
filters:
- type: no-specific-managed-policy
value: ip-restriction
"""
schema = type_schema('no-specific-managed-policy', value={'type': 'string'})
permissions = ('iam:ListAttachedRolePolicies',)
def _managed_policies(self, client, resource):
return [r['PolicyName'] for r in client.list_attached_role_policies(
RoleName=resource['RoleName'])['AttachedPolicies']]
def process(self, resources, event=None):
c = local_session(self.manager.session_factory).client('iam')
if self.data.get('value'):
return [r for r in resources if not self.data.get('value') in
self._managed_policies(c, r)]
return []
@Role.action_registry.register('set-policy')
class SetPolicy(BaseAction):
"""Set a specific IAM policy as attached or detached on a role.
You will identify the policy by its arn.
Returns a list of roles modified by the action.
For example, if you want to automatically attach a policy to all roles which don't have it...
:example:
.. code-block:: yaml
- name: iam-attach-role-policy
resource: iam-role
filters:
- type: no-specific-managed-policy
value: my-iam-policy
actions:
- type: set-policy
state: detached
arn: "*"
- type: set-policy
state: attached
arn: arn:aws:iam::123456789012:policy/my-iam-policy
"""
schema = type_schema(
'set-policy',
state={'enum': ['attached', 'detached']},
arn={'type': 'string'},
required=['state', 'arn'])
permissions = ('iam:AttachRolePolicy', 'iam:DetachRolePolicy',)
def validate(self):
if self.data.get('state') == 'attached' and self.data.get('arn') == "*":
raise PolicyValidationError(
'* operator is not supported for state: attached on %s' % (self.manager.data))
def process(self, resources):
client = local_session(self.manager.session_factory).client('iam')
policy_arn = self.data['arn']
state = self.data['state']
for r in resources:
if state == 'attached':
client.attach_role_policy(
RoleName=r['RoleName'],
PolicyArn=policy_arn)
elif state == 'detached' and policy_arn != "*":
try:
client.detach_role_policy(
RoleName=r['RoleName'],
PolicyArn=policy_arn)
except client.exceptions.NoSuchEntityException:
continue
elif state == 'detached' and policy_arn == "*":
try:
self.detach_all_policies(client, r)
except client.exceptions.NoSuchEntityException:
continue
def detach_all_policies(self, client, resource):
attached_policy = client.list_attached_role_policies(RoleName=resource['RoleName'])
policy_arns = [p.get('PolicyArn') for p in attached_policy['AttachedPolicies']]
for parn in policy_arns:
client.detach_role_policy(RoleName=resource['RoleName'], PolicyArn=parn)
@Role.action_registry.register('delete')
class RoleDelete(BaseAction):
"""Delete an IAM Role.
For example, if you want to automatically delete an unused IAM role.
:example:
.. code-block:: yaml
- name: iam-delete-unused-role
resource: iam-role
filters:
- type: usage
match-operator: all
LastAuthenticated: null
actions:
- type: delete
force: true
"""
schema = type_schema('delete', force={'type': 'boolean'})
permissions = ('iam:DeleteRole',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('iam')
error = None
if self.data.get('force', False):
policy_setter = self.manager.action_registry['set-policy'](
{'state': 'detached', 'arn': '*'}, self.manager)
policy_setter.process(resources)
for r in resources:
try:
client.delete_role(RoleName=r['RoleName'])
except client.exceptions.DeleteConflictException as e:
self.log.warning(
"Role:%s cannot be deleted, set force to detach policy and delete"
% r['Arn'])
error = e
except client.exceptions.NoSuchEntityException:
continue
except client.exceptions.UnmodifiableEntityException:
continue
if error:
raise error
######################
# IAM Policies #
######################
@Policy.filter_registry.register('used')
class UsedIamPolicies(Filter):
"""Filter IAM policies that are being used
(either attached to some roles or used as a permissions boundary).
:example:
.. code-block:: yaml
policies:
- name: iam-policy-used
resource: iam-policy
filters:
- type: used
"""
schema = type_schema('used')
permissions = ('iam:ListPolicies',)
def process(self, resources, event=None):
return [r for r in resources if
r['AttachmentCount'] > 0 or r.get('PermissionsBoundaryUsageCount', 0) > 0]
@Policy.filter_registry.register('unused')
class UnusedIamPolicies(Filter):
"""Filter IAM policies that are not being used
(neither attached to any roles nor used as a permissions boundary).
:example:
.. code-block:: yaml
policies:
- name: iam-policy-unused
resource: iam-policy
filters:
- type: unused
"""
schema = type_schema('unused')
permissions = ('iam:ListPolicies',)
def process(self, resources, event=None):
return [r for r in resources if
r['AttachmentCount'] == 0 and r.get('PermissionsBoundaryUsageCount', 0) == 0]
@Policy.filter_registry.register('has-allow-all')
class AllowAllIamPolicies(Filter):
"""Check if IAM policy resource(s) have allow-all IAM policy statement block.
    This allows users to implement CIS AWS check 1.24, which states that no
    policy should exist that allows 'Action' = '*' on 'Resource' = '*' with
    'Effect' = 'Allow'.
The policy will trigger on the following IAM policy (statement).
For example:
.. code-block:: json
{
"Version": "2012-10-17",
"Statement": [{
"Action": "*",
"Resource": "*",
"Effect": "Allow"
}]
}
Additionally, the policy checks if the statement has no 'Condition' or
'NotAction'.
For example, if the user wants to check all used policies and filter on
allow all:
.. code-block:: yaml
- name: iam-no-used-all-all-policy
resource: iam-policy
filters:
- type: used
- type: has-allow-all
Note that scanning and getting all policies and all statements can take
a while. Use it sparingly or combine it with filters such as 'used' as
above.
"""
schema = type_schema('has-allow-all')
permissions = ('iam:ListPolicies', 'iam:ListPolicyVersions')
def has_allow_all_policy(self, client, resource):
statements = client.get_policy_version(
PolicyArn=resource['Arn'],
VersionId=resource['DefaultVersionId']
)['PolicyVersion']['Document']['Statement']
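        # a policy document with a single statement can deserialize as a bare
        # dict rather than a list; normalize so the loop below always sees a list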
if isinstance(statements, dict):
statements = [statements]
for s in statements:
if ('Condition' not in s and
'Action' in s and
isinstance(s['Action'], six.string_types) and
s['Action'] == "*" and
'Resource' in s and
isinstance(s['Resource'], six.string_types) and
s['Resource'] == "*" and
s['Effect'] == "Allow"):
return True
return False
def process(self, resources, event=None):
c = local_session(self.manager.session_factory).client('iam')
results = [r for r in resources if self.has_allow_all_policy(c, r)]
self.log.info(
"%d of %d iam policies have allow all.",
len(results), len(resources))
return results
@Policy.action_registry.register('delete')
class PolicyDelete(BaseAction):
"""Delete an IAM Policy.
For example, if you want to automatically delete all unused IAM policies.
:example:
.. code-block:: yaml
- name: iam-delete-unused-policies
resource: iam-policy
filters:
- type: unused
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('iam:DeletePolicy',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('iam')
rcount = len(resources)
resources = [r for r in resources if Arn.parse(r['Arn']).account_id != 'aws']
if len(resources) != rcount:
self.log.warning("Implicitly filtering AWS managed policies: %d -> %d",
rcount, len(resources))
for r in resources:
if r.get('DefaultVersionId', '') != 'v1':
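                # IAM refuses to delete a policy that still has non-default
                # versions, so prune those versions first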
versions = [v['VersionId'] for v in client.list_policy_versions(
PolicyArn=r['Arn']).get('Versions') if not v.get('IsDefaultVersion')]
for v in versions:
client.delete_policy_version(PolicyArn=r['Arn'], VersionId=v)
client.delete_policy(PolicyArn=r['Arn'])
###############################
# IAM Instance Profiles #
###############################
@InstanceProfile.filter_registry.register('used')
class UsedInstanceProfiles(IamRoleUsage):
"""Filter IAM profiles that are being used.
:example:
.. code-block:: yaml
policies:
- name: iam-instance-profiles-in-use
resource: iam-profile
filters:
- type: used
"""
schema = type_schema('used')
def process(self, resources, event=None):
results = []
profiles = self.instance_profile_usage()
for r in resources:
if r['Arn'] in profiles or r['InstanceProfileName'] in profiles:
results.append(r)
self.log.info(
"%d of %d instance profiles currently in use." % (
len(results), len(resources)))
return results
@InstanceProfile.filter_registry.register('unused')
class UnusedInstanceProfiles(IamRoleUsage):
"""Filter IAM profiles that are not being used
:example:
.. code-block:: yaml
policies:
- name: iam-instance-profiles-not-in-use
resource: iam-profile
filters:
- type: unused
"""
schema = type_schema('unused')
def process(self, resources, event=None):
results = []
profiles = self.instance_profile_usage()
for r in resources:
            if (r['Arn'] not in profiles and r['InstanceProfileName'] not in profiles):
results.append(r)
self.log.info(
"%d of %d instance profiles currently not in use." % (
len(results), len(resources)))
return results
###################
# IAM Users #
###################
class CredentialReport(Filter):
"""Use IAM Credential report to filter users.
The IAM Credential report aggregates multiple pieces of
information on iam users. This makes it highly efficient for
querying multiple aspects of a user that would otherwise require
per user api calls.
https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html
For example if we wanted to retrieve all users with mfa who have
never used their password but have active access keys from the
last month
.. code-block:: yaml
- name: iam-mfa-active-keys-no-login
resource: iam-user
filters:
- type: credential
key: mfa_active
value: true
- type: credential
key: password_last_used
value: absent
- type: credential
key: access_keys.last_used
value_type: age
value: 30
op: less-than
Credential Report Transforms
We perform some default transformations from the raw
credential report. Sub-objects (access_key_1, cert_2)
are turned into array of dictionaries for matching
purposes with their common prefixes stripped.
N/A values are turned into None, TRUE/FALSE are turned
into boolean values.
"""
schema = type_schema(
'credential',
value_type={'$ref': '#/definitions/filters_common/value_types'},
key={'type': 'string',
'title': 'report key to search',
'enum': [
'user',
'arn',
'user_creation_time',
'password_enabled',
'password_last_used',
'password_last_changed',
'password_next_rotation',
'mfa_active',
'access_keys',
'access_keys.active',
'access_keys.last_used_date',
'access_keys.last_used_region',
'access_keys.last_used_service',
'access_keys.last_rotated',
'certs',
'certs.active',
'certs.last_rotated',
]},
value={'$ref': '#/definitions/filters_common/value'},
op={'$ref': '#/definitions/filters_common/comparison_operators'},
report_generate={
'title': 'Generate a report if none is present.',
'default': True,
'type': 'boolean'},
report_delay={
'title': 'Number of seconds to wait for report generation.',
'default': 10,
'type': 'number'},
report_max_age={
'title': 'Number of seconds to consider a report valid.',
'default': 60 * 60 * 24,
'type': 'number'})
list_sub_objects = (
('access_key_1_', 'access_keys'),
('access_key_2_', 'access_keys'),
('cert_1_', 'certs'),
('cert_2_', 'certs'))
# for access keys only
matched_annotation_key = 'c7n:matched-keys'
permissions = ('iam:GenerateCredentialReport',
'iam:GetCredentialReport')
def get_value_or_schema_default(self, k):
if k in self.data:
return self.data[k]
return self.schema['properties'][k]['default']
def get_credential_report(self):
report = self.manager._cache.get('iam-credential-report')
if report:
return report
data = self.fetch_credential_report()
report = {}
if isinstance(data, six.binary_type):
reader = csv.reader(io.StringIO(data.decode('utf-8')))
else:
reader = csv.reader(io.StringIO(data))
headers = next(reader)
for line in reader:
info = dict(zip(headers, line))
report[info['user']] = self.process_user_record(info)
self.manager._cache.save('iam-credential-report', report)
return report
@classmethod
def process_user_record(cls, info):
"""Type convert the csv record, modifies in place."""
keys = list(info.keys())
# Value conversion
for k in keys:
v = info[k]
if v in ('N/A', 'no_information'):
info[k] = None
elif v == 'false':
info[k] = False
elif v == 'true':
info[k] = True
# Object conversion
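        # e.g. report columns access_key_1_active / access_key_1_last_rotated
        # collapse into one entry of info['access_keys'] with keys 'active'
        # and 'last_rotated'; only active sub-objects are kept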
for p, t in cls.list_sub_objects:
obj = dict([(k[len(p):], info.pop(k))
for k in keys if k.startswith(p)])
if obj.get('active', False):
info.setdefault(t, []).append(obj)
return info
def fetch_credential_report(self):
client = local_session(self.manager.session_factory).client('iam')
try:
report = client.get_credential_report()
except ClientError as e:
if e.response['Error']['Code'] != 'ReportNotPresent':
raise
report = None
if report:
threshold = datetime.datetime.now(tz=tzutc()) - timedelta(
seconds=self.get_value_or_schema_default(
'report_max_age'))
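            # GeneratedTime may be timezone-naive; drop tzinfo from the
            # threshold in that case so the comparison below doesn't raise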
if not report['GeneratedTime'].tzinfo:
threshold = threshold.replace(tzinfo=None)
if report['GeneratedTime'] < threshold:
report = None
if report is None:
if not self.get_value_or_schema_default('report_generate'):
raise ValueError("Credential Report Not Present")
client.generate_credential_report()
time.sleep(self.get_value_or_schema_default('report_delay'))
report = client.get_credential_report()
return report['Content']
def process(self, resources, event=None):
if '.' in self.data['key']:
self.matcher_config = dict(self.data)
self.matcher_config['key'] = self.data['key'].split('.', 1)[1]
return []
def match(self, resource, info):
if info is None:
return False
k = self.data.get('key')
if '.' not in k:
vf = ValueFilter(self.data)
vf.annotate = False
return vf(info)
# access key matching
prefix, sk = k.split('.', 1)
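        # e.g. key 'access_keys.last_used_date' evaluates 'last_used_date'
        # against each entry of info['access_keys']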
vf = ValueFilter(self.matcher_config)
vf.annotate = False
# annotation merging with previous respecting block operators
k_matched = []
for v in info.get(prefix, ()):
if vf.match(v):
k_matched.append(v)
for k in k_matched:
k['c7n:match-type'] = 'credential'
self.merge_annotation(resource, self.matched_annotation_key, k_matched)
return bool(k_matched)
@User.filter_registry.register('credential')
class UserCredentialReport(CredentialReport):
def process(self, resources, event=None):
super(UserCredentialReport, self).process(resources, event)
report = self.get_credential_report()
if report is None:
return []
results = []
for r in resources:
info = report.get(r['UserName'])
if self.match(r, info):
r['c7n:credential-report'] = info
results.append(r)
return results
@User.filter_registry.register('has-inline-policy')
class IamUserInlinePolicy(Filter):
"""
Filter IAM users that have an inline-policy attached
True: Filter users that have an inline-policy
False: Filter users that do not have an inline-policy
"""
schema = type_schema('has-inline-policy', value={'type': 'boolean'})
permissions = ('iam:ListUserPolicies',)
def _inline_policies(self, client, resource):
resource['c7n:InlinePolicies'] = client.list_user_policies(
UserName=resource['UserName'])['PolicyNames']
return resource
def process(self, resources, event=None):
c = local_session(self.manager.session_factory).client('iam')
value = self.data.get('value', True)
res = []
for r in resources:
r = self._inline_policies(c, r)
if len(r['c7n:InlinePolicies']) > 0 and value:
res.append(r)
if len(r['c7n:InlinePolicies']) == 0 and not value:
res.append(r)
return res
@User.filter_registry.register('policy')
class UserPolicy(ValueFilter):
"""Filter IAM users based on attached policy values
:example:
.. code-block:: yaml
policies:
- name: iam-users-with-admin-access
resource: iam-user
filters:
- type: policy
key: PolicyName
value: AdministratorAccess
"""
schema = type_schema('policy', rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('iam:ListAttachedUserPolicies',)
def user_policies(self, user_set):
client = local_session(self.manager.session_factory).client('iam')
for u in user_set:
if 'c7n:Policies' not in u:
u['c7n:Policies'] = []
aps = client.list_attached_user_policies(
UserName=u['UserName'])['AttachedPolicies']
for ap in aps:
u['c7n:Policies'].append(
client.get_policy(PolicyArn=ap['PolicyArn'])['Policy'])
def process(self, resources, event=None):
user_set = chunks(resources, size=50)
with self.executor_factory(max_workers=2) as w:
self.log.debug(
"Querying %d users policies" % len(resources))
list(w.map(self.user_policies, user_set))
matched = []
for r in resources:
for p in r['c7n:Policies']:
if self.match(p) and r not in matched:
matched.append(r)
return matched
@User.filter_registry.register('group')
class GroupMembership(ValueFilter):
"""Filter IAM users based on attached group values
:example:
.. code-block:: yaml
policies:
- name: iam-users-in-admin-group
resource: iam-user
filters:
- type: group
key: GroupName
value: Admins
"""
schema = type_schema('group', rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('iam:ListGroupsForUser',)
def get_user_groups(self, client, user_set):
for u in user_set:
u['c7n:Groups'] = client.list_groups_for_user(
UserName=u['UserName'])['Groups']
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('iam')
with self.executor_factory(max_workers=2) as w:
futures = []
for user_set in chunks(
[r for r in resources if 'c7n:Groups' not in r], size=50):
futures.append(
w.submit(self.get_user_groups, client, user_set))
for f in as_completed(futures):
pass
matched = []
for r in resources:
for p in r.get('c7n:Groups', []):
if self.match(p) and r not in matched:
matched.append(r)
return matched
@User.filter_registry.register('access-key')
class UserAccessKey(ValueFilter):
"""Filter IAM users based on access-key values
:example:
.. code-block:: yaml
policies:
- name: iam-users-with-active-keys
resource: iam-user
filters:
- type: access-key
key: Status
value: Active
"""
schema = type_schema('access-key', rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('iam:ListAccessKeys',)
annotation_key = 'c7n:AccessKeys'
matched_annotation_key = 'c7n:matched-keys'
annotate = False
def get_user_keys(self, client, user_set):
for u in user_set:
u[self.annotation_key] = self.manager.retry(
client.list_access_keys,
UserName=u['UserName'])['AccessKeyMetadata']
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('iam')
with self.executor_factory(max_workers=2) as w:
augment_set = [r for r in resources if self.annotation_key not in r]
self.log.debug(
"Querying %d users' api keys" % len(augment_set))
list(w.map(
functools.partial(self.get_user_keys, client),
chunks(augment_set, 50)))
matched = []
for r in resources:
k_matched = []
for k in r[self.annotation_key]:
if self.match(k):
k_matched.append(k)
for k in k_matched:
k['c7n:matched-type'] = 'access'
self.merge_annotation(r, self.matched_annotation_key, k_matched)
if k_matched:
matched.append(r)
return matched
# Mfa-device filter for iam-users
@User.filter_registry.register('mfa-device')
class UserMfaDevice(ValueFilter):
"""Filter iam-users based on mfa-device status
:example:
.. code-block:: yaml
policies:
- name: mfa-enabled-users
resource: iam-user
filters:
- type: mfa-device
key: UserName
value: not-null
"""
schema = type_schema('mfa-device', rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('iam:ListMfaDevices',)
def __init__(self, *args, **kw):
super(UserMfaDevice, self).__init__(*args, **kw)
self.data['key'] = 'MFADevices'
def process(self, resources, event=None):
def _user_mfa_devices(resource):
client = local_session(self.manager.session_factory).client('iam')
resource['MFADevices'] = client.list_mfa_devices(
UserName=resource['UserName'])['MFADevices']
with self.executor_factory(max_workers=2) as w:
query_resources = [
r for r in resources if 'MFADevices' not in r]
self.log.debug(
"Querying %d users' mfa devices" % len(query_resources))
list(w.map(_user_mfa_devices, query_resources))
matched = []
for r in resources:
if self.match(r):
matched.append(r)
return matched
@User.action_registry.register('post-finding')
class UserFinding(OtherResourcePostFinding):
def format_resource(self, r):
if any(filter(lambda x: isinstance(x, UserAccessKey), self.manager.iter_filters())):
details = {
"UserName": "arn:aws:iam:{}:user/{}".format(
self.manager.config.account_id, r["c7n:AccessKeys"][0]["UserName"]
),
"Status": r["c7n:AccessKeys"][0]["Status"],
"CreatedAt": r["c7n:AccessKeys"][0]["CreateDate"].isoformat(),
}
accesskey = {
"Type": "AwsIamAccessKey",
"Id": r["c7n:AccessKeys"][0]["AccessKeyId"],
"Region": self.manager.config.region,
"Details": {"AwsIamAccessKey": filter_empty(details)},
}
return filter_empty(accesskey)
else:
return super(UserFinding, self).format_resource(r)
@User.action_registry.register('delete')
class UserDelete(BaseAction):
"""Delete a user or properties of a user.
    For example, if you keep a whitelist of valid (machine-)users and want to
    ensure that no users have been created without documentation.
    You can use either the 'credential' or the 'username'
    filter. 'credential' has an SLA of up to 4h
    (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html),
    but the added benefit of performing fewer API calls, whereas
    'username' makes more API calls but is only as stale as your cache.
:example:
.. code-block:: yaml
# using a 'credential' filter'
- name: iam-only-whitelisted-users
resource: iam-user
filters:
- type: credential
key: user
op: not-in
value:
- valid-user-1
- valid-user-2
actions:
- delete
# using a 'username' filter with 'UserName'
- name: iam-only-whitelisted-users
resource: iam-user
filters:
- type: value
key: UserName
op: not-in
value:
- valid-user-1
- valid-user-2
actions:
- delete
# using a 'username' filter with 'Arn'
- name: iam-only-whitelisted-users
resource: iam-user
filters:
- type: value
key: Arn
op: not-in
value:
- arn:aws:iam:123456789012:user/valid-user-1
- arn:aws:iam:123456789012:user/valid-user-2
actions:
- delete
Additionally, you can specify the options to delete properties of an iam-user,
including console-access, access-keys, attached-user-policies,
inline-user-policies, mfa-devices, groups,
ssh-keys, signing-certificates, and service-specific-credentials.
Note: using options will _not_ delete the user itself, only the items specified
by ``options`` that are attached to the respective iam-user. To delete a user
completely, use the ``delete`` action without specifying ``options``.
:example:
.. code-block:: yaml
- name: delete-console-access-unless-valid
comment: |
finds iam-users with console access and deletes console access unless
the username is included in whitelist
resource: iam-user
filters:
- type: username
key: UserName
op: not-in
value:
- valid-user-1
- valid-user-2
- type: credential
key: Status
value: Active
actions:
- type: delete
options:
- console-access
- name: delete-misc-access-for-iam-user
comment: |
deletes multiple options from test_user
resource: iam-user
filters:
- UserName: test_user
actions:
- type: delete
options:
- mfa-devices
- access-keys
- ssh-keys
"""
ORDERED_OPTIONS = OrderedDict([
('console-access', 'delete_console_access'),
('access-keys', 'delete_access_keys'),
('attached-user-policies', 'delete_attached_user_policies'),
('inline-user-policies', 'delete_inline_user_policies'),
('mfa-devices', 'delete_hw_mfa_devices'),
('groups', 'delete_groups'),
('ssh-keys', 'delete_ssh_keys'),
('signing-certificates', 'delete_signing_certificates'),
('service-specific-credentials', 'delete_service_specific_credentials'),
])
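    # maps option name -> deletion helper; process_user() applies the selected
    # options in this declared order and only deletes the user itself when no
    # options are given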
COMPOUND_OPTIONS = {
'user-policies': ['attached-user-policies', 'inline-user-policies'],
}
schema = type_schema(
'delete',
options={
'type': 'array',
'items': {
'type': 'string',
'enum': list(ORDERED_OPTIONS.keys()) + list(COMPOUND_OPTIONS.keys()),
}
})
permissions = (
'iam:ListAttachedUserPolicies',
'iam:ListAccessKeys',
'iam:ListGroupsForUser',
'iam:ListMFADevices',
'iam:ListServiceSpecificCredentials',
'iam:ListSigningCertificates',
'iam:ListSSHPublicKeys',
'iam:DeactivateMFADevice',
'iam:DeleteAccessKey',
'iam:DeleteLoginProfile',
'iam:DeleteSigningCertificate',
'iam:DeleteSSHPublicKey',
'iam:DeleteUser',
'iam:DeleteUserPolicy',
'iam:DetachUserPolicy',
'iam:RemoveUserFromGroup')
@staticmethod
def delete_console_access(client, r):
try:
client.delete_login_profile(
UserName=r['UserName'])
except ClientError as e:
if e.response['Error']['Code'] not in ('NoSuchEntity',):
raise
@staticmethod
def delete_access_keys(client, r):
response = client.list_access_keys(UserName=r['UserName'])
for access_key in response['AccessKeyMetadata']:
client.delete_access_key(UserName=r['UserName'],
AccessKeyId=access_key['AccessKeyId'])
@staticmethod
def delete_attached_user_policies(client, r):
response = client.list_attached_user_policies(UserName=r['UserName'])
for user_policy in response['AttachedPolicies']:
client.detach_user_policy(
UserName=r['UserName'], PolicyArn=user_policy['PolicyArn'])
@staticmethod
def delete_inline_user_policies(client, r):
response = client.list_user_policies(UserName=r['UserName'])
for user_policy_name in response['PolicyNames']:
client.delete_user_policy(
UserName=r['UserName'], PolicyName=user_policy_name)
@staticmethod
def delete_hw_mfa_devices(client, r):
response = client.list_mfa_devices(UserName=r['UserName'])
for mfa_device in response['MFADevices']:
client.deactivate_mfa_device(
UserName=r['UserName'], SerialNumber=mfa_device['SerialNumber'])
@staticmethod
def delete_groups(client, r):
response = client.list_groups_for_user(UserName=r['UserName'])
for user_group in response['Groups']:
client.remove_user_from_group(
UserName=r['UserName'], GroupName=user_group['GroupName'])
@staticmethod
def delete_ssh_keys(client, r):
response = client.list_ssh_public_keys(UserName=r['UserName'])
for key in response.get('SSHPublicKeys', ()):
client.delete_ssh_public_key(
UserName=r['UserName'], SSHPublicKeyId=key['SSHPublicKeyId'])
@staticmethod
def delete_signing_certificates(client, r):
response = client.list_signing_certificates(UserName=r['UserName'])
for cert in response.get('Certificates', ()):
client.delete_signing_certificate(
UserName=r['UserName'], CertificateId=cert['CertificateId'])
@staticmethod
def delete_service_specific_credentials(client, r):
# Service specific user credentials (codecommit)
response = client.list_service_specific_credentials(UserName=r['UserName'])
for screds in response.get('ServiceSpecificCredentials', ()):
client.delete_service_specific_credential(
UserName=r['UserName'],
ServiceSpecificCredentialId=screds['ServiceSpecificCredentialId'])
@staticmethod
def delete_user(client, r):
client.delete_user(UserName=r['UserName'])
def process(self, resources):
client = local_session(self.manager.session_factory).client('iam')
self.log.debug('Deleting user %s options: %s' %
(len(resources), self.data.get('options', 'all')))
for r in resources:
self.process_user(client, r)
def process_user(self, client, r):
user_options = self.data.get('options', list(self.ORDERED_OPTIONS.keys()))
# resolve compound options
for cmd in self.COMPOUND_OPTIONS:
if cmd in user_options:
user_options += self.COMPOUND_OPTIONS[cmd]
# process options in ordered fashion
for cmd in self.ORDERED_OPTIONS:
if cmd in user_options:
op = getattr(self, self.ORDERED_OPTIONS[cmd])
op(client, r)
if not self.data.get('options'):
self.delete_user(client, r)
@User.action_registry.register('remove-keys')
class UserRemoveAccessKey(BaseAction):
"""Delete or disable user's access keys.
    For example, if we wanted to disable keys after 90 days of non-use and
    delete them after 180 days of non-use:
:example:
.. code-block:: yaml
- name: iam-mfa-active-key-no-login
resource: iam-user
actions:
- type: remove-keys
disable: true
age: 90
- type: remove-keys
age: 180
"""
schema = type_schema(
'remove-keys',
matched={'type': 'boolean'},
age={'type': 'number'},
disable={'type': 'boolean'})
permissions = ('iam:ListAccessKeys', 'iam:UpdateAccessKey',
'iam:DeleteAccessKey')
def validate(self):
if self.data.get('matched') and self.data.get('age'):
raise PolicyValidationError(
"policy:%s cant mix matched and age parameters")
ftypes = {f.type for f in self.manager.iter_filters()}
if 'credential' in ftypes and 'access-key' in ftypes:
raise PolicyValidationError(
"policy:%s cant mix credential and access-key filters w/ delete action")
return self
def process(self, resources):
client = local_session(self.manager.session_factory).client('iam')
age = self.data.get('age')
disable = self.data.get('disable')
matched = self.data.get('matched')
if age:
threshold_date = datetime.datetime.now(tz=tzutc()) - timedelta(age)
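            # only keys created before this cutoff are acted upon below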
for r in resources:
if 'c7n:AccessKeys' not in r:
r['c7n:AccessKeys'] = client.list_access_keys(
UserName=r['UserName'])['AccessKeyMetadata']
keys = r['c7n:AccessKeys']
if matched:
m_keys = resolve_credential_keys(
r.get(CredentialReport.matched_annotation_key),
keys)
assert m_keys, "shouldn't have gotten this far without keys"
keys = m_keys
for k in keys:
if age:
if not k['CreateDate'] < threshold_date:
continue
if disable:
client.update_access_key(
UserName=r['UserName'],
AccessKeyId=k['AccessKeyId'],
Status='Inactive')
else:
client.delete_access_key(
UserName=r['UserName'],
AccessKeyId=k['AccessKeyId'])
def resolve_credential_keys(m_keys, keys):
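    # correlate credential-report matches with live access keys: report entries
    # are mapped to a key by comparing the report's 'last_rotated' timestamp
    # with the key's CreateDate; matches already typed 'access' pass through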
res = []
for k in m_keys:
if k['c7n:match-type'] == 'credential':
c_date = parse_date(k['last_rotated'])
for ak in keys:
if c_date == ak['CreateDate']:
ak = dict(ak)
ak['c7n:match-type'] = 'access'
if ak not in res:
res.append(ak)
elif k not in res:
res.append(k)
return res
#################
# IAM Groups #
#################
@Group.filter_registry.register('has-users')
class IamGroupUsers(Filter):
"""Filter IAM groups that have users attached based on True/False value:
True: Filter all IAM groups with users assigned to it
False: Filter all IAM groups without any users assigned to it
:example:
.. code-block:: yaml
- name: empty-iam-group
resource: iam-group
filters:
- type: has-users
value: False
"""
schema = type_schema('has-users', value={'type': 'boolean'})
permissions = ('iam:GetGroup',)
def _user_count(self, client, resource):
return len(client.get_group(GroupName=resource['GroupName'])['Users'])
def process(self, resources, events=None):
c = local_session(self.manager.session_factory).client('iam')
if self.data.get('value', True):
return [r for r in resources if self._user_count(c, r) > 0]
return [r for r in resources if self._user_count(c, r) == 0]
@Group.filter_registry.register('has-inline-policy')
class IamGroupInlinePolicy(Filter):
"""Filter IAM groups that have an inline-policy based on boolean value:
True: Filter all groups that have an inline-policy attached
False: Filter all groups that do not have an inline-policy attached
:example:
.. code-block:: yaml
- name: iam-groups-with-inline-policy
resource: iam-group
filters:
- type: has-inline-policy
value: True
"""
schema = type_schema('has-inline-policy', value={'type': 'boolean'})
permissions = ('iam:ListGroupPolicies',)
def _inline_policies(self, client, resource):
resource['c7n:InlinePolicies'] = client.list_group_policies(
GroupName=resource['GroupName'])['PolicyNames']
return resource
def process(self, resources, events=None):
c = local_session(self.manager.session_factory).client('iam')
value = self.data.get('value', True)
res = []
for r in resources:
r = self._inline_policies(c, r)
if len(r['c7n:InlinePolicies']) > 0 and value:
res.append(r)
if len(r['c7n:InlinePolicies']) == 0 and not value:
res.append(r)
return res<|fim▁end|> | matcher = self.get_eval_matcher()
operator = self.data.get('match-operator', 'and') == 'and' and all or any
|
<|file_name|>AggregatableDataElementFilter.java<|end_file_name|><|fim▁begin|>package org.hisp.dhis.system.filter;
/*
* Copyright (c) 2004-2015, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of the HISP project nor the names of its contributors may
<|fim▁hole|> * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import com.google.common.collect.Sets;
import org.hisp.dhis.common.ValueType;
import org.hisp.dhis.commons.filter.Filter;
import org.hisp.dhis.dataelement.DataElement;
import java.util.Set;
/**
* @author Lars Helge Overland
*/
public class AggregatableDataElementFilter
implements Filter<DataElement>
{
public static final AggregatableDataElementFilter INSTANCE = new AggregatableDataElementFilter();
private static final Set<ValueType> VALUE_TYPES = Sets.newHashSet(
ValueType.BOOLEAN, ValueType.TRUE_ONLY, ValueType.TEXT, ValueType.LONG_TEXT, ValueType.LETTER,
ValueType.INTEGER, ValueType.INTEGER_POSITIVE, ValueType.INTEGER_NEGATIVE, ValueType.INTEGER_ZERO_OR_POSITIVE,
ValueType.NUMBER, ValueType.UNIT_INTERVAL, ValueType.PERCENTAGE, ValueType.COORDINATE
);
@Override
public boolean retain( DataElement object )
{
return object != null && VALUE_TYPES.contains( object.getValueType() );
}
}<|fim▁end|> | * be used to endorse or promote products derived from this software without
* specific prior written permission.
*
|
<|file_name|>DialogsSearchAdapter.java<|end_file_name|><|fim▁begin|>/*
* This is the source code of Telegram for Android v. 5.x.x.
* It is licensed under GNU GPL v. 2 or later.
* You should have received a copy of the license in this archive (see LICENSE).
*
* Copyright Nikolai Kudashov, 2013-2018.
*/
package org.telegram.ui.Adapters;
import android.content.Context;
import android.text.SpannableStringBuilder;
import android.text.Spanned;
import android.text.TextUtils;
import android.util.LongSparseArray;
import android.util.SparseArray;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import org.telegram.PhoneFormat.PhoneFormat;
import org.telegram.SQLite.SQLiteCursor;
import org.telegram.SQLite.SQLitePreparedStatement;
import org.telegram.messenger.AndroidUtilities;
import org.telegram.messenger.ChatObject;
import org.telegram.messenger.ContactsController;
import org.telegram.messenger.MediaDataController;
import org.telegram.messenger.LocaleController;
import org.telegram.messenger.MessageObject;
import org.telegram.messenger.MessagesController;
import org.telegram.messenger.MessagesStorage;
import org.telegram.messenger.UserConfig;
import org.telegram.messenger.UserObject;
import org.telegram.messenger.Utilities;
import org.telegram.messenger.FileLog;
import org.telegram.messenger.R;
import org.telegram.tgnet.ConnectionsManager;
import org.telegram.tgnet.TLObject;
import org.telegram.tgnet.TLRPC;
import org.telegram.ui.ActionBar.Theme;
import org.telegram.ui.Cells.DialogCell;
import org.telegram.ui.Cells.GraySectionCell;
import org.telegram.ui.Cells.HashtagSearchCell;
import org.telegram.ui.Cells.HintDialogCell;
import org.telegram.ui.Cells.ProfileSearchCell;
import org.telegram.ui.Cells.TextCell;
import org.telegram.ui.Components.FlickerLoadingView;
import org.telegram.ui.Components.ForegroundColorSpanThemable;
import org.telegram.ui.Components.RecyclerListView;
import org.telegram.ui.FilteredSearchView;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import androidx.recyclerview.widget.LinearLayoutManager;
import androidx.recyclerview.widget.RecyclerView;
public class DialogsSearchAdapter extends RecyclerListView.SelectionAdapter {
private Context mContext;
private Runnable searchRunnable;
private Runnable searchRunnable2;
private ArrayList<TLObject> searchResult = new ArrayList<>();
private ArrayList<CharSequence> searchResultNames = new ArrayList<>();
private ArrayList<MessageObject> searchResultMessages = new ArrayList<>();
private ArrayList<String> searchResultHashtags = new ArrayList<>();
private String lastSearchText;
private boolean searchWas;
private int reqId = 0;
private int lastReqId;
private DialogsSearchAdapterDelegate delegate;
private int needMessagesSearch;
private boolean messagesSearchEndReached;
private String lastMessagesSearchString;
private String currentMessagesQuery;
private int nextSearchRate;
private int lastSearchId;
private int lastGlobalSearchId;
private int lastLocalSearchId;
private int lastMessagesSearchId;
private int dialogsType;
private SearchAdapterHelper searchAdapterHelper;
private RecyclerListView innerListView;
private int selfUserId;
private int currentAccount = UserConfig.selectedAccount;
private ArrayList<RecentSearchObject> recentSearchObjects = new ArrayList<>();
private LongSparseArray<RecentSearchObject> recentSearchObjectsById = new LongSparseArray<>();
private ArrayList<TLRPC.User> localTipUsers = new ArrayList<>();
private ArrayList<FiltersView.DateData> localTipDates = new ArrayList<>();
private FilteredSearchView.Delegate filtersDelegate;
private int folderId;
public boolean isSearching() {
return waitingResponseCount > 0;
}
public static class DialogSearchResult {
public TLObject object;
public int date;
public CharSequence name;
}
protected static class RecentSearchObject {
TLObject object;
int date;
long did;
}
public interface DialogsSearchAdapterDelegate {
void searchStateChanged(boolean searching, boolean animated);
void didPressedOnSubDialog(long did);
void needRemoveHint(int did);
void needClearList();
void runResultsEnterAnimation();
}
private class CategoryAdapterRecycler extends RecyclerListView.SelectionAdapter {
public void setIndex(int value) {
notifyDataSetChanged();
}
@Override
public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
View view = new HintDialogCell(mContext);
view.setLayoutParams(new RecyclerView.LayoutParams(AndroidUtilities.dp(80), AndroidUtilities.dp(86)));
return new RecyclerListView.Holder(view);
}
@Override
public boolean isEnabled(RecyclerView.ViewHolder holder) {
return true;
}
@Override
public void onBindViewHolder(RecyclerView.ViewHolder holder, int position) {
HintDialogCell cell = (HintDialogCell) holder.itemView;
TLRPC.TL_topPeer peer = MediaDataController.getInstance(currentAccount).hints.get(position);
TLRPC.Dialog dialog = new TLRPC.TL_dialog();
TLRPC.Chat chat = null;
TLRPC.User user = null;
int did = 0;
if (peer.peer.user_id != 0) {
did = peer.peer.user_id;
user = MessagesController.getInstance(currentAccount).getUser(peer.peer.user_id);
} else if (peer.peer.channel_id != 0) {
did = -peer.peer.channel_id;
chat = MessagesController.getInstance(currentAccount).getChat(peer.peer.channel_id);
} else if (peer.peer.chat_id != 0) {
did = -peer.peer.chat_id;
chat = MessagesController.getInstance(currentAccount).getChat(peer.peer.chat_id);
}
cell.setTag(did);
String name = "";
if (user != null) {
name = UserObject.getFirstName(user);
} else if (chat != null) {
name = chat.title;
}
cell.setDialog(did, true, name);
}
@Override
public int getItemCount() {
return MediaDataController.getInstance(currentAccount).hints.size();
}
}
public DialogsSearchAdapter(Context context, int messagesSearch, int type, int folderId) {
this.folderId = folderId;
searchAdapterHelper = new SearchAdapterHelper(false);
searchAdapterHelper.setDelegate(new SearchAdapterHelper.SearchAdapterHelperDelegate() {
@Override
public void onDataSetChanged(int searchId) {
waitingResponseCount--;
lastGlobalSearchId = searchId;
if (lastLocalSearchId != searchId) {
searchResult.clear();
}
if (lastMessagesSearchId != searchId) {
searchResultMessages.clear();
}
searchWas = true;
if (delegate != null) {
delegate.searchStateChanged(waitingResponseCount > 0, true);
}
notifyDataSetChanged();
if (delegate != null) {
delegate.runResultsEnterAnimation();
}
}
@Override
public void onSetHashtags(ArrayList<SearchAdapterHelper.HashtagObject> arrayList, HashMap<String, SearchAdapterHelper.HashtagObject> hashMap) {<|fim▁hole|> for (int a = 0; a < arrayList.size(); a++) {
searchResultHashtags.add(arrayList.get(a).hashtag);
}
if (delegate != null) {
delegate.searchStateChanged(waitingResponseCount > 0, false);
}
notifyDataSetChanged();
}
@Override
public boolean canApplySearchResults(int searchId) {
return searchId == lastSearchId;
}
});
mContext = context;
needMessagesSearch = messagesSearch;
dialogsType = type;
selfUserId = UserConfig.getInstance(currentAccount).getClientUserId();
loadRecentSearch();
MediaDataController.getInstance(currentAccount).loadHints(true);
}
public RecyclerListView getInnerListView() {
return innerListView;
}
public void setDelegate(DialogsSearchAdapterDelegate delegate) {
this.delegate = delegate;
}
public boolean isMessagesSearchEndReached() {
return messagesSearchEndReached;
}
public void loadMoreSearchMessages() {
if (reqId != 0) {
return;
}
searchMessagesInternal(lastMessagesSearchString, lastMessagesSearchId);
}
public String getLastSearchString() {
return lastMessagesSearchString;
}
private void searchMessagesInternal(final String query, int searchId) {
if (needMessagesSearch == 0 || TextUtils.isEmpty(lastMessagesSearchString) && TextUtils.isEmpty(query)) {
return;
}
if (reqId != 0) {
ConnectionsManager.getInstance(currentAccount).cancelRequest(reqId, true);
reqId = 0;
}
if (TextUtils.isEmpty(query)) {
searchResultMessages.clear();
lastReqId = 0;
lastMessagesSearchString = null;
searchWas = false;
notifyDataSetChanged();
return;
}
final TLRPC.TL_messages_searchGlobal req = new TLRPC.TL_messages_searchGlobal();
req.limit = 20;
req.q = query;
req.filter = new TLRPC.TL_inputMessagesFilterEmpty();
if (query.equals(lastMessagesSearchString) && !searchResultMessages.isEmpty()) {
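            // same query as before: continue paging from the last loaded message
            // by passing its id, the saved next_rate and its peer as the offset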
MessageObject lastMessage = searchResultMessages.get(searchResultMessages.size() - 1);
req.offset_id = lastMessage.getId();
req.offset_rate = nextSearchRate;
int id;
if (lastMessage.messageOwner.peer_id.channel_id != 0) {
id = -lastMessage.messageOwner.peer_id.channel_id;
} else if (lastMessage.messageOwner.peer_id.chat_id != 0) {
id = -lastMessage.messageOwner.peer_id.chat_id;
} else {
id = lastMessage.messageOwner.peer_id.user_id;
}
req.offset_peer = MessagesController.getInstance(currentAccount).getInputPeer(id);
} else {
req.offset_rate = 0;
req.offset_id = 0;
req.offset_peer = new TLRPC.TL_inputPeerEmpty();
}
lastMessagesSearchString = query;
final int currentReqId = ++lastReqId;
reqId = ConnectionsManager.getInstance(currentAccount).sendRequest(req, (response, error) -> {
final ArrayList<MessageObject> messageObjects = new ArrayList<>();
if (error == null) {
TLRPC.messages_Messages res = (TLRPC.messages_Messages) response;
SparseArray<TLRPC.Chat> chatsMap = new SparseArray<>();
SparseArray<TLRPC.User> usersMap = new SparseArray<>();
for (int a = 0; a < res.chats.size(); a++) {
TLRPC.Chat chat = res.chats.get(a);
chatsMap.put(chat.id, chat);
}
for (int a = 0; a < res.users.size(); a++) {
TLRPC.User user = res.users.get(a);
usersMap.put(user.id, user);
}
for (int a = 0; a < res.messages.size(); a++) {
TLRPC.Message message = res.messages.get(a);
MessageObject messageObject = new MessageObject(currentAccount, message, usersMap, chatsMap, false, true);
messageObjects.add(messageObject);
messageObject.setQuery(query);
}
}
AndroidUtilities.runOnUIThread(() -> {
if (currentReqId == lastReqId && (searchId <= 0 || searchId == lastSearchId)) {
waitingResponseCount--;
if (error == null) {
currentMessagesQuery = query;
TLRPC.messages_Messages res = (TLRPC.messages_Messages) response;
MessagesStorage.getInstance(currentAccount).putUsersAndChats(res.users, res.chats, true, true);
MessagesController.getInstance(currentAccount).putUsers(res.users, false);
MessagesController.getInstance(currentAccount).putChats(res.chats, false);
if (req.offset_id == 0) {
searchResultMessages.clear();
}
nextSearchRate = res.next_rate;
for (int a = 0; a < res.messages.size(); a++) {
TLRPC.Message message = res.messages.get(a);
long did = MessageObject.getDialogId(message);
Integer maxId = MessagesController.getInstance(currentAccount).deletedHistory.get(did);
if (maxId != null && message.id <= maxId) {
continue;
}
searchResultMessages.add(messageObjects.get(a));
long dialog_id = MessageObject.getDialogId(message);
ConcurrentHashMap<Long, Integer> read_max = message.out ? MessagesController.getInstance(currentAccount).dialogs_read_outbox_max : MessagesController.getInstance(currentAccount).dialogs_read_inbox_max;
Integer value = read_max.get(dialog_id);
if (value == null) {
value = MessagesStorage.getInstance(currentAccount).getDialogReadMax(message.out, dialog_id);
read_max.put(dialog_id, value);
}
message.unread = value < message.id;
}
searchWas = true;
messagesSearchEndReached = res.messages.size() != 20;
if (searchId > 0) {
lastMessagesSearchId = searchId;
if (lastLocalSearchId != searchId) {
searchResult.clear();
}
if (lastGlobalSearchId != searchId) {
searchAdapterHelper.clear();
}
}
notifyDataSetChanged();
if (delegate != null) {
delegate.searchStateChanged(waitingResponseCount > 0, true);
delegate.runResultsEnterAnimation();
}
}
}
reqId = 0;
});
}, ConnectionsManager.RequestFlagFailOnServerErrors);
}
public boolean hasRecentSearch() {
return dialogsType != 2 && dialogsType != 4 && dialogsType != 5 && dialogsType != 6 && (!recentSearchObjects.isEmpty() || !MediaDataController.getInstance(currentAccount).hints.isEmpty());
}
public boolean isRecentSearchDisplayed() {
return needMessagesSearch != 2 && !searchWas && (!recentSearchObjects.isEmpty() || !MediaDataController.getInstance(currentAccount).hints.isEmpty()) && dialogsType != 2 && dialogsType != 4 && dialogsType != 5 && dialogsType != 6;
}
public void loadRecentSearch() {
MessagesStorage.getInstance(currentAccount).getStorageQueue().postRunnable(() -> {
try {
SQLiteCursor cursor = MessagesStorage.getInstance(currentAccount).getDatabase().queryFinalized("SELECT did, date FROM search_recent WHERE 1");
ArrayList<Integer> usersToLoad = new ArrayList<>();
ArrayList<Integer> chatsToLoad = new ArrayList<>();
ArrayList<Integer> encryptedToLoad = new ArrayList<>();
ArrayList<TLRPC.User> encUsers = new ArrayList<>();
final ArrayList<RecentSearchObject> arrayList = new ArrayList<>();
final LongSparseArray<RecentSearchObject> hashMap = new LongSparseArray<>();
while (cursor.next()) {
long did = cursor.longValue(0);
boolean add = false;
int lower_id = (int) did;
int high_id = (int) (did >> 32);
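                    // dialog ids are packed: the low 32 bits hold a user id (> 0) or a
                    // negated chat/channel id (< 0); when they are zero the high 32 bits
                    // hold an encrypted chat id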
if (lower_id != 0) {
if (lower_id > 0) {
if (dialogsType != 2 && !usersToLoad.contains(lower_id)) {
usersToLoad.add(lower_id);
add = true;
}
} else {
if (!chatsToLoad.contains(-lower_id)) {
chatsToLoad.add(-lower_id);
add = true;
}
}
} else if (dialogsType == 0 || dialogsType == 3) {
if (!encryptedToLoad.contains(high_id)) {
encryptedToLoad.add(high_id);
add = true;
}
}
if (add) {
RecentSearchObject recentSearchObject = new RecentSearchObject();
recentSearchObject.did = did;
recentSearchObject.date = cursor.intValue(1);
arrayList.add(recentSearchObject);
hashMap.put(recentSearchObject.did, recentSearchObject);
}
}
cursor.dispose();
ArrayList<TLRPC.User> users = new ArrayList<>();
if (!encryptedToLoad.isEmpty()) {
ArrayList<TLRPC.EncryptedChat> encryptedChats = new ArrayList<>();
MessagesStorage.getInstance(currentAccount).getEncryptedChatsInternal(TextUtils.join(",", encryptedToLoad), encryptedChats, usersToLoad);
for (int a = 0; a < encryptedChats.size(); a++) {
hashMap.get((long) encryptedChats.get(a).id << 32).object = encryptedChats.get(a);
}
}
if (!chatsToLoad.isEmpty()) {
ArrayList<TLRPC.Chat> chats = new ArrayList<>();
MessagesStorage.getInstance(currentAccount).getChatsInternal(TextUtils.join(",", chatsToLoad), chats);
for (int a = 0; a < chats.size(); a++) {
TLRPC.Chat chat = chats.get(a);
long did = -chat.id;
if (chat.migrated_to != null) {
RecentSearchObject recentSearchObject = hashMap.get(did);
hashMap.remove(did);
if (recentSearchObject != null) {
arrayList.remove(recentSearchObject);
}
} else {
hashMap.get(did).object = chat;
}
}
}
if (!usersToLoad.isEmpty()) {
MessagesStorage.getInstance(currentAccount).getUsersInternal(TextUtils.join(",", usersToLoad), users);
for (int a = 0; a < users.size(); a++) {
TLRPC.User user = users.get(a);
RecentSearchObject recentSearchObject = hashMap.get(user.id);
if (recentSearchObject != null) {
recentSearchObject.object = user;
}
}
}
Collections.sort(arrayList, (lhs, rhs) -> {
if (lhs.date < rhs.date) {
return 1;
} else if (lhs.date > rhs.date) {
return -1;
} else {
return 0;
}
});
AndroidUtilities.runOnUIThread(() -> setRecentSearch(arrayList, hashMap));
} catch (Exception e) {
FileLog.e(e);
}
});
}
public void putRecentSearch(final long did, TLObject object) {
RecentSearchObject recentSearchObject = recentSearchObjectsById.get(did);
if (recentSearchObject == null) {
recentSearchObject = new RecentSearchObject();
recentSearchObjectsById.put(did, recentSearchObject);
} else {
recentSearchObjects.remove(recentSearchObject);
}
recentSearchObjects.add(0, recentSearchObject);
recentSearchObject.did = did;
recentSearchObject.object = object;
recentSearchObject.date = (int) (System.currentTimeMillis() / 1000);
notifyDataSetChanged();
MessagesStorage.getInstance(currentAccount).getStorageQueue().postRunnable(() -> {
try {
SQLitePreparedStatement state = MessagesStorage.getInstance(currentAccount).getDatabase().executeFast("REPLACE INTO search_recent VALUES(?, ?)");
state.requery();
state.bindLong(1, did);
state.bindInteger(2, (int) (System.currentTimeMillis() / 1000));
state.step();
state.dispose();
} catch (Exception e) {
FileLog.e(e);
}
});
}
public void clearRecentSearch() {
recentSearchObjectsById = new LongSparseArray<>();
recentSearchObjects = new ArrayList<>();
notifyDataSetChanged();
MessagesStorage.getInstance(currentAccount).getStorageQueue().postRunnable(() -> {
try {
MessagesStorage.getInstance(currentAccount).getDatabase().executeFast("DELETE FROM search_recent WHERE 1").stepThis().dispose();
} catch (Exception e) {
FileLog.e(e);
}
});
}
public void removeRecentSearch(long did) {
RecentSearchObject object = recentSearchObjectsById.get(did);
if (object == null) {
return;
}
recentSearchObjectsById.remove(did);
recentSearchObjects.remove(object);
notifyDataSetChanged();
MessagesStorage.getInstance(currentAccount).getStorageQueue().postRunnable(() -> {
try {
MessagesStorage.getInstance(currentAccount).getDatabase().executeFast("DELETE FROM search_recent WHERE did = " + did).stepThis().dispose();
} catch (Exception e) {
FileLog.e(e);
}
});
}
public void addHashtagsFromMessage(CharSequence message) {
searchAdapterHelper.addHashtagsFromMessage(message);
}
private void setRecentSearch(ArrayList<RecentSearchObject> arrayList, LongSparseArray<RecentSearchObject> hashMap) {
recentSearchObjects = arrayList;
recentSearchObjectsById = hashMap;
for (int a = 0; a < recentSearchObjects.size(); a++) {
RecentSearchObject recentSearchObject = recentSearchObjects.get(a);
if (recentSearchObject.object instanceof TLRPC.User) {
MessagesController.getInstance(currentAccount).putUser((TLRPC.User) recentSearchObject.object, true);
} else if (recentSearchObject.object instanceof TLRPC.Chat) {
MessagesController.getInstance(currentAccount).putChat((TLRPC.Chat) recentSearchObject.object, true);
} else if (recentSearchObject.object instanceof TLRPC.EncryptedChat) {
MessagesController.getInstance(currentAccount).putEncryptedChat((TLRPC.EncryptedChat) recentSearchObject.object, true);
}
}
notifyDataSetChanged();
}
private void searchDialogsInternal(final String query, final int searchId) {
if (needMessagesSearch == 2) {
return;
}
String q = query.trim().toLowerCase();
if (q.length() == 0) {
lastSearchId = 0;
updateSearchResults(new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), lastSearchId);
return;
}
MessagesStorage.getInstance(currentAccount).getStorageQueue().postRunnable(() -> {
ArrayList<TLObject> resultArray = new ArrayList<>();
ArrayList<CharSequence> resultArrayNames = new ArrayList<>();
ArrayList<TLRPC.User> encUsers = new ArrayList<>();
MessagesStorage.getInstance(currentAccount).localSearch(dialogsType, q, resultArray, resultArrayNames, encUsers, -1);
updateSearchResults(resultArray, resultArrayNames, encUsers, searchId);
FiltersView.fillTipDates(q, localTipDates);
AndroidUtilities.runOnUIThread(() -> {
if (filtersDelegate != null) {
filtersDelegate.updateFiltersView(false, null, localTipDates);
}
});
});
}
private void updateSearchResults(final ArrayList<TLObject> result, final ArrayList<CharSequence> names, final ArrayList<TLRPC.User> encUsers, final int searchId) {
AndroidUtilities.runOnUIThread(() -> {
waitingResponseCount--;
if (searchId != lastSearchId) {
return;
}
lastLocalSearchId = searchId;
if (lastGlobalSearchId != searchId) {
searchAdapterHelper.clear();
}
if (lastMessagesSearchId != searchId) {
searchResultMessages.clear();
}
searchWas = true;
for (int a = 0; a < result.size(); a++) {
TLObject obj = result.get(a);
if (obj instanceof TLRPC.User) {
TLRPC.User user = (TLRPC.User) obj;
MessagesController.getInstance(currentAccount).putUser(user, true);
} else if (obj instanceof TLRPC.Chat) {
TLRPC.Chat chat = (TLRPC.Chat) obj;
MessagesController.getInstance(currentAccount).putChat(chat, true);
} else if (obj instanceof TLRPC.EncryptedChat) {
TLRPC.EncryptedChat chat = (TLRPC.EncryptedChat) obj;
MessagesController.getInstance(currentAccount).putEncryptedChat(chat, true);
}
}
MessagesController.getInstance(currentAccount).putUsers(encUsers, true);
searchResult = result;
searchResultNames = names;
searchAdapterHelper.mergeResults(searchResult);
notifyDataSetChanged();
if (delegate != null) {
delegate.searchStateChanged(waitingResponseCount > 0, true);
delegate.runResultsEnterAnimation();
}
});
}
public boolean isHashtagSearch() {
return !searchResultHashtags.isEmpty();
}
public void clearRecentHashtags() {
searchAdapterHelper.clearRecentHashtags();
searchResultHashtags.clear();
notifyDataSetChanged();
}
int waitingResponseCount;
public void searchDialogs(String text) {
if (text != null && text.equals(lastSearchText)) {
return;
}
lastSearchText = text;
if (searchRunnable != null) {
Utilities.searchQueue.cancelRunnable(searchRunnable);
searchRunnable = null;
}
if (searchRunnable2 != null) {
AndroidUtilities.cancelRunOnUIThread(searchRunnable2);
searchRunnable2 = null;
}
String query;
if (text != null) {
query = text.trim();
} else {
query = null;
}
if (TextUtils.isEmpty(query)) {
searchAdapterHelper.unloadRecentHashtags();
searchResult.clear();
searchResultNames.clear();
searchResultHashtags.clear();
searchAdapterHelper.mergeResults(null);
searchAdapterHelper.queryServerSearch(null, true, true, true, true, dialogsType == 2, 0, dialogsType == 0, 0, 0);
searchWas = false;
lastSearchId = 0;
waitingResponseCount = 0;
if (delegate != null) {
delegate.searchStateChanged(false, true);
}
searchMessagesInternal(null, 0);
notifyDataSetChanged();
localTipDates.clear();
if (filtersDelegate != null) {
filtersDelegate.updateFiltersView(false, null, localTipDates);
}
} else {
if (needMessagesSearch != 2 && (query.startsWith("#") && query.length() == 1)) {
messagesSearchEndReached = true;
if (searchAdapterHelper.loadRecentHashtags()) {
searchResultMessages.clear();
searchResultHashtags.clear();
ArrayList<SearchAdapterHelper.HashtagObject> hashtags = searchAdapterHelper.getHashtags();
for (int a = 0; a < hashtags.size(); a++) {
searchResultHashtags.add(hashtags.get(a).hashtag);
}
waitingResponseCount = 0;
notifyDataSetChanged();
if (delegate != null) {
delegate.searchStateChanged(false, false);
}
}
} else {
searchResultHashtags.clear();
}
final int searchId = ++lastSearchId;
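            // three asynchronous replies are awaited before the spinner stops: the local dialog search, the server search and the message search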
waitingResponseCount = 3;
notifyDataSetChanged();
if (delegate != null) {
delegate.searchStateChanged(true, false);
}
Utilities.searchQueue.postRunnable(searchRunnable = () -> {
searchRunnable = null;
searchDialogsInternal(query, searchId);
AndroidUtilities.runOnUIThread(searchRunnable2 = () -> {
searchRunnable2 = null;
if (searchId != lastSearchId) {
return;
}
if (needMessagesSearch != 2) {
searchAdapterHelper.queryServerSearch(query, true, dialogsType != 4, true, dialogsType != 4, dialogsType == 2, 0, dialogsType == 0, 0, searchId);
} else {
waitingResponseCount -= 2;
}
if (needMessagesSearch == 0) {
waitingResponseCount--;
} else {
searchMessagesInternal(text, searchId);
}
});
}, 300);
}
}
@Override
public int getItemCount() {
if (waitingResponseCount == 3) {
return 0;
}
if (isRecentSearchDisplayed()) {
return (!recentSearchObjects.isEmpty() ? recentSearchObjects.size() + 1 : 0) + (!MediaDataController.getInstance(currentAccount).hints.isEmpty() ? 1 : 0);
}
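        // otherwise the count sums the local results, local server matches, phone matches, the global section (plus its header) and the messages section (plus its header and a loading row while more pages remain)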
int count = 0;
if (!searchResultHashtags.isEmpty()) {
count += searchResultHashtags.size() + 1;
return count;
}
count += searchResult.size();
int localServerCount = searchAdapterHelper.getLocalServerSearch().size();
int globalCount = searchAdapterHelper.getGlobalSearch().size();
int phoneCount = searchAdapterHelper.getPhoneSearch().size();
int messagesCount = searchResultMessages.size();
count += localServerCount;
if (globalCount != 0) {
count += globalCount + 1;
}
if (phoneCount != 0) {
count += phoneCount;
}
if (messagesCount != 0) {
count += messagesCount + 1 + (messagesSearchEndReached ? 0 : 1);
}
return count;
}
public Object getItem(int i) {
if (isRecentSearchDisplayed()) {
int offset = (!MediaDataController.getInstance(currentAccount).hints.isEmpty() ? 1 : 0);
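            // row 0 is the horizontal hints list when hints exist; the additional -1 skips the "Recent" section header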
if (i > offset && i - 1 - offset < recentSearchObjects.size()) {
TLObject object = recentSearchObjects.get(i - 1 - offset).object;
if (object instanceof TLRPC.User) {
TLRPC.User user = MessagesController.getInstance(currentAccount).getUser(((TLRPC.User) object).id);
if (user != null) {
object = user;
}
} else if (object instanceof TLRPC.Chat) {
TLRPC.Chat chat = MessagesController.getInstance(currentAccount).getChat(((TLRPC.Chat) object).id);
if (chat != null) {
object = chat;
}
}
return object;
} else {
return null;
}
}
if (!searchResultHashtags.isEmpty()) {
if (i > 0) {
return searchResultHashtags.get(i - 1);
} else {
return null;
}
}
ArrayList<TLObject> globalSearch = searchAdapterHelper.getGlobalSearch();
ArrayList<TLObject> localServerSearch = searchAdapterHelper.getLocalServerSearch();
ArrayList<Object> phoneSearch = searchAdapterHelper.getPhoneSearch();
int localCount = searchResult.size();
int localServerCount = localServerSearch.size();
int phoneCount = phoneSearch.size();
int globalCount = globalSearch.isEmpty() ? 0 : globalSearch.size() + 1;
int messagesCount = searchResultMessages.isEmpty() ? 0 : searchResultMessages.size() + 1;
if (i >= 0 && i < localCount) {
return searchResult.get(i);
} else {
i -= localCount;
if (i >= 0 && i < localServerCount) {
return localServerSearch.get(i);
} else {
i -= localServerCount;
if (i >= 0 && i < phoneCount) {
return phoneSearch.get(i);
} else {
i -= phoneCount;
if (i > 0 && i < globalCount) {
return globalSearch.get(i - 1);
} else {
i -= globalCount;
if (i > 0 && i < messagesCount) {
return searchResultMessages.get(i - 1);
}
}
}
}
}
return null;
}
public boolean isGlobalSearch(int i) {
if (isRecentSearchDisplayed()) {
return false;
}
if (!searchResultHashtags.isEmpty()) {
return false;
}
ArrayList<TLObject> globalSearch = searchAdapterHelper.getGlobalSearch();
ArrayList<TLObject> localServerSearch = searchAdapterHelper.getLocalServerSearch();
int localCount = searchResult.size();
int localServerCount = localServerSearch.size();
int phoneCount = searchAdapterHelper.getPhoneSearch().size();
int globalCount = globalSearch.isEmpty() ? 0 : globalSearch.size() + 1;
int messagesCount = searchResultMessages.isEmpty() ? 0 : searchResultMessages.size() + 1;
if (i >= 0 && i < localCount) {
return false;
} else {
i -= localCount;
if (i >= 0 && i < localServerCount) {
return false;
} else {
i -= localServerCount;
if (i > 0 && i < phoneCount) {
return false;
} else {
i -= phoneCount;
if (i > 0 && i < globalCount) {
return true;
} else {
i -= globalCount;
if (i > 0 && i < messagesCount) {
return false;
}
}
}
}
}
return false;
}
@Override
public long getItemId(int i) {
return i;
}
@Override
public boolean isEnabled(RecyclerView.ViewHolder holder) {
int type = holder.getItemViewType();
return type != 1 && type != 3;
}
@Override
public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
View view = null;
switch (viewType) {
case 0:
view = new ProfileSearchCell(mContext);
break;
case 1:
view = new GraySectionCell(mContext);
break;
case 2:
view = new DialogCell(mContext, false, true);
break;
case 3:
FlickerLoadingView flickerLoadingView = new FlickerLoadingView(mContext);
flickerLoadingView.setViewType(FlickerLoadingView.DIALOG_TYPE);
flickerLoadingView.setIsSingleCell(true);
view = flickerLoadingView;
break;
case 4:
view = new HashtagSearchCell(mContext);
break;
case 5:
RecyclerListView horizontalListView = new RecyclerListView(mContext) {
@Override
public boolean onInterceptTouchEvent(MotionEvent e) {
if (getParent() != null && getParent().getParent() != null) {
getParent().getParent().requestDisallowInterceptTouchEvent(canScrollHorizontally(-1) || canScrollHorizontally(1));
}
return super.onInterceptTouchEvent(e);
}
};
horizontalListView.setTag(9);
horizontalListView.setItemAnimator(null);
horizontalListView.setLayoutAnimation(null);
LinearLayoutManager layoutManager = new LinearLayoutManager(mContext) {
@Override
public boolean supportsPredictiveItemAnimations() {
return false;
}
};
layoutManager.setOrientation(LinearLayoutManager.HORIZONTAL);
horizontalListView.setLayoutManager(layoutManager);
//horizontalListView.setDisallowInterceptTouchEvents(true);
horizontalListView.setAdapter(new CategoryAdapterRecycler());
horizontalListView.setOnItemClickListener((view1, position) -> {
if (delegate != null) {
delegate.didPressedOnSubDialog((Integer) view1.getTag());
}
});
horizontalListView.setOnItemLongClickListener((view12, position) -> {
if (delegate != null) {
delegate.needRemoveHint((Integer) view12.getTag());
}
return true;
});
view = horizontalListView;
innerListView = horizontalListView;
break;
case 6:
view = new TextCell(mContext, 16, false);
break;
}
if (viewType == 5) {
view.setLayoutParams(new RecyclerView.LayoutParams(RecyclerView.LayoutParams.MATCH_PARENT, AndroidUtilities.dp(86)));
} else {
view.setLayoutParams(new RecyclerView.LayoutParams(RecyclerView.LayoutParams.MATCH_PARENT, RecyclerView.LayoutParams.WRAP_CONTENT));
}
return new RecyclerListView.Holder(view);
}
@Override
public void onBindViewHolder(RecyclerView.ViewHolder holder, int position) {
switch (holder.getItemViewType()) {
case 0: {
ProfileSearchCell cell = (ProfileSearchCell) holder.itemView;
TLRPC.User user = null;
TLRPC.Chat chat = null;
TLRPC.EncryptedChat encryptedChat = null;
CharSequence username = null;
CharSequence name = null;
boolean isRecent = false;
String un = null;
Object obj = getItem(position);
if (obj instanceof TLRPC.User) {
user = (TLRPC.User) obj;
un = user.username;
} else if (obj instanceof TLRPC.Chat) {
chat = MessagesController.getInstance(currentAccount).getChat(((TLRPC.Chat) obj).id);
if (chat == null) {
chat = (TLRPC.Chat) obj;
}
un = chat.username;
} else if (obj instanceof TLRPC.EncryptedChat) {
encryptedChat = MessagesController.getInstance(currentAccount).getEncryptedChat(((TLRPC.EncryptedChat) obj).id);
user = MessagesController.getInstance(currentAccount).getUser(encryptedChat.user_id);
}
if (isRecentSearchDisplayed()) {
isRecent = true;
cell.useSeparator = position != getItemCount() - 1;
} else {
ArrayList<TLObject> globalSearch = searchAdapterHelper.getGlobalSearch();
ArrayList<Object> phoneSearch = searchAdapterHelper.getPhoneSearch();
int localCount = searchResult.size();
int localServerCount = searchAdapterHelper.getLocalServerSearch().size();
int phoneCount = phoneSearch.size();
int phoneCount2 = phoneCount;
if (phoneCount > 0 && phoneSearch.get(phoneCount - 1) instanceof String) {
phoneCount2 -= 2;
}
int globalCount = globalSearch.isEmpty() ? 0 : globalSearch.size() + 1;
cell.useSeparator = (position != getItemCount() - 1 && position != localCount + phoneCount2 + localServerCount - 1 && position != localCount + globalCount + phoneCount + localServerCount - 1);
if (position < searchResult.size()) {
name = searchResultNames.get(position);
if (name != null && user != null && user.username != null && user.username.length() > 0) {
if (name.toString().startsWith("@" + user.username)) {
username = name;
name = null;
}
}
} else {
String foundUserName = searchAdapterHelper.getLastFoundUsername();
if (!TextUtils.isEmpty(foundUserName)) {
String nameSearch = null;
String nameSearchLower = null;
int index;
if (user != null) {
nameSearch = ContactsController.formatName(user.first_name, user.last_name);
} else if (chat != null) {
nameSearch = chat.title;
}
if (nameSearch != null && (index = AndroidUtilities.indexOfIgnoreCase(nameSearch, foundUserName)) != -1) {
SpannableStringBuilder spannableStringBuilder = new SpannableStringBuilder(nameSearch);
spannableStringBuilder.setSpan(new ForegroundColorSpanThemable(Theme.key_windowBackgroundWhiteBlueText4), index, index + foundUserName.length(), Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
name = spannableStringBuilder;
} else if (un != null) {
if (foundUserName.startsWith("@")) {
foundUserName = foundUserName.substring(1);
}
try {
SpannableStringBuilder spannableStringBuilder = new SpannableStringBuilder();
spannableStringBuilder.append("@");
spannableStringBuilder.append(un);
if ((index = AndroidUtilities.indexOfIgnoreCase(un, foundUserName)) != -1) {
int len = foundUserName.length();
if (index == 0) {
len++;
} else {
index++;
}
spannableStringBuilder.setSpan(new ForegroundColorSpanThemable(Theme.key_windowBackgroundWhiteBlueText4), index, index + len, Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
}
username = spannableStringBuilder;
} catch (Exception e) {
username = un;
FileLog.e(e);
}
}
}
}
}
boolean savedMessages = false;
if (user != null && user.id == selfUserId) {
name = LocaleController.getString("SavedMessages", R.string.SavedMessages);
username = null;
savedMessages = true;
}
if (chat != null && chat.participants_count != 0) {
String membersString;
if (ChatObject.isChannel(chat) && !chat.megagroup) {
membersString = LocaleController.formatPluralString("Subscribers", chat.participants_count);
} else {
membersString = LocaleController.formatPluralString("Members", chat.participants_count);
}
if (username instanceof SpannableStringBuilder) {
((SpannableStringBuilder) username).append(", ").append(membersString);
} else if (!TextUtils.isEmpty(username)) {
username = TextUtils.concat(username, ", ", membersString);
} else {
username = membersString;
}
}
cell.setData(user != null ? user : chat, encryptedChat, name, username, isRecent, savedMessages);
break;
}
case 1: {
GraySectionCell cell = (GraySectionCell) holder.itemView;
if (isRecentSearchDisplayed()) {
int offset = (!MediaDataController.getInstance(currentAccount).hints.isEmpty() ? 1 : 0);
if (position < offset) {
cell.setText(LocaleController.getString("ChatHints", R.string.ChatHints));
} else {
cell.setText(LocaleController.getString("Recent", R.string.Recent), LocaleController.getString("ClearButton", R.string.ClearButton), v -> {
if (delegate != null) {
delegate.needClearList();
}
});
}
} else if (!searchResultHashtags.isEmpty()) {
cell.setText(LocaleController.getString("Hashtags", R.string.Hashtags), LocaleController.getString("ClearButton", R.string.ClearButton), v -> {
if (delegate != null) {
delegate.needClearList();
}
});
} else {
ArrayList<TLObject> globalSearch = searchAdapterHelper.getGlobalSearch();
int localCount = searchResult.size();
int localServerCount = searchAdapterHelper.getLocalServerSearch().size();
int phoneCount = searchAdapterHelper.getPhoneSearch().size();
int globalCount = globalSearch.isEmpty() ? 0 : globalSearch.size() + 1;
int messagesCount = searchResultMessages.isEmpty() ? 0 : searchResultMessages.size() + 1;
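                // this gray header may open the phone, global or messages section; walk the section offsets to find out which title to show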
position -= localCount + localServerCount;
if (position >= 0 && position < phoneCount) {
cell.setText(LocaleController.getString("PhoneNumberSearch", R.string.PhoneNumberSearch));
} else {
position -= phoneCount;
if (position >= 0 && position < globalCount) {
cell.setText(LocaleController.getString("GlobalSearch", R.string.GlobalSearch));
} else {
cell.setText(LocaleController.getString("SearchMessages", R.string.SearchMessages));
}
}
}
break;
}
case 2: {
DialogCell cell = (DialogCell) holder.itemView;
cell.useSeparator = (position != getItemCount() - 1);
MessageObject messageObject = (MessageObject) getItem(position);
cell.setDialog(messageObject.getDialogId(), messageObject, messageObject.messageOwner.date, false);
break;
}
case 4: {
HashtagSearchCell cell = (HashtagSearchCell) holder.itemView;
cell.setText(searchResultHashtags.get(position - 1));
cell.setNeedDivider(position != searchResultHashtags.size());
break;
}
case 5: {
RecyclerListView recyclerListView = (RecyclerListView) holder.itemView;
((CategoryAdapterRecycler) recyclerListView.getAdapter()).setIndex(position / 2);
break;
}
case 6: {
String str = (String) getItem(position);
TextCell cell = (TextCell) holder.itemView;
cell.setColors(null, Theme.key_windowBackgroundWhiteBlueText2);
cell.setText(LocaleController.formatString("AddContactByPhone", R.string.AddContactByPhone, PhoneFormat.getInstance().format("+" + str)), false);
break;
}
}
}
@Override
public int getItemViewType(int i) {
if (isRecentSearchDisplayed()) {
int offset = (!MediaDataController.getInstance(currentAccount).hints.isEmpty() ? 1 : 0);
if (i < offset) {
return 5;
}
if (i == offset) {
return 1;
}
return 0;
}
if (!searchResultHashtags.isEmpty()) {
return i == 0 ? 1 : 4;
}
ArrayList<TLObject> globalSearch = searchAdapterHelper.getGlobalSearch();
int localCount = searchResult.size();
int localServerCount = searchAdapterHelper.getLocalServerSearch().size();
int phoneCount = searchAdapterHelper.getPhoneSearch().size();
int globalCount = globalSearch.isEmpty() ? 0 : globalSearch.size() + 1;
int messagesCount = searchResultMessages.isEmpty() ? 0 : searchResultMessages.size() + 1;
if (i >= 0 && i < localCount) {
return 0;
} else {
i -= localCount;
if (i >= 0 && i < localServerCount) {
return 0;
} else {
i -= localServerCount;
if (i >= 0 && i < phoneCount) {
Object object = getItem(i);
if (object instanceof String) {
String str = (String) object;
if ("section".equals(str)) {
return 1;
} else {
return 6;
}
}
return 0;
} else {
i -= phoneCount;
if (i >= 0 && i < globalCount) {
if (i == 0) {
return 1;
} else {
return 0;
}
} else {
i -= globalCount;
if (i >= 0 && i < messagesCount) {
if (i == 0) {
return 1;
} else {
return 2;
}
}
}
}
}
}
return 3;
}
public void setFiltersDelegate(FilteredSearchView.Delegate filtersDelegate, boolean update) {
this.filtersDelegate = filtersDelegate;
if (filtersDelegate != null && update) {
filtersDelegate.updateFiltersView(false, null, localTipDates);
}
}
}<|fim▁end|> | |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import render, render_to_response
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views.generic import ListView
from bookrental.forms import UserCreateForm
from bookrental.models import Book
from bookrental.tables import BookTable
from bookrental.models import Cart
from bookrental.tables import CartTable
from bookrental.models import Prices
from bookrental.tables import PriceTable
from django_tables2 import RequestConfig
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.db.models import F
from django.db.models import Q
from bookrental.models import Returns
from bookrental.tables import ReturnTable
# Create your views here.
def book(request):
c = {}
c.update(csrf(request))
# select all the books with the user's current category selected
select_books_from = request.POST.get('books')
table = BookTable(Book.objects.filter(category=request.POST.get('books'))) # request.session['category']))
RequestConfig(request).configure(table)
if request.method == "GET":
#pks = request.POST.getlist("selection")
pks = request.GET.getlist("selection")
selected_books = Book.objects.filter(pk__in=pks)
# put selected books in cart
# TODO: Doesn't work; not saving to the cart table!!!
#for p in pks:
kcart = Cart(isbn='978-123456', quantity=1, price=0)
#for p in Prices.objects.all():
# if b.isbn == p.isbn:
# kcart.price = p.price<|fim▁hole|> # break
kcart.save()
#table = CartTable(Cart.objects.all())))))
#RequestConfig(request).configure(table)
# pass these books to cart page
return HttpResponseRedirect(reverse('cart'))#, c, {'table': table})
return render(request, 'bookrental/Books.html', {'table': table, 'select_books_from': select_books_from})
def checkout(request):
# displays a successful checkout page
return render_to_response('bookrental/Checkout.html')
def info(request):
return render_to_response('bookrental/InfoPage.html')
def login_page(request):
c = {}
c.update(csrf(request))
if request.method == 'POST':
# if the login button was clicked, authenticate the given user/pass combo
username1 = request.POST.get('username')
password1 = request.POST.get('password')
user = authenticate(username=username1, password=password1)
if user is not None:
login(request, user)
# update session
request.session['username'] = username1
# good login, so go to warning page
return HttpResponseRedirect('warning/')
else:
# bad login, so go to failure
return HttpResponseRedirect('login_failure/')
return render_to_response('bookrental/Login.html', c)
def return_confirm(request):
# display a return confirmation page
return render_to_response('bookrental/ReturnConfirm.html')
def returns(request):
c = {}
c.update(csrf(request))
# Create a table of all returnable objects
table = ReturnTable(Returns.objects.all())
RequestConfig(request).configure(table)
if request.method == "POST":
# get list of returning books, delete from total returns
pks = request.POST.getlist("returning")
returned_books = Returns.objects.filter(~Q(pk__in=pks))
# pass these books to return confirmation page as table
table = ReturnTable(returned_books)
RequestConfig(request).configure(table)
return render(request, 'bookrental/ReturnConfirm.html', {'table': table})
return render(request, 'bookrental/Returns.html', {'table': table})
def warning(request):
# displays the disclaimer page
return render_to_response('bookrental/Warning.html')
def cart(request):
c = {}
c.update(csrf(request))
pks = request.GET.getlist("selection")
# get new books to add, join with price table
new_cart = Cart.objects.all()
for c in new_cart:
for p in pks:
# if a cart item is not selected, delete it
if c.isbn != p:
c.delete()
table = CartTable(new_cart)
RequestConfig(request).configure(table)
if request.method == "POST":
pks = request.POST.getlist("removed")
# add all books NOT in removed
removed_books = Cart.objects.filter(~Q(pk__in=pks))
#pass these books to cart page as table
table = CartTable(removed_books)
RequestConfig(request).configure(table)
# display updated table on same page
return render(request, 'bookrental/YourCart.html', {'table': table})
return render(request, 'bookrental/YourCart.html', {'table': table})
def category(request):
c = {}
c.update(csrf(request))
# all available categories for books
categories = {"programming_languages", "software_engineering", "computer_networking", "operating_systems", "database_systems", "computer_organization"}
if request.method == 'POST':
# if the button was pressed, pass the selected category to the books page
select_books_from = request.POST.get('books')
request.session['category'] = select_books_from
return HttpResponseRedirect(reverse('book'), c, {'select_books_from': select_books_from})
return render_to_response('bookrental/category.html', c, context_instance=RequestContext(request))
def login_failure(request):
c = {}
c.update(csrf(request))
if request.method == 'POST':
# if the button was clicked, authenticate user and pass in auth_user table
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
# if the user/pass pair is good, login and redirect to warning page
login(request, user)
# update session
request.session['username'] = username
return HttpResponseRedirect(reverse('warning'))
return render_to_response('bookrental/login_failure.html', c)
def logout_page(request):
# clear out their cart
for c in Cart.objects.all():
c.delete()
# logout the user
logout(request)
# go back to the login page
return render(request, 'bookrental/Login.html')
# Register a new user with a custom form, log them in, and redirect to the Warning page.
def new_user(request):
if request.method == 'POST':
# when they hit submit, check if their form is correct
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
username1 = user_form.clean_username()
password = user_form.clean_password2()
user_form.save()
user = authenticate(username=username1, password=password)
login(request, user)
# update current session
request.session['username'] = username1
return HttpResponseRedirect(reverse('warning'))
user_form = UserCreateForm()
return render(request, 'bookrental/new_user.html', {'user_form': user_form})
def update_user(request):
if request.method == 'POST':
# if they hit submit, get their user and pass
username = request.session['username'] # request.user
password = request.POST.get('password')
# Current password is correct, so can set new password
        if authenticate(username=username, password=password) is not None:
            request.user.set_password(request.POST.get('new_password'))
            request.user.email = request.POST.get('email')
            request.user.save()  # persist the new password and email address
# go to category page
return HttpResponseRedirect(reverse('category'))
return render_to_response('bookrental/update_user.html')
################################################<|fim▁end|> | |
<|file_name|>mutex.rs<|end_file_name|><|fim▁begin|>use alloc::boxed::Box;
use core::borrow::{Borrow, BorrowMut};
use core::ops::{Deref, DerefMut};
use core::cell::{Cell, RefCell, RefMut};
use crate::syscall;
use core::marker::Sync;
#[link(name="os_init", kind="static")]
extern "C" {
fn mutex_lock(lock: *mut i32) -> i32;
}
pub enum TryLockResult {
AlreadyLocked,
Poisoned,
}
pub struct Mutex<T: ?Sized> {
data: Box<RefCell<T>>,
lock: Cell<i32>,
}
pub struct LockedResource<'a, T: 'a + ?Sized> {
mutex: &'a Mutex<T>,
data_ref: RefMut<'a, T>
}
impl<T> Mutex<T> {
pub fn new(t: T) -> Mutex<T> {
Self {
data: Box::new(RefCell::new(t)),
lock: Cell::new(0),
}
}
}
<|fim▁hole|> pub fn lock(&self) -> LockedResource<'_, T> {
unsafe {
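            // spin until the external lock primitive reports the flag acquired, yielding to the scheduler between attempts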
while mutex_lock(self.lock.as_ptr()) != 0 {
syscall::sys_sleep(0);
}
}
LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
}
}
pub fn try_lock(&self) -> Result<LockedResource<'_, T>, TryLockResult> {
unsafe {
if mutex_lock(self.lock.as_ptr()) != 0 {
return Err(TryLockResult::AlreadyLocked)
}
}
Ok(LockedResource {
mutex: &self,
data_ref: (*self.data).borrow_mut(),
})
}
}
impl<'a, T: ?Sized> Deref for LockedResource<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.data_ref.borrow()
}
}
impl<'a, T: ?Sized> DerefMut for LockedResource<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data_ref.borrow_mut()
}
}
impl<'a, T: ?Sized> Drop for LockedResource<'a, T> {
fn drop(&mut self) {
self.mutex.lock.set(0);
}
}<|fim▁end|> | impl<T: ?Sized> Mutex<T> {
|
<|file_name|>test_parse_specific_case4.py<|end_file_name|><|fim▁begin|>import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
import codecs, unicodedata
print "create some specific small datasets with exp row/col combinations"
print "This injects the full set of single byte UTF8 in a col, except for some special h2o chars"
toDoList = range(0x00, 0x100)
def removeIfThere(d):
if d in toDoList:
toDoList.remove(d)
H2O_COL_SEPARATOR = 0x2c # comma
# H2O_COL_SEPARATOR = 0x1 # hive separator
# removeIfThere(0x1) # hive separator okay if we force comma below
removeIfThere(0x0) # nul. known issue
removeIfThere(0xa) # LF. causes EOL
removeIfThere(0xd) # CR. causes EOL
removeIfThere(0x22) # double quote. known issue
removeIfThere(0x2c) # comma. don't mess up my expected col count
tryList = []
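# each tryList entry: (csv text with the code point embedded in column 4, expected rows, expected cols, expected per-column NA counts, expected column types, the code point itself)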
for unicodeNum in toDoList:
unicodeSymbol = unichr(unicodeNum)
tryList.append(
((
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
'a,b,c,d' + unicodeSymbol + 's,n\n'
), 10, 5, [0,0,0,0,0], ['Enum', 'Enum', 'Enum', 'Enum', 'Enum'], unicodeNum)
)
def write_syn_dataset(csvPathname, dataset):
dsf = codecs.open(csvPathname, encoding='utf-8', mode='w+')
encoded = dataset.encode('utf-8')
print "utf8:" , repr(encoded), type(encoded)
print "str or utf8:" , repr(dataset), type(dataset)
dsf.write(dataset)
dsf.close()
class Basic(unittest.TestCase):<|fim▁hole|> def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(java_heap_GB=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_specific_case4(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
hex_key = "a.hex"
for (dataset, expNumRows, expNumCols, expNaCnt, expType, unicodeNum) in tryList:
csvFilename = 'specific_' + str(expNumRows) + str(expNumCols) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
write_syn_dataset(csvPathname, dataset)
parseResult = h2i.import_parse(path=csvPathname, schema='put', header=0,
# force column separator
hex_key=hex_key, timeoutSecs=10, doSummary=False, separator=H2O_COL_SEPARATOR)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=60)
print "Parsed with special unichr(%s):" % unicodeNum
# print "inspect:", h2o.dump_json(inspect)
numRows = inspect['numRows']
self.assertEqual(numRows, expNumRows, msg='Using unichr(0x%x) Wrong numRows: %s Expected: %s' % \
(unicodeNum, numRows, expNumRows))
numCols = inspect['numCols']
self.assertEqual(numCols, expNumCols, msg='Using unichr(0x%x) Wrong numCols: %s Expected: %s' % \
(unicodeNum, numCols, expNumCols))
# this is required for the test setup
assert(len(expNaCnt)>=expNumCols)
assert(len(expType)>=expNumCols)
for k in range(expNumCols):
naCnt = inspect['cols'][k]['naCnt']
self.assertEqual(expNaCnt[k], naCnt, msg='Using unichr(0x%x) col: %s naCnt: %d should be: %s' % \
(unicodeNum, k, naCnt, expNaCnt[k]))
stype = inspect['cols'][k]['type']
self.assertEqual(expType[k], stype, msg='Using unichr(0x%x) col: %s type: %s should be: %s' % \
(unicodeNum, k, stype, expType[k]))
if __name__ == '__main__':
h2o.unit_main()<|fim▁end|> | |
<|file_name|>ImageSpan.js<|end_file_name|><|fim▁begin|>'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _css = require('antd/lib/message/style/css');
var _message = require('antd/lib/message');
var _message2 = _interopRequireDefault(_message);
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _reactDom = require('react-dom');
var _reactDom2 = _interopRequireDefault(_reactDom);
var _draftJsWhkfzyx = require('draft-js-whkfzyx');
var _decoratorStyle = require('./decoratorStyle.css');
var _decoratorStyle2 = _interopRequireDefault(_decoratorStyle);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var ImageSpan = function (_Component) {
_inherits(ImageSpan, _Component);
function ImageSpan(props) {
_classCallCheck(this, ImageSpan);
var _this = _possibleConstructorReturn(this, (ImageSpan.__proto__ || Object.getPrototypeOf(ImageSpan)).call(this, props));
var entity = _draftJsWhkfzyx.Entity.get(_this.props.entityKey);
var _entity$getData = entity.getData(),
width = _entity$getData.width,
height = _entity$getData.height;
_this.state = {
width: width,
height: height,
imageSrc: ''
};
_this.onImageClick = _this._onImageClick.bind(_this);
_this.onDoubleClick = _this._onDoubleClick.bind(_this);
return _this;
}
_createClass(ImageSpan, [{
key: 'componentDidMount',
value: function componentDidMount() {
var _this2 = this;
var _state = this.state,
width = _state.width,
height = _state.height;
var entity = _draftJsWhkfzyx.Entity.get(this.props.entityKey);
var image = new Image();
var _entity$getData2 = entity.getData(),
src = _entity$getData2.src;
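            // drop any query string or fragment so the stored image URL stays canonical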
src = src.replace(/[?#&].*$/g, "");
this.setState({ imageSrc: src });
image.src = this.state.imageSrc;
image.onload = function () {
if (width == null || height == null) {
_this2.setState({ width: image.width, height: image.height });
_draftJsWhkfzyx.Entity.mergeData(_this2.props.entityKey, {
width: image.width,
height: image.height,
originalWidth: image.width,
originalHeight: image.height
});
}
};
}
}, {
key: 'render',
value: function render() {
var _this3 = this;
var _state2 = this.state,
width = _state2.width,
height = _state2.height;
var key = this.props.entityKey;
var entity = _draftJsWhkfzyx.Entity.get(key);
var _entity$getData3 = entity.getData(),
src = _entity$getData3.src;
var imageStyle = {
verticalAlign: 'bottom',
backgroundImage: 'url("' + this.state.imageSrc + '")',
backgroundSize: width + 'px ' + height + 'px',
lineHeight: height + 'px',
fontSize: height + 'px',
width: width,
height: height,
letterSpacing: width
};
return _react2.default.createElement(
'div',
{ className: 'editor-inline-image', onClick: this._onClick },
_react2.default.createElement('img', { src: '' + this.state.imageSrc, className: 'media-image', onClick: function onClick(event) {
_this3.onImageClick(event, key);event.stopPropagation();
}, onDoubleClick: this.onDoubleClick })
);
}
}, {
key: '_onDoubleClick',
value: function _onDoubleClick() {
var currentPicture = _reactDom2.default.findDOMNode(this).querySelector("img");
var pictureWidth = currentPicture.naturalWidth;
var pictureSrc = currentPicture.src;
}
}, {
key: '_onImageClick',
value: function _onImageClick(e, key) {
var currentPicture = _reactDom2.default.findDOMNode(this).querySelector("img");
var pictureWidth = currentPicture.naturalWidth;<|fim▁hole|> var selection = editorState.getSelection();
var blockTree = editorState.getBlockTree(this.props.children[0].key);
if (pictureWidth == 0) {
_message2.default.error("图片地址错误!");
} else if (pictureWidth > 650) {
_message2.default.error("图片尺寸过大将会导致用户流量浪费!请调整至最大650px。", 10);
}
}
}, {
key: '_handleResize',
value: function _handleResize(event, data) {
var _data$size = data.size,
width = _data$size.width,
height = _data$size.height;
this.setState({ width: width, height: height });
_draftJsWhkfzyx.Entity.mergeData(this.props.entityKey, { width: width, height: height });
}
}]);
return ImageSpan;
}(_react.Component);
exports.default = ImageSpan;
ImageSpan.defaultProps = {
children: null,
entityKey: "",
className: ""
};<|fim▁end|> |
var editorState = _draftJsWhkfzyx.EditorState.createEmpty(); |
<|file_name|>merge.py<|end_file_name|><|fim▁begin|>from . elasticfactor import ElasticFactor
from ... environment import cfg
from elasticsearch import Elasticsearch
def run(node):
id_a, id_b = node.get('id_a', '63166071_1'), node.get('id_b', '63166071_2')
es = Elasticsearch()
data_a = es.get(index="factor_state2016", doc_type='factor_network', id=id_a)
data_b = es.get(index="factor_state2016", doc_type='factor_network', id=id_b)<|fim▁hole|><|fim▁end|> | constructor = ElasticFactor(cfg["cdr_elastic_search"]["hosts"] + cfg["cdr_elastic_search"]["index"])
merged = constructor.merge(data_a["_source"], data_b["_source"])
return merged |
<|file_name|>wasp94_ut140801.py<|end_file_name|><|fim▁begin|>from wasp94_base import *
# create a night to analyze
from mosasaurus.Night import Night
n = Night('ut140801', instrument=i)
n.createNightlyLog(remake=False)
# create an observation
from mosasaurus.Observation import Observation
o = Observation(t, i, n)
o.setupFilePrefixes(science=['WASP-94'], reference=['WASP-94'], flat=['flat'])
# create a reducer to analyze this observation
from mosasaurus.Reducer import Reducer
r = Reducer(o, visualize=False)
r.reduce()
from mosasaurus.Cube import Cube
c = Cube(o, width=16)
c.populate(shift=False, max=None)
c.setStars(target='aperture_709_1066', comparisons='aperture_751_1066')
c.save()
from mosasaurus.WavelengthRecalibrator import WavelengthRecalibrator
wr = WavelengthRecalibrator(c)
<|fim▁hole|>#c.imageCube(keys=['raw_counts'], stars=[c.target])
#c.imageCube()
#c.populate(shift=True, max=None)
#c.imageCube(keys=['raw_counts'], stars=[c.target])
#c.imageCube()
#c.exportShiftStretch()
#c.shiftCube()
#c.imageCube(keys=['raw_counts'], stars=[c.target], remake=True)
'''
c.movieCube(stride=1, remake=False)
c.imageCube(remake=True)
c.movieCube(stride=1, remake=False)
'''
#c.nudgeWavelengths()<|fim▁end|> | |
<|file_name|>is_api.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import time
from openerp import pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
class is_api(osv.osv):
_name = 'is_api'
_description = u'Fonctions générales'
def get_usagers_structure(self, cr, uid, structure_id, context=None):
""" Retourner la liste des usagers appartenants à la structure passée en paramètre
"""
usager_line_obj = self.pool.get('ove.usager.structure')
line_ids = usager_line_obj.search(cr, uid, [('structure_id','=',structure_id)], context=context)
print 'line_ids *****', line_ids
usagers = list(set([line['usager_id'][0] for line in usager_line_obj.read(cr, uid, line_ids, ['usager_id'], context=context)]))
return usagers
def get_usager_groups(self, cr, uid, usager_id, context=None):
""" Retourner les groupes associés à l'usager passé en paramètre
"""
group_obj = self.pool.get('ove.groupe')
group_ids = group_obj.search(cr, uid, [('usager_id','=', usager_id)], context=context)
groups = []
for group in group_obj.read(cr, uid, group_ids, ['id', 'code'], context=context):
groups.append({'id':group['id'], 'code':group['code']})
newlist = sorted(groups, key=lambda k: k['code'])
return newlist
def get_users_usager(self, cr, uid, structure_lines, context=None):
""" Retourner les utilisateurs liés aux groupes de l'usager à partir des structures qui leurs appartient
"""
users = {'group_1':[], 'group_2':[], 'group_3':[], 'group_4':[], 'group_5':[],
'group_6':[], 'group_7':[], 'group_8':[], 'group_9':[], 'group_10':[]
}
if not structure_lines:
return users
for line in structure_lines:
if line.structure_id.users_line:
for user_line in line.structure_id.users_line:
if user_line.group_1 and line.group_1:
users['group_1'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_2 and line.group_2:
users['group_2'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_3 and line.group_3:
users['group_3'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_4 and line.group_4:
users['group_4'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_5 and line.group_5:
users['group_5'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_6 and line.group_6:
users['group_6'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_7 and line.group_7:
users['group_7'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_8 and line.group_8:
users['group_8'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_9 and line.group_9:
users['group_9'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
""" Eliminer les doublons des listes """
users.update({'group_1': list(set(users['group_1']))})
users.update({'group_2': list(set(users['group_2']))})
users.update({'group_3': list(set(users['group_3']))})
users.update({'group_4': list(set(users['group_4']))})
users.update({'group_5': list(set(users['group_5']))})
users.update({'group_6': list(set(users['group_6']))})
users.update({'group_7': list(set(users['group_7']))})
users.update({'group_8': list(set(users['group_8']))})
users.update({'group_9': list(set(users['group_9']))})
users.update({'group_10': list(set(users['group_10']))})
return users
def create_group(self, cr, uid, code_groupe, prefix, name_group, users, usager_id, context=None):
""" Création d'un groupe OVE
"""
vals = {
'code': code_groupe,
'name': prefix + ' - ' + name_group,
'user_ids': [[6, 0, users]],
'usager_id': usager_id,
}
return self.pool.get('ove.groupe').create(cr, uid, vals, context=context)
def associate_groupe_usager(self, cr, uid, usager_id, group_id, group_usager, context=None):
""" Associer un groupe au groupe correspondant de l'usager
"""
usager_obj = self.pool.get('is.usager')
if group_usager == 'G1':
usager_obj.write(cr, uid, usager_id, {'group_1': group_id}, context=context)
if group_usager == 'G2':
usager_obj.write(cr, uid, usager_id, {'group_2': group_id}, context=context)
if group_usager == 'G3':
usager_obj.write(cr, uid, usager_id, {'group_3': group_id}, context=context)
if group_usager == 'G4':
usager_obj.write(cr, uid, usager_id, {'group_4': group_id}, context=context)
if group_usager == 'G5':
usager_obj.write(cr, uid, usager_id, {'group_5': group_id}, context=context)
if group_usager == 'G6':
usager_obj.write(cr, uid, usager_id, {'group_6': group_id}, context=context)
if group_usager == 'G7':
usager_obj.write(cr, uid, usager_id, {'group_7': group_id}, context=context)
if group_usager == 'G8':
usager_obj.write(cr, uid, usager_id, {'group_8': group_id}, context=context)
if group_usager == 'G9':
usager_obj.write(cr, uid, usager_id, {'group_9': group_id}, context=context)
if group_usager == 'G10':
usager_obj.write(cr, uid, usager_id, {'group_10': group_id}, context=context)
return True
def create_ove_groups(self, cr, uid, prefix, users, usager_id, context=None):
""" Création de l'ensemble des groupes pour chaque usager ou structure
"""
group_id = self.create_group(cr, uid, 'G1', prefix, 'Groupe Impression', users['group_1'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G1', context)
group_id = self.create_group(cr, uid, 'G2', prefix, 'Groupe Donnée Administrative', users['group_2'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G2', context)
group_id = self.create_group(cr, uid, 'G3', prefix, 'Groupe Donnée Administrative Modification', users['group_3'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G3', context)
group_id = self.create_group(cr, uid, 'G4', prefix, 'Groupe Donnée Institutionnelle', users['group_4'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G4', context)
group_id = self.create_group(cr, uid, 'G5', prefix, 'Groupe Donnée Institutionnelle Modification', users['group_5'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G5', context)
group_id = self.create_group(cr, uid, 'G6', prefix, 'Groupe Donnée Institutionnelle Validation', users['group_6'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G6', context)
group_id = self.create_group(cr, uid, 'G7', prefix, 'Groupe Donnée métier', users['group_7'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G7', context)
group_id = self.create_group(cr, uid, 'G8', prefix, 'Groupe Donnée métier Modification', users['group_8'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G8', context)
group_id = self.create_group(cr, uid, 'G9', prefix, 'Groupe Donnée métier Validation', users['group_9'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G9', context)
group_id = self.create_group(cr, uid, 'G10', prefix, 'Groupe Structure', users['group_10'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G10', context)
return True
def update_usager_groupes(self, cr, uid, usager_id, users, context=None):
""" Mettre à jour les groupes de l'usager courant
"""<|fim▁hole|> self.update_ove_group(cr, uid, group['id'], users['group_1'], context)
if group['code'] == 'G2':
self.update_ove_group(cr, uid, group['id'], users['group_2'], context)
if group['code'] == 'G3':
self.update_ove_group(cr, uid, group['id'], users['group_3'], context)
if group['code'] == 'G4':
self.update_ove_group(cr, uid, group['id'], users['group_4'], context)
if group['code'] == 'G5':
self.update_ove_group(cr, uid, group['id'], users['group_5'], context)
if group['code'] == 'G6':
self.update_ove_group(cr, uid, group['id'], users['group_6'], context)
if group['code'] == 'G7':
self.update_ove_group(cr, uid, group['id'], users['group_7'], context)
if group['code'] == 'G8':
self.update_ove_group(cr, uid, group['id'], users['group_8'], context)
if group['code'] == 'G9':
self.update_ove_group(cr, uid, group['id'], users['group_9'], context)
if group['code'] == 'G10':
self.update_ove_group(cr, uid, group['id'], users['group_10'], context)
return True
def update_ove_group(self, cr, uid, group_id, users, context=None):
""" Mettre à jour d'un groupe d'un usager
"""
vals = {
'user_ids': [[6, 0, users]],
}
return self.pool.get('ove.groupe').write(cr, uid, group_id, vals, context=context)
def get_missed_ove_group(self, cr, uid, usager_groups, context=None):
""" Chercher le groupe manquant dans la liste des groupes d'un usager
"""
groups = ['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10']
exist_groups = []
missed_groups = []
for group in usager_groups:
exist_groups.append(group['code'])
for group in groups:
if group not in exist_groups:
missed_groups.append(group)
else:
continue
return missed_groups
def create_missed_ove_group(self, cr, uid, group, usager_id, prefix, context=None):
""" Créer les groupes manquant de l'usager passé en paramètre
"""
if group == 'G1':
self.create_group(cr, uid, 'G1', prefix, 'Groupe Impression', [], usager_id, context=context)
if group == 'G2':
self.create_group(cr, uid, 'G2', prefix, 'Groupe Donnée Administrative', [], usager_id, context=context)
if group == 'G3':
self.create_group(cr, uid, 'G3', prefix, 'Groupe Donnée Administrative Modification', [], usager_id, context=context)
if group == 'G4':
self.create_group(cr, uid, 'G4', prefix, 'Groupe Donnée Institutionnelle', [], usager_id, context=context)
if group == 'G5':
self.create_group(cr, uid, 'G5', prefix, 'Groupe Donnée Institutionnelle Modification', [], usager_id, context=context)
if group == 'G6':
self.create_group(cr, uid, 'G6', prefix, 'Groupe Donnée Institutionnelle Validation', [], usager_id, context=context)
if group == 'G7':
self.create_group(cr, uid, 'G7', prefix, 'Groupe Donnée métier', [], usager_id, context=context)
if group == 'G8':
self.create_group(cr, uid, 'G8', prefix, 'Groupe Donnée métier Modification', [], usager_id, context=context)
if group == 'G9':
self.create_group(cr, uid, 'G9', prefix, 'Groupe Donnée métier Validation', [], usager_id, context=context)
if group == 'G10':
self.create_group(cr, uid, 'G10', prefix, 'Groupe Structure', [], usager_id, context=context)
return True<|fim▁end|> | groups = self.get_usager_groups(cr, uid, usager_id, context=context)
for group in groups:
if group['code'] == 'G1': |
<|file_name|>request_pool.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_libnetwork.schemata import commons
REQUEST_POOL_SCHEMA = {
u'links': [{
u'method': u'POST',
u'href': u'/IpamDriver.RequestPool',
u'description': u'Allocate pool of ip addresses',
u'rel': u'self',
u'title': u'Create'
}],
u'title': u'Create pool',
u'required': [u'AddressSpace', u'Pool', u'SubPool', u'V6'],
u'definitions': {u'commons': {}},
u'$schema': u'http://json-schema.org/draft-04/hyper-schema',
u'type': u'object',
u'properties': {
u'AddressSpace': {
u'description': u'The name of the address space.',
u'type': u'string',
u'example': u'foo',
},
u'Pool': {
u'description': u'A range of IP Addresses represented in '
u'CIDR format address/mask.',
u'$ref': u'#/definitions/commons/definitions/cidr'
},
u'SubPool': {
            u'description': u'A subset of IP range from Pool in '
u'CIDR format address/mask.',
u'$ref': u'#/definitions/commons/definitions/cidr'
},
u'Options': {
u'type': [u'object', u'null'],
u'description': u'Options',
u'example': {},
},<|fim▁hole|> u'example': False
}
}
}
REQUEST_POOL_SCHEMA[u'definitions'][u'commons'] = commons.COMMONS<|fim▁end|> | u'V6': {
u'description': u'If set to "True", requesting IPv6 pool and '
u'vice-versa.',
u'type': u'boolean', |
<|file_name|>base.py<|end_file_name|><|fim▁begin|># TmLibrary - TissueMAPS library for distibuted image analysis routines.
# Copyright (C) 2016 Markus D. Herrmann, University of Zurich and Robin Hafen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Base classes for data analysis tools.'''
import re
import logging
import inspect
import importlib
import simplejson
import numpy as np
import pandas as pd
import collections
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import FLOAT
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.cluster import KMeans
from tmlib import cfg
import tmlib.models as tm
from tmlib.config import DEFAULT_LIB, IMPLEMENTED_LIBS
from tmlib.utils import (
same_docstring_as, autocreate_directory_property, assert_type,
create_partitions
)
logger = logging.getLogger(__name__)
_register = {}
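# Filled in by _ToolMeta: maps the name of every concrete Tool subclass to its class object.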
class _ToolMeta(ABCMeta):
'''Meta class for :class:`Tool <tmlib.tools.base.Tool>`.'''
def __init__(cls, cls_name, cls_bases, cls_args):
def is_abstract(cls):
is_abstract = False
if '__abstract__' in vars(cls):
if getattr(cls, '__abstract__'):
is_abstract = True
return is_abstract
if not is_abstract(cls):
required_attrs = {'__icon__', '__description__'}
for attr in required_attrs:
if not hasattr(cls, attr):
raise AttributeError(
'Tool class "%s" must implement attribute "%s".' % (
cls_name, attr
)
)
logger.debug('registering tool %s', cls.__name__)
_register[cls_name] = cls
return super(_ToolMeta, cls).__init__(cls_name, cls_bases, cls_args)
def __call__(cls, *args, **kwargs):
return super(_ToolMeta, cls).__call__(*args, **kwargs)
class Tool(object):
'''Abstract base class for data analysis tools.
Tools use the
`Pandas DataFrame <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html>`_ data container.
    This is compatible with standard machine learning libraries,
such as `Scikit-Learn <http://scikit-learn.org/stable/>`_
`Caffe <http://caffe.berkeleyvision.org/>`_ or `Keras <https://keras.io/>`_.<|fim▁hole|> '''
__metaclass__ = _ToolMeta
__abstract__ = True
def __init__(self, experiment_id):
'''
Parameters
----------
experiment_id: int
ID of the experiment for which the tool request is made
'''
self.experiment_id = experiment_id
def load_feature_values(self, mapobject_type_name, feature_names,
mapobject_ids=None):
'''Loads values for each given feature of the given mapobject type.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
feature_names: List[str]
name of each selected
:class:`Feature <tmlib.models.feature.Feature>`
mapobject_ids: List[int], optional
ID of each :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
for which values should be selected; if ``None`` values for
all objects will be loaded (default: ``None``)
Returns
-------
pandas.DataFrame
dataframe where columns are features and rows are mapobjects
indexable by their ID
'''
logger.info(
'load feature values for objects of type "%s"', mapobject_type_name
)
logger.debug(
'load values for features: "%s"', '", "'.join(feature_names)
)
if mapobject_ids is not None:
logger.debug('load values for %d objects', len(mapobject_ids))
else:
logger.debug('load values for all objects')
# FIXME: Use ExperimentSession
with tm.utils.ExperimentConnection(self.experiment_id) as conn:
conn.execute('''
SELECT t.id AS mapobject_type_id, f.id AS feature_id, f.name
FROM features AS f
JOIN mapobject_types AS t ON t.id = f.mapobject_type_id
WHERE f.name = ANY(%(feature_names)s)
AND t.name = %(mapobject_type_name)s;
''', {
'feature_names': feature_names,
'mapobject_type_name': mapobject_type_name
})
records = conn.fetchall()
mapobject_type_id = records[0].mapobject_type_id
feature_map = {str(r.feature_id): r.name for r in records}
sql = '''
SELECT
v.mapobject_id, v.tpoint,
slice(v.values, %(feature_ids)s) AS values
FROM feature_values AS v
JOIN mapobjects AS m
ON m.id = v.mapobject_id AND m.partition_key = v.partition_key
WHERE m.mapobject_type_id = %(mapobject_type_id)s
'''
if mapobject_ids is not None:
sql += '''
AND m.id = ANY(%(mapobject_ids)s)
'''
conn.execute(sql, {
'feature_ids': feature_map.keys(),
'mapobject_type_id': mapobject_type_id,
'mapobject_ids': mapobject_ids
})
records = conn.fetchall()
values = list()
index = list()
for r in records:
values.append(r.values)
index.append((r.mapobject_id, r.tpoint))
index = pd.MultiIndex.from_tuples(
index, names=['mapobject_id', 'tpoint']
)
# TODO: This probably creates a copy in memory. Can we avoid this?
df = pd.DataFrame(values, index=index).astype(float)
column_map = {i: name for i, name in feature_map.iteritems()}
df.rename(columns=column_map, inplace=True)
# TODO: How shall we deal with NaN values? Ideally we would expose
# the option to users to either filter rows (mapobjects) or columns
# (columns).
null_indices = self.identify_features_with_null_values(df)
for name, count in null_indices:
if count > 0:
logger.warn('feature "%s" contains %d null values', name, count)
return df
def calculate_extrema(self, mapobject_type_name, feature_name):
'''Calculates minimum and maximum values of a given feature and
mapobject type.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
feature_names: List[str]
name of each selected
:class:`Feature <tmlib.models.feature.Feature>`
Returns
-------
Tuple[float]
min and max
'''
logger.info(
'calculate min/max for objects of type "%s" and feature "%s"',
mapobject_type_name, feature_name
)
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
feature = session.query(tm.Feature.id).\
filter_by(
name=feature_name, mapobject_type_id=mapobject_type.id
).\
one()
lower, upper = session.query(
func.min(
tm.FeatureValues.values[str(feature.id)].cast(FLOAT)
),
func.max(
tm.FeatureValues.values[str(feature.id)].cast(FLOAT)
)
).\
join(tm.Mapobject).\
filter(
tm.Mapobject.mapobject_type_id == mapobject_type.id,
tm.FeatureValues.values[str(feature.id)] != 'nan'
).\
one()
return (lower, upper)
def get_random_mapobject_subset(self, mapobject_type_name, n):
'''Selects a random subset of mapobjects.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
n: int
number of mapobjects that should be selected at random
Returns
-------
Tuple[int]
IDs of selected mapobject
'''
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
mapobjects = session.query(tm.Mapobject.id).\
filter_by(mapobject_type_id=mapobject_type.id).\
order_by(func.random()).\
limit(n).\
all()
return [m.id for m in mapobjects]
def partition_mapobjects(self, mapobject_type_name, n):
'''Splits mapobjects into partitions of size `n`.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
n: int
number of mapobjects per partition
Returns
-------
List[List[int]]
mapobject IDs
Note
----
Mapobjects are ordered by ID.
'''
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
mapobjects = session.query(tm.Mapobject.id).\
filter_by(mapobject_type_id=mapobject_type.id).\
order_by(tm.Mapobject.id).\
all()
return create_partitions([m.id for m in mapobjects], n)
def identify_features_with_null_values(self, feature_data):
'''Identifies features with NULL values (including NaNs).
Parameters
----------
feature_data: pandas.DataFrame
data frame where columns are feature names and rows and objects
Returns
-------
Tuple[Union[str, int]]
name of the feature and the number of objects with NULL values
'''
null_indices = list()
for name, values in feature_data.isnull().iteritems():
null_indices.append((name, np.sum(values)))
return null_indices
def save_result_values(self, mapobject_type_name, result_id, data):
'''Saves generated label values.
Parameters
----------
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
result_id: int
ID of a registerd
:class:`ToolResult <tmlib.models.result.ToolResult>`
data: pandas.Series
series with multi-level index for "mapobject_id" and "tpoint"
See also
--------
:class:`tmlib.models.result.LabelValues`
'''
logger.info('save label values for result %d', result_id)
mapobject_ids = data.index.levels[0].tolist()
tpoints = data.index.levels[1]
with tm.utils.ExperimentConnection(self.experiment_id) as connection:
connection.execute('''
SELECT id FROM mapobject_types
WHERE name = %(mapobject_type_name)s
''', {
'mapobject_type_name': mapobject_type_name
})
results = connection.fetchall()
mapobject_type_id = results[0][0]
connection.execute('''
SELECT partition_key, array_agg(id) AS mapobject_ids
FROM mapobjects AS m
WHERE m.mapobject_type_id = %(mapobject_type_id)s
AND m.id = ANY(%(mapobject_ids)s)
GROUP BY partition_key
''', {
'mapobject_type_id': mapobject_type_id,
'mapobject_ids': mapobject_ids
})
records = connection.fetchall()
# Grouping mapobject IDs per partition_key allows us
# to target individual shards of the label_values table directly
# on the worker nodes with full SQL support, including multi-row
# insert/update statements.
for tpoint in tpoints:
for partition_key, mapobject_ids in records:
with tm.utils.ExperimentConnection(self.experiment_id) as conn:
host, port, shard_id = conn.locate_partition(
tm.LabelValues, partition_key
)
worker_connection = tm.utils.ExperimentWorkerConnection(
self.experiment_id, host, port
)
with worker_connection as connection:
logger.debug(
'upsert label values for partition %d', partition_key
)
sql = '''
INSERT INTO label_values_{shard} AS v (
partition_key, mapobject_id, values, tpoint
)
VALUES %s
ON CONFLICT ON CONSTRAINT label_values_pkey_{shard}
DO UPDATE
SET values = v.values || EXCLUDED.values
'''.format(shard=shard_id)
template = '''
(
%(partition_key)s, %(mapobject_id)s,
%(values)s, %(tpoint)s
)
'''
args = [
{
'values': {
str(result_id):
str(np.round(data.ix[(mid, tpoint)], 6))
},
'mapobject_id': mid,
'partition_key': partition_key,
'tpoint': tpoint
}
for mid in mapobject_ids
]
execute_values(
connection, sql, args, template=template, page_size=500
)
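# For a single partition and time point, execute_values() expands the
# statement above into one multi-row upsert per page, roughly like the
# following sketch (shard suffix, keys, IDs and value literals are
# placeholders):
#
#   INSERT INTO label_values_<shard> AS v
#       (partition_key, mapobject_id, values, tpoint)
#   VALUES
#       (<key>, <id_1>, <values_1>, <t>),
#       (<key>, <id_2>, <values_2>, <t>)
#   ON CONFLICT ON CONSTRAINT label_values_pkey_<shard>
#   DO UPDATE SET values = v.values || EXCLUDED.values;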
def register_result(self, submission_id, mapobject_type_name,
result_type, **result_attributes):
'''Registers a result for the given tool request.
Parameters
----------
submission_id: int
ID of the corresponding job submission
mapobject_type_name: str
name of the selected
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
result_type: str
name of a class derived from
:class:`ToolResult <tmlib.models.result.ToolResult>`
**result_attributes: dict, optional
result-specific attributes as key-value pairs
that get passed to the constructor of the implemented `result_type`
Returns
-------
int
ID of the tool result
'''
logger.info('register result')
with tm.utils.ExperimentSession(self.experiment_id) as session:
mapobject_type = session.query(tm.MapobjectType.id).\
filter_by(name=mapobject_type_name).\
one()
try:
module_name = 'tmlib.models.result'
module = importlib.import_module(module_name)
cls = getattr(module, result_type)
except ImportError:
raise ImportError(
'Oops, this module should exist: %s' % module_name
)
except AttributeError:
raise ValueError(
'"%s" is not a valid result type.' % result_type
)
required_args = inspect.getargspec(cls.__init__).args[1:]
provided_args = {
'submission_id', 'tool_name', 'mapobject_type_id', 'type'
}
for arg in required_args:
if arg not in result_attributes and arg not in provided_args:
raise ValueError(
'Argument "%s" is required for result of type "%s".'
% (arg, result_type)
)
# A result might already exist, for example when debugging
# or when the job got canceled.
result = session.query(tm.ToolResult).\
filter_by(submission_id=submission_id).\
one_or_none()
if result is None:
result = tm.ToolResult(
submission_id, self.__class__.__name__, mapobject_type.id,
type=result_type, **result_attributes
)
session.add(result)
session.flush()
return result.id
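# Hypothetical call from a concrete tool's process_request() implementation
# (the mapobject type and result type names below are made up for
# illustration and are not defined in this file):
#
#   result_id = self.register_result(
#       submission_id, 'Cells', result_type='MyToolResult'
#   )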
@abstractmethod
def process_request(self, submission_id, payload):
'''Processes a tool request sent by the client.
Parameters
----------
submission_id: int
ID of the corresponding job submission
payload: dict
an arbitrary mapping provided by the client that describes the job
'''
pass
class Classifier(Tool):
'''Abstract base class for classification tools.'''
__abstract__ = True
@same_docstring_as(Tool.__init__)
def __init__(self, experiment_id):
super(Classifier, self).__init__(experiment_id)
def train_supervised(self, feature_data, labels, method, n_fold_cv):
'''Trains a classifier for mapobjects based on `feature_data` and
known labels.
Parameters
----------
feature_data: pandas.DataFrame
feature values that should be used to train the classifier
labels: Dict[int, int]
mapping of :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
ID to assigned label
method: str
method to use for classification
n_fold_cv: int
number of crossvalidation iterations (*n*-fold)
Returns
-------
Tuple[sklearn.base.BaseEstimator]
trained supervised classifier and scaler
'''
classifiers = {
'randomforest': {
# NOTE: RF could be parallelized.
'cls': RandomForestClassifier(n_jobs=1),
# No scaling required for decision trees.
'scaler': None,
'search_space': {
# Number of trees.
'n_estimators': [3, 6, 12, 24],
# Number of leafs in the tree.
'max_depth': [3, 6, 12, None],
'min_samples_split': [2, 4, 8],
# TODO: this should rather be a user defined parameter
'class_weight': ['balanced', None]
},
},
'svm': {
'cls': SVC(cache_size=500, decision_function_shape='ovr'),
# Scale to zero mean and unit variance
'scaler': RobustScaler(quantile_range=(1.0, 99.0), copy=False),
# Search optimal regularization parameters to control
# model complexity.
'search_space': {
'kernel': ['linear', 'rbf'],
'C': np.logspace(-5, 15, 10, base=2),
'gamma': np.logspace(-15, -3, 10, base=2)
}
},
'logisticregression': {
'cls': SGDClassifier(
loss='log', fit_intercept=False,
n_jobs=1, penalty='elasticnet'
),
# Scale to zero mean and unit variance
'scaler': RobustScaler(quantile_range=(1.0, 99.0), copy=False),
# Search optimal regularization parameters to control
# model complexity.
'search_space': {
'alpha': np.logspace(-6, -1, 10),
'l1_ratio': np.linspace(0, 1, 10)
}
}
}
logger.info('train "%s" classifier', method)
# TODO: We may want to include tpoint into labels mapping.
y = list()
for i in feature_data.index.get_level_values('mapobject_id'):
y.append(labels[i])
scaler = classifiers[method]['scaler']
# TODO: identify NaN and infinite values
X = feature_data
if scaler:
scaler.fit(X)
X = scaler.transform(X)
clf = classifiers[method]['cls']
folds = KFold(n_splits=n_fold_cv)
# TODO: Second, finer grid search
model = GridSearchCV(clf, classifiers[method]['search_space'], cv=folds)
model.fit(X, y)
return (model, scaler)
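# Sketch of how a concrete classification tool might combine these methods
# (mapobject IDs, labels and parameters are illustrative only):
#
#   model, scaler = self.train_supervised(
#       feature_data, labels={101: 0, 102: 1},
#       method='randomforest', n_fold_cv=5
#   )
#   predictions = self.predict(feature_data, model, scaler)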
def train_unsupervised(self, feature_data, k, method):
'''Trains a classifier that groups mapobjects into `k` classes based
on `feature_data`.
Parameters
----------
feature_data: pandas.DataFrame
feature values that should be used to train the classifier
k: int
number of classes
method: str
model to use for clustering
Returns
-------
Tuple[sklearn.base.BaseEstimator]
trained unsupervised classifier and scaler
'''
classifiers = {
'kmeans': {
'cls': KMeans,
'scaler': RobustScaler(quantile_range=(1.0, 99.0), copy=False)
}
}
logger.info('train "%s" classifier for %d classes', method, k)
scaler = classifiers[method]['scaler']
X = feature_data
if scaler:
scaler.fit(X)
X = scaler.transform(X)
clf = classifiers[method]['cls']
model = clf(n_clusters=k)
model.fit(X)
return (model, scaler)
def predict(self, feature_data, model, scaler=None):
'''Predicts class labels for mapobjects based on `feature_data` using
pre-trained `model`.
Parameters
----------
feature_data: pandas.DataFrame
feature values based on which labels should be predicted
model: sklearn.base.BaseEstimator
model fitted on training data
scaler: sklearn.preprocessing.data.RobustScaler, optional
scaler fitted on training data to rescale `feature_data` the same
way
Returns
-------
pandas.Series
predicted labels for each mapobject
'''
logger.info('predict labels')
X = feature_data
if scaler is not None:
X = scaler.transform(X)
predictions = model.predict(X)
return pd.Series(predictions, index=feature_data.index)<|fim▁end|> | |
<|file_name|>pstmt.go<|end_file_name|><|fim▁begin|>// Copyright 2015 Sergii Bogomolov. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// Package dbhelper helps to interact with sql.DB by generating, preparing and
// executing queries. It marshals Go structs to and from databases and uses
// database/sql.
//
// Source code and project home:
// https://github.com/bogomolovs/dbhelper
//
package dbhelper
import (
"database/sql"
"errors"
"fmt"
"reflect"
)
// Contains prepared statement ready for execution.
type Pstmt struct {
dbHelper *DbHelper
params []string
stmt *sql.Stmt
}
// Returns a list of values for query parameters
func (pstmt *Pstmt) getValues(params interface{}) ([]interface{}, error) {
// number of parameters
num := len(pstmt.params)
// there are no parameters
if params == nil {
// if params = nil
if num == 0 {
// OK if query has no parameters
return nil, nil
} else {
// error if query has parameters
return nil, errors.New("dbhelper: values for all parameters are missing")
}
}
// slice containing values
values := make([]interface{}, num, num)
// get value of params
paramsValue := reflect.ValueOf(params)
// get type of params
paramsType := paramsValue.Type()
if paramsType.Kind() == reflect.Map {
// fill values in correct order
for i, p := range pstmt.params {
// value
v := paramsValue.MapIndex(reflect.ValueOf(p))
if !v.IsValid() {
return nil, errors.New(fmt.Sprintf("dbhelper: value for parameter '%s' is missing", p))
}
values[i] = v.Interface()
}
} else {
if num > 1 {
return nil, errors.New("dbhelper: query has more than one parameter, params must be a map[string]interface{}")
}
if !checkFieldType(paramsType) {
return nil, errors.New(fmt.Sprintf("dbhelper: wrong parameter type '%v'", paramsType))
}
values[0] = paramsValue.Interface()
}
return values, nil
}
func (pstmt *Pstmt) exec(params interface{}) (sql.Result, error) {
// get parameter values for query
values, err := pstmt.getValues(params)
if err != nil {
return nil, err
}
// execute query
var res sql.Result
if values != nil {
res, err = pstmt.stmt.Exec(values...)
} else {
res, err = pstmt.stmt.Exec()
}
if err != nil {
return nil, wrapError(err)
}
return res, nil
}
// Executes prepared statement with provided parameter values.
// If query has only one parameter, params can be the value of that parameter.
// If query has more than one parameter, params must be a map[string]interface{}.
// Returns number of affected rows or -1 if this number cannot be obtained.
func (pstmt *Pstmt) Exec(params interface{}) (int64, error) {
// execute query
res, err := pstmt.exec(params)
if err != nil {
return 0, err
}
// get number of affected rows
num, err := res.RowsAffected()
if err != nil {
return -1, nil
}
return num, nil
}
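// Usage sketch (the statement variables, the User struct and the parameter
// names below are illustrative and not part of this package):
//
//	// query with a single parameter: params may be the bare value
//	affected, err := updateStmt.Exec(42)
//
//	// query with several named parameters: params must be a map
//	var users []*User
//	n, err := selectStmt.Query(&users, map[string]interface{}{
//		"name":    "alice",
//		"min_age": 21,
//	})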
// Executes prepared query with provided parameter values. Returns number of processed rows.
// If i is a pointer to slice of pointers - all rows are mapped.
// If i is a pointer to structure - only the first matched row is mapped.
// If i is a pointer to another supported data type - corresponding column value
// of the first matched row is mapped.
// If query has only one parameter, params can be the value of that parameter.
// If query has more than one parameter, params must be a map[string]interface{}.
func (pstmt *Pstmt) Query(i interface{}, params interface{}) (int64, error) {
if i == nil {
return 0, errorNil
}
var err error
returnSlice := false
returnStruct := false
// get pointer to slice value
slicePtrValue := reflect.ValueOf(i)
slicePtrType := slicePtrValue.Type()
if slicePtrType.Kind() != reflect.Ptr {
return 0, errors.New("dbhelper: pointer expected")
}
// get slice value
sliceValue := slicePtrValue.Elem()<|fim▁hole|> return 0, errors.New("dbhelper: cannot use pointer to nil")
}
// get slice type
sliceType := sliceValue.Type()
if sliceType.Kind() == reflect.Ptr {
return 0, errors.New("dbhelper: cannot use pointer to pointer")
}
if sliceType.Kind() == reflect.Interface {
return 0, errors.New("dbhelper: wrong type of i")
}
// get return pointer type
var returnPtrType reflect.Type
if sliceType.Kind() == reflect.Slice {
// return slice of pointers to structs
returnSlice = true
returnPtrType = sliceType.Elem()
if returnPtrType.Kind() != reflect.Ptr {
return 0, errors.New("dbhelper: pointer to a slice of pointers expected")
}
} else {
// return pointer
returnPtrType = slicePtrType
}
// get return type
returnType := returnPtrType.Elem()
if returnType.Kind() == reflect.Struct {
returnStruct = true
}
// get table
var tbl *dbTable
if returnStruct {
tbl, err = pstmt.dbHelper.getTable(returnType)
if err != nil {
return 0, err
}
}
// get parameter values for query
values, err := pstmt.getValues(params)
if err != nil {
return 0, err
}
// perform query
var rows *sql.Rows
if values != nil {
rows, err = pstmt.stmt.Query(values...)
} else {
rows, err = pstmt.stmt.Query()
}
if err != nil {
return 0, wrapError(err)
}
// close rows on exit
defer rows.Close()
// create slice
if returnSlice {
sliceValue.Set(reflect.MakeSlice(sliceType, 0, 10))
}
// get column names
columns, err := rows.Columns()
if err != nil {
return 0, wrapError(err)
}
// read rows data to structures
num := int64(0)
for rows.Next() {
// create new structure and get a pointer to it
var returnPtrValue reflect.Value
if returnSlice {
returnPtrValue = reflect.New(returnType)
} else {
returnPtrValue = slicePtrValue
}
// get new structure value
returnValue := returnPtrValue.Elem()
if returnStruct {
// slice containing pointers to corresponding fields of the structure
fields := make([]interface{}, tbl.numField, tbl.numField)
// fill slice with pointers
for i, col := range columns {
// get field in new structure
v := returnValue.FieldByIndex(tbl.fields[col].index)
// append pointer to field to slice
fields[i] = v.Addr().Interface()
}
// scan row and assign values to struct fields
err = rows.Scan(fields...)
} else {
// scan row and assign return value
err = rows.Scan(returnValue.Addr().Interface())
}
// check scan error
if err != nil {
return 0, wrapError(err)
}
num++
if returnSlice {
// append pointer to slice
sliceValue.Set(reflect.Append(sliceValue, returnPtrValue))
} else {
break
}
}
return num, nil
}<|fim▁end|> | if !sliceValue.IsValid() { |
<|file_name|>chat_markers.py<|end_file_name|><|fim▁begin|># Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import ChatMarker
from nbxmpp.modules.base import BaseModule
class ChatMarkers(BaseModule):<|fim▁hole|> self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_message_marker,
ns=Namespace.CHATMARKERS,
priority=15),
]
def _process_message_marker(self, _client, stanza, properties):
type_ = stanza.getTag('received', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('displayed', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('acknowledged',
namespace=Namespace.CHATMARKERS)
if type_ is None:
return
name = type_.getName()
id_ = type_.getAttr('id')
if id_ is None:
self._log.warning('Chatmarker without id')
self._log.warning(stanza)
return
properties.marker = ChatMarker(name, id_)<|fim▁end|> | def __init__(self, client):
BaseModule.__init__(self, client)
|
<|file_name|>Forbidden.ts<|end_file_name|><|fim▁begin|>import {Exception} from "../core/Exception";<|fim▁hole|> static readonly STATUS = 403;
constructor(message: string, origin?: Error | string | any) {
super(Forbidden.STATUS, message, origin);
}
}<|fim▁end|> |
export class Forbidden extends Exception { |
<|file_name|>quandl_data.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import scrapy
import numpy
import quandl
from mykgb import indicator
from myapp.models import Quandlset
from mykgb.items import MykgbItem
quandl.ApiConfig.api_key = "taJyZN8QXqj2Dj8SNr6Z"
quandl.ApiConfig.api_version = '2015-04-09'
class QuandlDataSpider(scrapy.Spider):
name = "quandl_data"
allowed_domains = ["www.baidu.com"]
start_urls = ['http://www.baidu.com/']
custom_settings = {
'ITEM_PIPELINES': {
# 'mykgb.pipelines.DestinyPipeline': 100
'mykgb.pipelines.MykgbPipeline': 100
},
'DEFAULT_REQUEST_HEADERS': {
'Referer': 'http://www.baidu.com'
}
}
def parse(self, response):
Quandlset.objects.update(actived=True)
qs = Quandlset.objects.filter(actived=True)
for p in qs:
symbol = p.quandlcode + "1"
if p and p.namezh:<|fim▁hole|>
try:
df = quandl.get(symbol)[-100:]
except:
print("error", symbol)
p.actived = False
p.save()
continue
if 'Last' in df.columns:
df = df.rename(
# columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Last': 'close'})
columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Last': 'close'})
elif 'Close' in df.columns:
df = df.rename(
# columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Close': 'close'})
columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close'})
elif 'Settle' in df.columns:
df = df.rename(
# columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Settle': 'close'})
columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Settle': 'close'})
else:
p.actived = False
p.save()
continue
# df[df['volume'] == 0] = numpy.nan
df = df.dropna()
if not df.empty and df.shape[0] > 50:
item = MykgbItem()
item['title'] = 'sleepless money'
item['code'] = code_str
macd = indicator.get_macd(df)
kdj = indicator.get_kdj(df)
rsi = indicator.get_rsi(df)
cci = indicator.get_cci(df)
item['macd'] = sum(macd.values())
item['kdj'] = sum(kdj.values())
item['rsi'] = sum(rsi.values())
item['cci'] = sum(cci.values())
yield item<|fim▁end|> | code_str = p.namezh + ' ' + p.exchange + ' ' + p.name
else:
code_str = p.exchange + ' ' + p.name |
<|file_name|>Error.py<|end_file_name|><|fim▁begin|>class Error ( Exception ):
"""Exception class for Address exceptions"""
<|fim▁hole|><|fim▁end|> | def __init__( self, message ) :
Exception.__init__(self,message) |
<|file_name|>Confirmation.js<|end_file_name|><|fim▁begin|>import Timeouts from 'platform/testing/e2e/timeouts';
class Confirmation {
validatePageLoaded = () => {
cy.get('h1', { timeout: Timeouts.slow })
.should('be.visible')
.and('have.text', 'You’ve completed pre-check-in');
};
validatePageContent = () => {
cy.get('h1', { timeout: Timeouts.slow })
.should('be.visible')
.and('have.text', 'You’ve completed pre-check-in');
cy.get("[data-testid='confirmation-wrapper']");<|fim▁hole|> .and('have.text', 'What if I have questions about my appointment?');
};
validateConfirmWithUpdates = () => {
cy.get("[data-testid='confirmation-update-alert']")
.should('be.visible')
.and(
'have.text',
'A staff member will help you on the day of your appointment to update your information.',
);
};
validateConfirmNoUpdates = () => {
cy.get("[data-testid='confirmation-update-alert']").should('not.exist');
};
}
export default new Confirmation();<|fim▁end|> | cy.get("p[data-testid='appointment-day-location']");
cy.get("[data-testid='appointment-list']");
cy.get("h3[data-testid='appointment-questions']")
.should('be.visible') |
<|file_name|>init.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::RegisterBindings;
use crate::dom::bindings::proxyhandler;
use crate::script_runtime::JSEngineSetup;
#[cfg(target_os = "linux")]
#[allow(unsafe_code)]
fn perform_platform_specific_initialization() {
// 4096 is default max on many linux systems<|fim▁hole|>
// Bump up our number of file descriptors to save us from impending doom caused by an onslaught
// of iframes.
unsafe {
let mut rlim = libc::rlimit {
rlim_cur: 0,
rlim_max: 0,
};
match libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) {
0 => {
if rlim.rlim_cur >= MAX_FILE_LIMIT {
// we have more than enough
return;
}
rlim.rlim_cur = match rlim.rlim_max {
libc::RLIM_INFINITY => MAX_FILE_LIMIT,
_ => {
if rlim.rlim_max < MAX_FILE_LIMIT {
rlim.rlim_max
} else {
MAX_FILE_LIMIT
}
},
};
match libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) {
0 => (),
_ => warn!("Failed to set file count limit"),
};
},
_ => warn!("Failed to get file count limit"),
};
}
}
#[cfg(not(target_os = "linux"))]
fn perform_platform_specific_initialization() {}
#[allow(unsafe_code)]
pub fn init() -> JSEngineSetup {
unsafe {
proxyhandler::init();
// Create the global vtables used by the (generated) DOM
// bindings to implement JS proxies.
RegisterBindings::RegisterProxyHandlers();
}
perform_platform_specific_initialization();
JSEngineSetup::new()
}<|fim▁end|> | const MAX_FILE_LIMIT: libc::rlim_t = 4096; |