repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
jaruba/chromium.src | tools/telemetry/telemetry/core/backends/chrome_inspector/inspector_websocket.py | 11 | 6437 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import json
import logging
import socket
import time
from telemetry.core.backends.chrome_inspector import websocket
_DomainHandler = collections.namedtuple(
'DomainHandler', ['notification_handler', 'will_close_handler'])
class DispatchNotificationsUntilDoneTimeoutException(Exception):
"""Exception that can be thrown from DispatchNotificationsUntilDone to
indicate timeout exception of the function.
"""
def __init__(self, elapsed_time):
super(DispatchNotificationsUntilDoneTimeoutException, self).__init__()
self.elapsed_time = elapsed_time
class InspectorWebsocket(object):
def __init__(self, error_handler=None):
"""Create a websocket handler for communicating with Inspectors.
Args:
error_handler: A callback for errors in communicating with the Inspector.
Must accept a single numeric parameter indicating the time elapsed before
the error.
"""
self._socket = None
self._cur_socket_timeout = 0
self._next_request_id = 0
self._error_handler = error_handler
self._all_data_received = False
self._domain_handlers = {}
def RegisterDomain(
self, domain_name, notification_handler, will_close_handler=None):
"""Registers a given domain for handling notification methods.
When used as a handler for DispatchNotificationsUntilDone,
the notification handler should return a boolean, where True indicates
that we should stop listening for more notifications.
For example, given inspector_backend:
def OnConsoleNotification(msg):
if msg['method'] == 'Console.messageAdded':
print msg['params']['message']
return True
def OnConsoleClose(self):
pass
inspector_backend.RegisterDomain(
'Console', OnConsoleNotification, OnConsoleClose)
Args:
domain_name: The devtools domain name. E.g., 'Tracing', 'Memory', 'Page'.
notification_handler: Handler for devtools notification. Will be
called if a devtools notification with matching domain is received
(via DispatchNotifications and DispatchNotificationsUntilDone).
The handler accepts a single parameter: the JSON object representing
the notification.
will_close_handler: Handler to be called from Disconnect().
"""
assert domain_name not in self._domain_handlers
self._domain_handlers[domain_name] = _DomainHandler(
notification_handler, will_close_handler)
def UnregisterDomain(self, domain_name):
"""Unregisters a previously registered domain."""
assert domain_name in self._domain_handlers
self._domain_handlers.pop(domain_name)
def Connect(self, url, timeout=10):
assert not self._socket
self._socket = websocket.create_connection(url, timeout=timeout)
self._cur_socket_timeout = 0
self._next_request_id = 0
def Disconnect(self):
"""Disconnects the inspector websocket.
All existing domain handlers will also be unregistered.
"""
for _, handler in self._domain_handlers.items():
if handler.will_close_handler:
handler.will_close_handler()
if self._socket:
self._socket.close()
self._socket = None
def SendAndIgnoreResponse(self, req):
req['id'] = self._next_request_id
self._next_request_id += 1
data = json.dumps(req)
self._socket.send(data)
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug('sent [%s]', json.dumps(req, indent=2, sort_keys=True))
def SyncRequest(self, req, timeout=10):
self.SendAndIgnoreResponse(req)
while self._socket:
res = self._Receive(timeout)
if 'id' in res and res['id'] == req['id']:
return res
def DispatchNotifications(self, timeout=10):
self._Receive(timeout)
def DispatchNotificationsUntilDone(self, timeout):
"""Dispatch notifications until notification_handler return True.
Args:
timeout: a number that represents the timeout in seconds.
Raises:
DispatchNotificationsUntilDoneTimeoutException if more than |timeout|
seconds have passed since the last time any data was received (or since
this function was called, whichever happened later) by the time the next
attempt to receive data fails due to a WebSocketException.
"""
self._all_data_received = False
if timeout < self._cur_socket_timeout:
self._SetTimeout(timeout)
timeout_start_time = time.time()
while self._socket:
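# Keep pumping notifications; every successful receive restarts the inactivity clock.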
try:
if self._Receive(timeout):
timeout_start_time = time.time()
if self._all_data_received:
break
except websocket.WebSocketTimeoutException:
# TODO(chrishenry): Since we always call settimeout in
# _Receive, we should be able to rip out the manual logic for tracking
# elapsed time and simply throw
# DispatchNotificationsUntilDoneTimeoutException from here.
pass
elapsed_time = time.time() - timeout_start_time
if elapsed_time > timeout:
raise DispatchNotificationsUntilDoneTimeoutException(elapsed_time)
def _SetTimeout(self, timeout):
if self._cur_socket_timeout != timeout:
self._socket.settimeout(timeout)
self._cur_socket_timeout = timeout
def _Receive(self, timeout=10):
self._SetTimeout(timeout)
start_time = time.time()
try:
if self._socket:
self._all_data_received = False
data = self._socket.recv()
result = json.loads(data)
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug(
'got [%s]', json.dumps(result, indent=2, sort_keys=True))
if 'method' in result and self._HandleNotification(result):
self._all_data_received = True
return None
return result
except (socket.error, websocket.WebSocketException):
elapsed_time = time.time() - start_time
self._error_handler(elapsed_time)
def _HandleNotification(self, result):
mname = result['method']
dot_pos = mname.find('.')
domain_name = mname[:dot_pos]
if domain_name in self._domain_handlers:
return self._domain_handlers[domain_name].notification_handler(result)
logging.warn('Unhandled inspector message: %s', result)
return False
| bsd-3-clause | 3,315,547,056,991,166,500 | 34.174863 | 80 | 0.687898 | false |
Livefyre/agency_tools | streamhub_app_generator.py | 2 | 9031 | import sys, itertools, os, argparse, urllib2, json, re
class Application_Type(object):
LIST = "list"
WALL = "wall"
class Application(object):
LIST_TYPE = "list"
WALL_TYPE = "wall"
def __init__(self, network, site_id, article_id, instance_type):
self.network = network
self.site_id = site_id
self.article_id = article_id
self.instance_type = instance_type
def build_view_fragment(self, var_id):
"""
Builds the JS for the view
"""
view_type_cc = ""
view_type = ""
el_prefix = ""
if self.instance_type == Application_Type.LIST:
view_type_cc = "listView"
view_type = "ListView"
el_prefix = "list"
else:
view_type_cc = "wallView"
view_type = "WallView"
el_prefix = "wall"
js_view_var = "{}{}".format(view_type_cc, var_id)
html_view_el = "{}-{}".format(el_prefix, var_id)
template = """
var {js_view_var} = new {view_type}({{
initial: 50,
showMore: 50,
el: document.getElementById("{html_view_el}")
}});
""".format(js_view_var=js_view_var, view_type=view_type, html_view_el=html_view_el)
return template, js_view_var, html_view_el
def build_opts_fragment(self, var_id):
"""
Build the opts for each application
"""
template = """
var opt{iter} = {{
network: '{network}',
siteId: '{site_id}',
articleId: '{article_id}'
}};
""".format(iter=var_id, network=self.network, site_id=self.site_id, article_id=self.article_id)
return template, "opt{}".format(var_id)
def build_collection_fragment(self, var_id, opt_var, view_var):
"""
Build the final js for connecting the collection with the view.
"""
template = """
var collection{iter} = new Collection({opt_var});
collection{iter}.on("error", function (err) {{
if (console) {{
console.log("Error just occurred: " + err);
}}
}});
collection{iter}.pipe({view_var});
""".format(iter=var_id, opt_var=opt_var, view_var=view_var)
return template, "collection{}".format(var_id)
class Generator(object):
def __init__(self, sdk_version, wall_version, filename="index.html", **kwargs):
self.filename = filename
self.sdk_version = sdk_version
self.wall_version = wall_version
self._apps = self._build_apps(**kwargs)
self._app_types = self._generate_app_type_list(**kwargs)
def _generate_app_type_list(self, **kwargs):
"""
Figures out what kind of apps we have so that we can later add in the
appropriate JS, CSS, and whatnot.
"""
app_types = []
if kwargs['list_article_ids']:
app_types.append(Application_Type.LIST)
if kwargs['wall_article_ids']:
app_types.append(Application_Type.WALL)
return app_types
def _build_apps(self, list_article_ids, wall_article_ids, **kwargs):
"""
Generates a list of apps so that we can use them later to build the right
html and js.
"""
apps = []
if list_article_ids:
for list_article_id in list_article_ids:
apps.append(Application(article_id=list_article_id,
instance_type=Application_Type.LIST,
**kwargs))
if wall_article_ids:
for wall_article_id in wall_article_ids:
apps.append(Application(article_id=wall_article_id,
instance_type=Application_Type.WALL,
**kwargs))
return apps
def _build_header(self):
"""
Builds the header section for the html file
"""
header = """
<script src="http://cdn.livefyre.com/libs/sdk/v{sdk_version}/streamhub-sdk.min.js"></script>
<link rel="stylesheet" href="http://cdn.livefyre.com/libs/sdk/v{sdk_version}/streamhub-sdk.min.css" />
""".format(sdk_version=self.sdk_version)
if Application_Type.WALL in self._app_types:
header += """
<script src="http://cdn.livefyre.com/libs/apps/Livefyre/streamhub-wall/v{wall_version}/streamhub-wall.min.js"></script>
""".format(wall_version=self.wall_version)
return header
def _build_body(self):
"""
Builds body of the html file
"""
include = ""
if Application_Type.LIST in self._app_types:
include += """var ListView = Livefyre.require("streamhub-sdk/content/views/content-list-view");\n\t\t\t"""
if Application_Type.WALL in self._app_types:
include += """var WallView = Livefyre.require("streamhub-wall");\n"""
script = ""
html_els = ""
for i, app in enumerate(self._apps):
opt_template, opt_var = app.build_opts_fragment(i)
view_template, view_var, view_el = app.build_view_fragment(i)
col_template, col_var = app.build_collection_fragment(i, opt_var, view_var)
html_els += "<div id='{}'></div>".format(view_el)
script += opt_template + view_template + col_template
body = """
{html_els}
<script>
(function () {{
var Collection = Livefyre.require("streamhub-sdk/collection");
{include}
{script}
}})();
</script>
""".format(html_els=html_els, include=include, script=script)
return body
def generate_html(self):
header = self._build_header()
body = self._build_body()
template = """
<!DOCTYPE html>
<html>
<head>
{header}
</head>
<body>
{body}
</body>
</html>
""".format(header=header, body=body)
f = open(self.filename, "w")
f.write(template)
f.close()
print "\nSuccess. File can be found here {}\n".format(self.filename)
def get_versions(args_dict):
"""
Modifies the args dict based upon the options it has to get the appropriate sdk and application
versions.
"""
if args_dict["wall_article_ids"] and ((args_dict["wall_version"] and not args_dict["sdk_version"]) or (not args_dict["wall_version"] and args_dict["sdk_version"])):
print "ERROR: Must specify both wall version and sdk version if specifying a wall version"
sys.exit(2)
if args_dict["wall_article_ids"] and not args_dict["wall_version"] and not args_dict["sdk_version"]:
url = "http://appgallery.herokuapp.com/api/v1/packages/json"
apps = json.loads(urllib2.urlopen(url).read())
for app in apps:
if app["id"] == "Livefyre/streamhub-wall":
args_dict["wall_version"] = app["latestBuild"].get("version", "")
args_dict["sdk_version"] = app["latestBuild"].get("sdkVersion", "")
break
if args_dict["list_article_ids"] and not args_dict["sdk_version"]:
reg = re.compile("^v\d+\.\d+\.\d+$")
url = "https://api.github.com/repos/Livefyre/streamhub-sdk/tags"
tags = json.loads(urllib2.urlopen(url).read())
for tag in tags:
if reg.match(tag["name"]):
args_dict["sdk_version"] = tag["name"]
break
def main():
"""
The main entry point to the application
"""
# Parse the command options
parser = argparse.ArgumentParser(description="A way to generate boilerplate code for media walls and list feeds")
parser.add_argument("-n", "--network", help="the network the app is for (e.g. example.fyre.co)", required=True)
parser.add_argument("-s", "--site-id", help="the site id the app is for (e.g. 123456)", required=True)
parser.add_argument("--wall-article-ids", nargs="+", help="article ids for media walls (e.g. article-id-1 article-id-2")
parser.add_argument("--list-article-ids", nargs="+", help="article ids for list views (e.g. article-id-1 article-id-2")
parser.add_argument("-f", "--filename", help="the output filename", default="./index.html")
parser.add_argument("-v", "--sdk-version", help="(optional - will attempt to use latest) which sdk version you'd like to use")
parser.add_argument("-w", "--wall-version", help="(optional - will attempt to use latest) which version of the media wall you want to use")
args = parser.parse_args()
args_dict = vars(args)
if not args_dict["list_article_ids"] and not args_dict["wall_article_ids"]:
print "ERROR: Must have at least 1 list_article_ids or wall_article_ids"
sys.exit(2)
get_versions(args_dict)
generator = Generator(**args_dict)
generator.generate_html()
if __name__ == "__main__":
main()
| mit | -3,347,605,190,159,661,000 | 34.555118 | 168 | 0.565497 | false |
gnuhub/intellij-community | python/lib/Lib/encodings/mac_croatian.py | 593 | 13889 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-croatian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 | 1,460,277,965,143,584,000 | 44.241042 | 118 | 0.547628 | false |
cloudera/hue | apps/search/src/search/controller.py | 2 | 1827 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
from libsolr.api import SolrApi
from search.conf import SOLR_URL
LOG = logging.getLogger(__name__)
class SearchController(object):
def __init__(self, user):
self.user = user
def is_collection(self, collection_name):
return collection_name in self.get_solr_collections()
def is_core(self, core_name):
solr_cores = SolrApi(SOLR_URL.get(), self.user).cores()
return core_name in solr_cores
def get_solr_collections(self):
return SolrApi(SOLR_URL.get(), self.user).collections()
def get_all_indexes(self, show_all=False):
indexes = []
try:
indexes = list(self.get_solr_collections().keys())
except:
LOG.exception('failed to get indexes')
try:
indexes += list(SolrApi(SOLR_URL.get(), self.user).aliases().keys())
except:
LOG.exception('failed to get index aliases')
if show_all or not indexes:
return indexes + list(SolrApi(SOLR_URL.get(), self.user).cores().keys())
else:
return indexes
| apache-2.0 | -7,750,711,898,194,428,000 | 29.966102 | 78 | 0.711549 | false |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/curses/__init__.py | 46 | 1817 | """curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
curses.initscr()
...
"""
__revision__ = "$Id$"
from _curses import *
from curses.wrapper import wrapper
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
import _curses, curses
# we call setupterm() here because it raises an error
# instead of calling exit() in error cases.
setupterm(term=_os.environ.get("TERM", "unknown"),
fd=_sys.__stdout__.fileno())
stdscr = _curses.initscr()
for key, value in _curses.__dict__.items():
if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
setattr(curses, key, value)
return stdscr
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
import _curses, curses
retval = _curses.start_color()
if hasattr(_curses, 'COLORS'):
curses.COLORS = _curses.COLORS
if hasattr(_curses, 'COLOR_PAIRS'):
curses.COLOR_PAIRS = _curses.COLOR_PAIRS
return retval
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
from has_key import has_key
| mit | 1,475,989,010,377,561,300 | 29.79661 | 77 | 0.682994 | false |
BRCDcomm/pynos | pynos/versions/ver_7/ver_7_0_0/yang/brocade_terminal.py | 4 | 2453 | #!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_terminal(object):
"""Auto generated class.
"""
def __init__(self, **kwargs):
self._callback = kwargs.pop('callback')
def terminal_cfg_line_sessionid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
line = ET.SubElement(terminal_cfg, "line")
sessionid = ET.SubElement(line, "sessionid")
sessionid.text = kwargs.pop('sessionid')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def terminal_cfg_line_exec_timeout(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
line = ET.SubElement(terminal_cfg, "line")
sessionid_key = ET.SubElement(line, "sessionid")
sessionid_key.text = kwargs.pop('sessionid')
exec_timeout = ET.SubElement(line, "exec-timeout")
exec_timeout.text = kwargs.pop('exec_timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def terminal_cfg_line_sessionid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
line = ET.SubElement(terminal_cfg, "line")
sessionid = ET.SubElement(line, "sessionid")
sessionid.text = kwargs.pop('sessionid')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def terminal_cfg_line_exec_timeout(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
terminal_cfg = ET.SubElement(config, "terminal-cfg", xmlns="urn:brocade.com:mgmt:brocade-terminal")
line = ET.SubElement(terminal_cfg, "line")
sessionid_key = ET.SubElement(line, "sessionid")
sessionid_key.text = kwargs.pop('sessionid')
exec_timeout = ET.SubElement(line, "exec-timeout")
exec_timeout.text = kwargs.pop('exec_timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
| apache-2.0 | -2,744,403,395,678,361,000 | 37.952381 | 107 | 0.618019 | false |
gylian/headphones | lib/unidecode/x0cb.py | 253 | 5012 | data = (
'jjwaels', # 0x00
'jjwaelt', # 0x01
'jjwaelp', # 0x02
'jjwaelh', # 0x03
'jjwaem', # 0x04
'jjwaeb', # 0x05
'jjwaebs', # 0x06
'jjwaes', # 0x07
'jjwaess', # 0x08
'jjwaeng', # 0x09
'jjwaej', # 0x0a
'jjwaec', # 0x0b
'jjwaek', # 0x0c
'jjwaet', # 0x0d
'jjwaep', # 0x0e
'jjwaeh', # 0x0f
'jjoe', # 0x10
'jjoeg', # 0x11
'jjoegg', # 0x12
'jjoegs', # 0x13
'jjoen', # 0x14
'jjoenj', # 0x15
'jjoenh', # 0x16
'jjoed', # 0x17
'jjoel', # 0x18
'jjoelg', # 0x19
'jjoelm', # 0x1a
'jjoelb', # 0x1b
'jjoels', # 0x1c
'jjoelt', # 0x1d
'jjoelp', # 0x1e
'jjoelh', # 0x1f
'jjoem', # 0x20
'jjoeb', # 0x21
'jjoebs', # 0x22
'jjoes', # 0x23
'jjoess', # 0x24
'jjoeng', # 0x25
'jjoej', # 0x26
'jjoec', # 0x27
'jjoek', # 0x28
'jjoet', # 0x29
'jjoep', # 0x2a
'jjoeh', # 0x2b
'jjyo', # 0x2c
'jjyog', # 0x2d
'jjyogg', # 0x2e
'jjyogs', # 0x2f
'jjyon', # 0x30
'jjyonj', # 0x31
'jjyonh', # 0x32
'jjyod', # 0x33
'jjyol', # 0x34
'jjyolg', # 0x35
'jjyolm', # 0x36
'jjyolb', # 0x37
'jjyols', # 0x38
'jjyolt', # 0x39
'jjyolp', # 0x3a
'jjyolh', # 0x3b
'jjyom', # 0x3c
'jjyob', # 0x3d
'jjyobs', # 0x3e
'jjyos', # 0x3f
'jjyoss', # 0x40
'jjyong', # 0x41
'jjyoj', # 0x42
'jjyoc', # 0x43
'jjyok', # 0x44
'jjyot', # 0x45
'jjyop', # 0x46
'jjyoh', # 0x47
'jju', # 0x48
'jjug', # 0x49
'jjugg', # 0x4a
'jjugs', # 0x4b
'jjun', # 0x4c
'jjunj', # 0x4d
'jjunh', # 0x4e
'jjud', # 0x4f
'jjul', # 0x50
'jjulg', # 0x51
'jjulm', # 0x52
'jjulb', # 0x53
'jjuls', # 0x54
'jjult', # 0x55
'jjulp', # 0x56
'jjulh', # 0x57
'jjum', # 0x58
'jjub', # 0x59
'jjubs', # 0x5a
'jjus', # 0x5b
'jjuss', # 0x5c
'jjung', # 0x5d
'jjuj', # 0x5e
'jjuc', # 0x5f
'jjuk', # 0x60
'jjut', # 0x61
'jjup', # 0x62
'jjuh', # 0x63
'jjweo', # 0x64
'jjweog', # 0x65
'jjweogg', # 0x66
'jjweogs', # 0x67
'jjweon', # 0x68
'jjweonj', # 0x69
'jjweonh', # 0x6a
'jjweod', # 0x6b
'jjweol', # 0x6c
'jjweolg', # 0x6d
'jjweolm', # 0x6e
'jjweolb', # 0x6f
'jjweols', # 0x70
'jjweolt', # 0x71
'jjweolp', # 0x72
'jjweolh', # 0x73
'jjweom', # 0x74
'jjweob', # 0x75
'jjweobs', # 0x76
'jjweos', # 0x77
'jjweoss', # 0x78
'jjweong', # 0x79
'jjweoj', # 0x7a
'jjweoc', # 0x7b
'jjweok', # 0x7c
'jjweot', # 0x7d
'jjweop', # 0x7e
'jjweoh', # 0x7f
'jjwe', # 0x80
'jjweg', # 0x81
'jjwegg', # 0x82
'jjwegs', # 0x83
'jjwen', # 0x84
'jjwenj', # 0x85
'jjwenh', # 0x86
'jjwed', # 0x87
'jjwel', # 0x88
'jjwelg', # 0x89
'jjwelm', # 0x8a
'jjwelb', # 0x8b
'jjwels', # 0x8c
'jjwelt', # 0x8d
'jjwelp', # 0x8e
'jjwelh', # 0x8f
'jjwem', # 0x90
'jjweb', # 0x91
'jjwebs', # 0x92
'jjwes', # 0x93
'jjwess', # 0x94
'jjweng', # 0x95
'jjwej', # 0x96
'jjwec', # 0x97
'jjwek', # 0x98
'jjwet', # 0x99
'jjwep', # 0x9a
'jjweh', # 0x9b
'jjwi', # 0x9c
'jjwig', # 0x9d
'jjwigg', # 0x9e
'jjwigs', # 0x9f
'jjwin', # 0xa0
'jjwinj', # 0xa1
'jjwinh', # 0xa2
'jjwid', # 0xa3
'jjwil', # 0xa4
'jjwilg', # 0xa5
'jjwilm', # 0xa6
'jjwilb', # 0xa7
'jjwils', # 0xa8
'jjwilt', # 0xa9
'jjwilp', # 0xaa
'jjwilh', # 0xab
'jjwim', # 0xac
'jjwib', # 0xad
'jjwibs', # 0xae
'jjwis', # 0xaf
'jjwiss', # 0xb0
'jjwing', # 0xb1
'jjwij', # 0xb2
'jjwic', # 0xb3
'jjwik', # 0xb4
'jjwit', # 0xb5
'jjwip', # 0xb6
'jjwih', # 0xb7
'jjyu', # 0xb8
'jjyug', # 0xb9
'jjyugg', # 0xba
'jjyugs', # 0xbb
'jjyun', # 0xbc
'jjyunj', # 0xbd
'jjyunh', # 0xbe
'jjyud', # 0xbf
'jjyul', # 0xc0
'jjyulg', # 0xc1
'jjyulm', # 0xc2
'jjyulb', # 0xc3
'jjyuls', # 0xc4
'jjyult', # 0xc5
'jjyulp', # 0xc6
'jjyulh', # 0xc7
'jjyum', # 0xc8
'jjyub', # 0xc9
'jjyubs', # 0xca
'jjyus', # 0xcb
'jjyuss', # 0xcc
'jjyung', # 0xcd
'jjyuj', # 0xce
'jjyuc', # 0xcf
'jjyuk', # 0xd0
'jjyut', # 0xd1
'jjyup', # 0xd2
'jjyuh', # 0xd3
'jjeu', # 0xd4
'jjeug', # 0xd5
'jjeugg', # 0xd6
'jjeugs', # 0xd7
'jjeun', # 0xd8
'jjeunj', # 0xd9
'jjeunh', # 0xda
'jjeud', # 0xdb
'jjeul', # 0xdc
'jjeulg', # 0xdd
'jjeulm', # 0xde
'jjeulb', # 0xdf
'jjeuls', # 0xe0
'jjeult', # 0xe1
'jjeulp', # 0xe2
'jjeulh', # 0xe3
'jjeum', # 0xe4
'jjeub', # 0xe5
'jjeubs', # 0xe6
'jjeus', # 0xe7
'jjeuss', # 0xe8
'jjeung', # 0xe9
'jjeuj', # 0xea
'jjeuc', # 0xeb
'jjeuk', # 0xec
'jjeut', # 0xed
'jjeup', # 0xee
'jjeuh', # 0xef
'jjyi', # 0xf0
'jjyig', # 0xf1
'jjyigg', # 0xf2
'jjyigs', # 0xf3
'jjyin', # 0xf4
'jjyinj', # 0xf5
'jjyinh', # 0xf6
'jjyid', # 0xf7
'jjyil', # 0xf8
'jjyilg', # 0xf9
'jjyilm', # 0xfa
'jjyilb', # 0xfb
'jjyils', # 0xfc
'jjyilt', # 0xfd
'jjyilp', # 0xfe
'jjyilh', # 0xff
)
| gpl-3.0 | 4,895,164,431,698,401,000 | 18.426357 | 20 | 0.487829 | false |
lociii/googleads-python-lib | examples/adspygoogle/adwords/v201309/basic_operations/add_keywords.py | 2 | 2787 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds ad group criteria to an ad group. To get ad groups, run
get_ad_groups.py.
Tags: AdGroupCriterionService.mutate
Api: AdWordsOnly
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_criterion_service = client.GetAdGroupCriterionService(
version='v201309')
# Construct keyword ad group criterion object.
keyword1 = {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars'
},
# These fields are optional.
'userStatus': 'PAUSED',
'destinationUrl': 'http://example.com/mars'
}
keyword2 = {
'xsi_type': 'NegativeAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'EXACT',
'text': 'pluto'
}
}
# Construct operations and add ad group criteria.
operations = [
{
'operator': 'ADD',
'operand': keyword1
},
{
'operator': 'ADD',
'operand': keyword2
}
]
ad_group_criteria = ad_group_criterion_service.Mutate(operations)[0]['value']
# Display results.
for criterion in ad_group_criteria:
print ('Keyword ad group criterion with ad group id \'%s\', criterion id '
'\'%s\', text \'%s\', and match type \'%s\' was added.'
% (criterion['adGroupId'], criterion['criterion']['id'],
criterion['criterion']['text'],
criterion['criterion']['matchType']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_group_id)
| apache-2.0 | -2,051,276,545,035,153,400 | 28.03125 | 79 | 0.616792 | false |
ds-hwang/chromium-crosswalk | chromecast/tools/trace.py | 19 | 2980 | #!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This script was originally written by Alok Priyadarshi (alokp@)
# with some minor local modifications.
import contextlib
import json
import optparse
import os
import sys
import websocket
from tracinglib import TracingBackend, TracingClient
@contextlib.contextmanager
def Connect(device_ip, devtools_port):
backend = TracingBackend()
try:
backend.Connect(device_ip, devtools_port)
yield backend
finally:
backend.Disconnect()
def DumpTrace(trace, options):
filepath = os.path.expanduser(options.output) if options.output \
else os.path.join(os.getcwd(), 'trace.json')
dirname = os.path.dirname(filepath)
if dirname:
if not os.path.exists(dirname):
os.makedirs(dirname)
else:
filepath = os.path.join(os.getcwd(), filepath)
with open(filepath, 'w') as f:
json.dump(trace, f)
return filepath
def _CreateOptionParser():
parser = optparse.OptionParser(description='Record about://tracing profiles '
'from any running instance of Chrome.')
parser.add_option(
'-v', '--verbose', help='Verbose logging.', action='store_true')
parser.add_option(
'-p', '--port', help='Remote debugging port.', type='int', default=9222)
parser.add_option(
'-d', '--device', help='Device ip address.', type='string',
default='127.0.0.1')
tracing_opts = optparse.OptionGroup(parser, 'Tracing options')
tracing_opts.add_option(
'-c', '--category-filter',
help='Apply filter to control what category groups should be traced.',
type='string')
tracing_opts.add_option(
'--record-continuously',
help='Keep recording until stopped. The trace buffer is of fixed size '
'and used as a ring buffer. If this option is omitted then '
'recording stops when the trace buffer is full.',
action='store_true')
parser.add_option_group(tracing_opts)
output_options = optparse.OptionGroup(parser, 'Output options')
output_options.add_option(
'-o', '--output',
help='Save trace output to file.')
parser.add_option_group(output_options)
return parser
def _ProcessOptions(options):
websocket.enableTrace(options.verbose)
def main():
parser = _CreateOptionParser()
options, _args = parser.parse_args()
_ProcessOptions(options)
with Connect(options.device, options.port) as tracing_backend:
tracing_backend.StartTracing(TracingClient(),
options.category_filter,
options.record_continuously)
raw_input('Capturing trace. Press Enter to stop...')
trace = tracing_backend.StopTracing()
filepath = DumpTrace(trace, options)
print('Done')
print('Trace written to file://%s' % filepath)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -8,498,445,012,116,900,000 | 28.8 | 79 | 0.674832 | false |
felixma/nova | nova/tests/unit/scheduler/filters/test_core_filters.py | 17 | 4514 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import core_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestCoreFilter(test.NoDBTestCase):
def test_core_filter_passes(self):
self.filt_cls = core_filter.CoreFilter()
filter_properties = {'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7,
'cpu_allocation_ratio': 2})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
self.filt_cls = core_filter.CoreFilter()
filter_properties = {'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
self.filt_cls = core_filter.CoreFilter()
filter_properties = {'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8,
'cpu_allocation_ratio': 2})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_core_filter_single_instance_overcommit_fails(self):
self.filt_cls = core_filter.CoreFilter()
filter_properties = {'instance_type': {'vcpus': 2}}
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 1, 'vcpus_used': 0,
'cpu_allocation_ratio': 2})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_core_filter_value_error(self, agg_mock):
self.filt_cls = core_filter.AggregateCoreFilter()
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7,
'cpu_allocation_ratio': 2})
agg_mock.return_value = set(['XXX'])
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
self.assertEqual(4 * 2, host.limits['vcpu'])
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_core_filter_default_value(self, agg_mock):
self.filt_cls = core_filter.AggregateCoreFilter()
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8,
'cpu_allocation_ratio': 2})
agg_mock.return_value = set([])
# False: fallback to default flag w/o aggregates
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
# True: use ratio from aggregates
agg_mock.return_value = set(['3'])
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
self.assertEqual(4 * 3, host.limits['vcpu'])
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_core_filter_conflict_values(self, agg_mock):
self.filt_cls = core_filter.AggregateCoreFilter()
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8,
'cpu_allocation_ratio': 1})
agg_mock.return_value = set(['2', '3'])
# use the minimum ratio from aggregates
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
self.assertEqual(4 * 2, host.limits['vcpu'])
| apache-2.0 | -1,118,712,251,938,863,700 | 47.537634 | 78 | 0.628489 | false |
srvg/ansible | test/units/template/test_safe_eval.py | 58 | 1820 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from collections import defaultdict
from units.compat import unittest
from ansible.template.safe_eval import safe_eval
class TestSafeEval(unittest.TestCase):
def test_safe_eval_usage(self):
# test safe eval calls with different possible types for the
# locals dictionary, to ensure we don't run into problems like
# ansible/ansible/issues/12206 again
for locals_vars in (dict(), defaultdict(dict)):
self.assertEqual(safe_eval('True', locals=locals_vars), True)
self.assertEqual(safe_eval('False', locals=locals_vars), False)
self.assertEqual(safe_eval('0', locals=locals_vars), 0)
self.assertEqual(safe_eval('[]', locals=locals_vars), [])
self.assertEqual(safe_eval('{}', locals=locals_vars), {})
@unittest.skipUnless(sys.version_info[:2] >= (2, 7), "Python 2.6 has no set literals")
def test_set_literals(self):
self.assertEqual(safe_eval('{0}'), set([0]))
| gpl-3.0 | -7,465,371,785,716,037,000 | 40.363636 | 90 | 0.704396 | false |
sinkuri256/python-for-android | python-build/python-libs/gdata/src/gdata/analytics/service.py | 213 | 13293 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AccountsService extends the GDataService to streamline Google Analytics
account information operations.
AnalyticsDataService: Provides methods to query google analytics data feeds.
Extends GDataService.
DataQuery: Queries a Google Analytics Data list feed.
AccountQuery: Queries a Google Analytics Account list feed.
"""
__author__ = 'api.suryasev (Sal Uryasev)'
import urllib
import atom
import gdata.service
import gdata.analytics
class AccountsService(gdata.service.GDataService):
"""Client extension for the Google Analytics Account List feed."""
def __init__(self, email="", password=None, source=None,
server='www.google.com/analytics', additional_headers=None,
**kwargs):
"""Creates a client for the Google Analytics service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='analytics',
source=source, server=server, additional_headers=additional_headers,
**kwargs)
def QueryAccountListFeed(self, uri):
"""Retrieves an AccountListFeed by retrieving a URI based off the Document
List feed, including any query parameters. An AccountListFeed object
can be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
An AccountListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)
def GetAccountListEntry(self, uri):
"""Retrieves a particular AccountListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in an Account List feed.
Returns:
An AccountListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)
def GetAccountList(self, max_results=1000, text_query=None,
params=None, categories=None):
"""Retrieves a feed containing all of a user's accounts and profiles."""
q = gdata.analytics.service.AccountQuery(max_results=max_results,
text_query=text_query,
params=params,
categories=categories);
return self.QueryAccountListFeed(q.ToUri())
class AnalyticsDataService(gdata.service.GDataService):
"""Client extension for the Google Analytics service Data List feed."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com/analytics', additional_headers=None,
**kwargs):
"""Creates a client for the Google Analytics service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'docs.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(self,
email=email, password=password, service='analytics', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def GetData(self, ids='', dimensions='', metrics='',
sort='', filters='', start_date='',
end_date='', start_index='',
max_results=''):
"""Retrieves a feed containing a user's data
ids: comma-separated string of analytics accounts.
dimensions: comma-separated string of dimensions.
metrics: comma-separated string of metrics.
sort: comma-separated string of dimensions and metrics for sorting.
This may be prefixed with a minus to sort in reverse order.
(e.g. '-ga:keyword')
If omitted, the first dimension passed in will be used.
filters: comma-separated string of filter parameters.
(e.g. 'ga:keyword==google')
start_date: start date for data pull.
end_date: end date for data pull.
start_index: used in combination with max_results to pull more than 1000
entries. This defaults to 1.
max_results: maximum results that the pull will return. This defaults
to, and maxes out at 1000.
"""
q = gdata.analytics.service.DataQuery(ids=ids,
dimensions=dimensions,
metrics=metrics,
filters=filters,
sort=sort,
start_date=start_date,
end_date=end_date,
start_index=start_index,
max_results=max_results);
return self.AnalyticsDataFeed(q.ToUri())
def AnalyticsDataFeed(self, uri):
"""Retrieves an AnalyticsListFeed by retrieving a URI based off the
Document List feed, including any query parameters. An
AnalyticsListFeed object can be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
An AnalyticsListFeed object representing the feed returned by the
server.
"""
return self.Get(uri,
converter=gdata.analytics.AnalyticsDataFeedFromString)
"""
Account Fetching
"""
def QueryAccountListFeed(self, uri):
"""Retrieves an Account ListFeed by retrieving a URI based off the Account
List feed, including any query parameters. An AccountQuery object can
be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
An AccountListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)
def GetAccountListEntry(self, uri):
"""Retrieves a particular AccountListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in an Account List feed.
Returns:
An AccountListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)
def GetAccountList(self, username="default", max_results=1000,
start_index=1):
"""Retrieves a feed containing all of a user's accounts and profiles.
The username parameter is soon to be deprecated, with 'default'
becoming the only allowed parameter.
"""
if not username:
raise Exception("username is a required parameter")
q = gdata.analytics.service.AccountQuery(username=username,
max_results=max_results,
start_index=start_index);
return self.QueryAccountListFeed(q.ToUri())
class DataQuery(gdata.service.Query):
"""Object used to construct a URI to a data feed"""
def __init__(self, feed='/feeds/data', text_query=None,
params=None, categories=None, ids="",
dimensions="", metrics="", sort="", filters="",
start_date="", end_date="", start_index="",
max_results=""):
"""Constructor for Analytics List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/data')
text_query: string (optional) The contents of the q query parameter.
This string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
ids: comma-separated string of analytics accounts.
dimensions: comma-separated string of dimensions.
metrics: comma-separated string of metrics.
sort: comma-separated string of dimensions and metrics.
This may be prefixed with a minus to sort in reverse order
(e.g. '-ga:keyword').
If omitted, the first dimension passed in will be used.
filters: comma-separated string of filter parameters
(e.g. 'ga:keyword==google').
start_date: start date for data pull.
end_date: end date for data pull.
start_index: used in combination with max_results to pull more than 1000
entries. This defaults to 1.
max_results: maximum results that the pull will return. This defaults
to, and maxes out at 1000.
Yields:
A DataQuery object used to construct a URI based on the Analytics Data feed.
"""
self.elements = {'ids': ids,
'dimensions': dimensions,
'metrics': metrics,
'sort': sort,
'filters': filters,
'start-date': start_date,
'end-date': end_date,
'start-index': start_index,
'max-results': max_results}
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Analytics
List feed.
"""
old_feed = self.feed
self.feed = '/'.join([old_feed]) + '?' + \
urllib.urlencode(dict([(key, value) for key, value in \
self.elements.iteritems() if value]))
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
return new_feed
class AccountQuery(gdata.service.Query):
"""Object used to construct a URI to query the Google Account List feed"""
def __init__(self, feed='/feeds/accounts', start_index=1,
max_results=1000, username='default', text_query=None,
params=None, categories=None):
"""Constructor for Account List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
visibility: string (optional) The visibility chosen for the current
feed.
projection: string (optional) The projection chosen for the current
feed.
text_query: string (optional) The contents of the q query parameter.
This string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
username: string (deprecated) This value should now always be passed as
'default'.
Yields:
      An AccountQuery object used to construct a URI based on the Account
      List feed.
"""
self.max_results = max_results
self.start_index = start_index
self.username = username
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Account
List feed.
"""
old_feed = self.feed
self.feed = '/'.join([old_feed, self.username]) + '?' + \
'&'.join(['max-results=' + str(self.max_results),
'start-index=' + str(self.start_index)])
new_feed = self.feed
self.feed = old_feed
return new_feed
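# Illustrative usage (sketch, not part of the original module):
#
#   query = AccountQuery(max_results=50)
#   uri = query.ToUri()  # '/feeds/accounts/default?max-results=50&start-index=1'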
| apache-2.0 | -1,201,229,534,587,199,000 | 39.160121 | 79 | 0.633416 | false |
SlashRoot/WHAT | what_apps/main/models.py | 1 | 2208 | from django.db import models
from itertools import chain
BUBBLE_ACTIONS = (
(1, 'link'),
(2, 'pop'),
(3, 'modal'),
(4, 'ajax_crumb'),
)
class Bubble(models.Model):
url=models.CharField(max_length=300, blank=True, null=True)
name=models.CharField(max_length=200)
content=models.TextField(blank=True, null=True)
action=models.IntegerField(choices=BUBBLE_ACTIONS)
data=models.CharField(max_length=80, blank=True, null=True)
menu_crumb=models.ForeignKey('BubbleMenu', blank=True, null=True, related_name="origins")
def __unicode__(self):
return self.name
#This will need to be more dynamic in the future, but I need to get something down.
class BubbleMenu(models.Model):
bubbles=models.ManyToManyField(Bubble, related_name="menu")
name=models.CharField(max_length=80)
launch_name=models.CharField(max_length=80)
crumbs=models.ManyToManyField('self', symmetrical=False, blank=True, null=True)
def allBubbles(self):
return chain(self.bubbles.all(), self.crumbs.all())
def totalBubbles(self):
return self.bubbles.count() + self.crumbs.count()
def getMenuTree(self):
#Start with an empty list
menu_tree = []
#This menu is obviously in the tree
menu_tree.append(self)
for crumb in self.crumbs.all():
b = Bubble()
b.url = 'javascript:;'
b.name = crumb.name
b.action = 2
b.menu_crumb = crumb
for new_menu in self.crumbs.all():
#Avoid infinite recursion.
if not new_menu in menu_tree:
#Since this menu is not already in the tree, it must be added, along with all its sub-menus.
#Thus, we'll run this very method (the one we are inside) to append them all.
menu_tree += new_menu.getMenuTree() #Concat list to list
return menu_tree
def __unicode__(self):
return self.name | mit | -6,342,958,906,977,628,000 | 31.485294 | 108 | 0.564764 | false |
hufsm/tu_gen2_libsigrokdecode | decoders/cfp/__init__.py | 7 | 1583 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 Elias Oenal <[email protected]>
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
##
'''
This decoder stacks on top of the 'mdio' PD and decodes the CFP 100G
pluggable transceiver protocol.
'''
from .pd import Decoder
| gpl-3.0 | -4,650,766,062,340,091,000 | 45.558824 | 79 | 0.765635 | false |
amanuel/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/f77.py | 61 | 2056 | """engine.SCons.Tool.f77
Tool-specific initialization for the generic Posix f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f77.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f77_to_env
compilers = ['f77']
def generate(env):
add_all_to_env(env)
add_f77_to_env(env)
fcomp = env.Detect(compilers) or 'f77'
env['F77'] = fcomp
env['SHF77'] = fcomp
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = fcomp
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | -2,879,549,820,191,078,400 | 32.16129 | 95 | 0.745623 | false |
quom/google-cloud-python | docs/logging_snippets.py | 6 | 12290 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testable usage examples for Stackdriver Logging API wrapper
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.logging.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
"""
import time
from google.cloud.logging.client import Client
def snippet(func):
"""Mark ``func`` as a snippet example function."""
func._snippet = True
return func
def _millis():
return time.time() * 1000
def do_something_with(item): # pylint: disable=unused-argument
pass
# pylint: disable=reimported,unused-variable,unused-argument
@snippet
def instantiate_client(_unused_client, _unused_to_delete):
"""Instantiate client."""
# [START client_create_default]
from google.cloud import logging
client = logging.Client()
# [END client_create_default]
credentials = object()
# [START client_create_explicit]
from google.cloud import logging
client = logging.Client(project='my-project', credentials=credentials)
# [END client_create_explicit]
# pylint: enable=reimported,unused-variable,unused-argument
@snippet
def client_list_entries(client, to_delete): # pylint: disable=unused-argument
"""List entries via client."""
# [START client_list_entries_default]
for entry in client.list_entries(): # API call(s)
do_something_with(entry)
# [END client_list_entries_default]
# [START client_list_entries_filter]
FILTER = 'logName:log_name AND textPayload:simple'
for entry in client.list_entries(filter_=FILTER): # API call(s)
do_something_with(entry)
# [END client_list_entries_filter]
# [START client_list_entries_order_by]
from google.cloud.logging import DESCENDING
for entry in client.list_entries(order_by=DESCENDING): # API call(s)
do_something_with(entry)
# [END client_list_entries_order_by]
# [START client_list_entries_paged]
iterator = client.list_entries()
pages = iterator.pages
page1 = next(pages) # API call
for entry in page1:
do_something_with(entry)
page2 = next(pages) # API call
for entry in page2:
do_something_with(entry)
# [END client_list_entries_paged]
# @snippet Commented because we need real project IDs to test
def client_list_entries_multi_project(
client, to_delete): # pylint: disable=unused-argument
"""List entries via client across multiple projects."""
# [START client_list_entries_multi_project]
PROJECT_IDS = ['one-project', 'another-project']
for entry in client.list_entries(project_ids=PROJECT_IDS): # API call(s)
do_something_with(entry)
# [END client_list_entries_multi_project]
@snippet
def logger_usage(client, to_delete):
"""Logger usage."""
LOG_NAME = 'logger_usage_%d' % (_millis())
# [START logger_create]
logger = client.logger(LOG_NAME)
# [END logger_create]
to_delete.append(logger)
# [START logger_log_text]
logger.log_text("A simple entry") # API call
# [END logger_log_text]
# [START logger_log_struct]
logger.log_struct({
'message': 'My second entry',
'weather': 'partly cloudy',
}) # API call
# [END logger_log_struct]
# [START logger_list_entries]
from google.cloud.logging import DESCENDING
for entry in logger.list_entries(order_by=DESCENDING): # API call(s)
do_something_with(entry)
# [END logger_list_entries]
def _logger_delete():
# [START logger_delete]
logger.delete() # API call
# [END logger_delete]
_backoff_not_found(_logger_delete)
to_delete.remove(logger)
@snippet
def metric_crud(client, to_delete):
"""Metric CRUD."""
METRIC_NAME = 'robots-%d' % (_millis(),)
DESCRIPTION = "Robots all up in your server"
FILTER = 'logName:apache-access AND textPayload:robot'
UPDATED_FILTER = 'textPayload:robot'
UPDATED_DESCRIPTION = "Danger, Will Robinson!"
# [START client_list_metrics]
for metric in client.list_metrics(): # API call(s)
do_something_with(metric)
# [END client_list_metrics]
# [START metric_create]
metric = client.metric(
METRIC_NAME, filter_=FILTER, description=DESCRIPTION)
assert not metric.exists() # API call
metric.create() # API call
assert metric.exists() # API call
# [END metric_create]
to_delete.append(metric)
# [START metric_reload]
existing_metric = client.metric(METRIC_NAME)
existing_metric.reload() # API call
# [END metric_reload]
assert existing_metric.filter_ == FILTER
assert existing_metric.description == DESCRIPTION
# [START metric_update]
existing_metric.filter_ = UPDATED_FILTER
existing_metric.description = UPDATED_DESCRIPTION
existing_metric.update() # API call
# [END metric_update]
existing_metric.reload()
assert existing_metric.filter_ == UPDATED_FILTER
assert existing_metric.description == UPDATED_DESCRIPTION
def _metric_delete():
# [START metric_delete]
metric.delete()
# [END metric_delete]
_backoff_not_found(_metric_delete)
to_delete.remove(metric)
def _sink_storage_setup(client):
from google.cloud import storage
BUCKET_NAME = 'sink-storage-%d' % (_millis(),)
client = storage.Client()
bucket = client.bucket(BUCKET_NAME)
bucket.create()
# [START sink_bucket_permissions]
bucket.acl.reload() # API call
logs_group = bucket.acl.group('[email protected]')
logs_group.grant_owner()
bucket.acl.add_entity(logs_group)
bucket.acl.save() # API call
# [END sink_bucket_permissions]
return bucket
@snippet
def sink_storage(client, to_delete):
"""Sink log entries to storage."""
bucket = _sink_storage_setup(client)
to_delete.append(bucket)
SINK_NAME = 'robots-storage-%d' % (_millis(),)
FILTER = 'textPayload:robot'
# [START sink_storage_create]
DESTINATION = 'storage.googleapis.com/%s' % (bucket.name,)
sink = client.sink(SINK_NAME, filter_=FILTER, destination=DESTINATION)
assert not sink.exists() # API call
sink.create() # API call
assert sink.exists() # API call
# [END sink_storage_create]
to_delete.insert(0, sink) # delete sink before bucket
def _sink_bigquery_setup(client):
from google.cloud import bigquery
DATASET_NAME = 'sink_bigquery_%d' % (_millis(),)
client = bigquery.Client()
dataset = client.dataset(DATASET_NAME)
dataset.create()
dataset.reload()
# [START sink_dataset_permissions]
from google.cloud.bigquery.dataset import AccessGrant
grants = dataset.access_grants
grants.append(AccessGrant(
'WRITER', 'groupByEmail', '[email protected]'))
dataset.access_grants = grants
dataset.update() # API call
# [END sink_dataset_permissions]
return dataset
@snippet
def sink_bigquery(client, to_delete):
"""Sink log entries to bigquery."""
dataset = _sink_bigquery_setup(client)
to_delete.append(dataset)
SINK_NAME = 'robots-bigquery-%d' % (_millis(),)
FILTER = 'textPayload:robot'
# [START sink_bigquery_create]
DESTINATION = 'bigquery.googleapis.com%s' % (dataset.path,)
sink = client.sink(SINK_NAME, filter_=FILTER, destination=DESTINATION)
assert not sink.exists() # API call
sink.create() # API call
assert sink.exists() # API call
# [END sink_bigquery_create]
to_delete.insert(0, sink) # delete sink before dataset
def _sink_pubsub_setup(client):
from google.cloud import pubsub
TOPIC_NAME = 'sink-pubsub-%d' % (_millis(),)
client = pubsub.Client()
topic = client.topic(TOPIC_NAME)
topic.create()
# [START sink_topic_permissions]
policy = topic.get_iam_policy() # API call
policy.owners.add(policy.group('[email protected]'))
topic.set_iam_policy(policy) # API call
# [END sink_topic_permissions]
return topic
@snippet
def sink_pubsub(client, to_delete):
"""Sink log entries to pubsub."""
topic = _sink_pubsub_setup(client)
to_delete.append(topic)
SINK_NAME = 'robots-pubsub-%d' % (_millis(),)
FILTER = 'logName:apache-access AND textPayload:robot'
UPDATED_FILTER = 'textPayload:robot'
# [START sink_pubsub_create]
DESTINATION = 'pubsub.googleapis.com/%s' % (topic.full_name,)
sink = client.sink(SINK_NAME, filter_=FILTER, destination=DESTINATION)
assert not sink.exists() # API call
sink.create() # API call
assert sink.exists() # API call
# [END sink_pubsub_create]
to_delete.insert(0, sink) # delete sink before topic
# [START client_list_sinks]
for sink in client.list_sinks(): # API call(s)
do_something_with(sink)
# [END client_list_sinks]
# [START sink_reload]
existing_sink = client.sink(SINK_NAME)
existing_sink.reload()
# [END sink_reload]
assert existing_sink.filter_ == FILTER
assert existing_sink.destination == DESTINATION
# [START sink_update]
existing_sink.filter_ = UPDATED_FILTER
existing_sink.update()
# [END sink_update]
existing_sink.reload()
assert existing_sink.filter_ == UPDATED_FILTER
# [START sink_delete]
sink.delete()
# [END sink_delete]
to_delete.pop(0)
@snippet
def logging_handler(client):
# [START create_default_handler]
import logging
handler = client.get_default_handler()
cloud_logger = logging.getLogger('cloudLogger')
cloud_logger.setLevel(logging.INFO)
cloud_logger.addHandler(handler)
cloud_logger.error('bad news')
# [END create_default_handler]
# [START create_cloud_handler]
from google.cloud.logging.handlers import CloudLoggingHandler
handler = CloudLoggingHandler(client)
cloud_logger = logging.getLogger('cloudLogger')
cloud_logger.setLevel(logging.INFO)
cloud_logger.addHandler(handler)
cloud_logger.error('bad news')
# [END create_cloud_handler]
# [START create_named_handler]
handler = CloudLoggingHandler(client, name='mycustomlog')
# [END create_named_handler]
@snippet
def setup_logging(client):
import logging
# [START setup_logging]
client.setup_logging(log_level=logging.INFO)
# [END setup_logging]
# [START setup_logging_excludes]
client.setup_logging(log_level=logging.INFO,
excluded_loggers=('werkzeug',))
# [END setup_logging_excludes]
def _line_no(func):
return func.__code__.co_firstlineno
def _find_examples():
funcs = [obj for obj in globals().values()
if getattr(obj, '_snippet', False)]
for func in sorted(funcs, key=_line_no):
yield func
def _name_and_doc(func):
return func.__name__, func.__doc__
def _backoff_not_found(deleter):
from google.cloud.exceptions import NotFound
timeouts = [1, 2, 4, 8, 16]
while timeouts:
try:
deleter()
except NotFound:
time.sleep(timeouts.pop(0))
else:
break
def main():
client = Client()
for example in _find_examples():
to_delete = []
print('%-25s: %s' % _name_and_doc(example))
try:
example(client, to_delete)
except AssertionError as failure:
print(' FAIL: %s' % (failure,))
except Exception as error: # pylint: disable=broad-except
print(' ERROR: %r' % (error,))
for item in to_delete:
_backoff_not_found(item.delete)
if __name__ == '__main__':
main()
| apache-2.0 | 370,908,914,346,466,500 | 29.122549 | 79 | 0.663548 | false |
spunkmars/ProFTPD-Admin | src/proftpd/ftpadmin/models/ftpacl.py | 1 | 4142 | #coding=utf-8
from django.db import models
from proftpd.ftpadmin.lib.common import set_hexdigest, fix_path, check_safe_range
from proftpd.ftpadmin.settings import DISABLED_CHOICES, SHELL_CHOICES, FILE_PATH, FTP_GROUP_DEFAULT_GID, FTP_USER_SAFE_HOMEDIR, FTP_ACL_CHOICES
from proftpd.ftpadmin import signals
from proftpd.ftpadmin.models.ftpgroups import Ftpgroup
from proftpd.ftpadmin.models.ftpusers import Ftpuser
# ACL Commands
# READ RETR
# WRITE APPE, STOR, STOU
# DELETE DELE, RMD, XRMD
# CREATE MKD, XMKD, LINK, SYMLINK
# MODIFY MFF, MFMT, SITE CHGRP, SITE CHMOD, SETSTAT, FSETSTAT
# MOVE RNFR, RNTO, SITE CPTO, RENAME
# VIEW LIST, MDTM, MLSD, MLST, NLST, SIZE, STAT, LSTAT, OPENDIR, READLINK
# NAVIGATE CDUP, XCDUP, CWD, XCWD, PWD, XPWD, REALPATH
# ACL columns value:
#
# true/false
# on/off
# allow/deny
# allowed/denied
# yes/no
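# Illustrative usage (sketch, not part of the original module; the group id
# and path below are placeholder values, and Ftpacl is defined further down):
#
#   acl = Ftpacl(groupname_id='1', username_id='',
#                path='/srv/ftp/backups',
#                read_acl='allow', view_acl='allow', navigate_acl='allow',
#                write_acl='deny', delete_acl='deny', create_acl='deny',
#                modify_acl='deny', move_acl='deny')
#   acl.save()  # the path must fall inside FTP_USER_SAFE_HOMEDIR or save() raises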
#FTP_ACL_CHOICES = (
# ('true', 'true'),
# ('false', 'false'),
# ('on', 'on'),
# ('off', 'off'),
# ('allow', 'allow'),
# ('deny', 'deny'),
# ('allowed', 'allowed'),
# ('denied', 'denied'),
# ('yes', 'yes'),
# ('no', 'no'),
#)
#FTP_ACL_CHOICES = (
# ('allow', 'allow'),
# ('deny', 'deny'),
#)
class Ftpacl(models.Model):
username_id = models.IntegerField(null=True, blank=True, unique=False)
username = models.CharField(max_length=255, blank=True, null=True, editable=False)
groupname_id = models.IntegerField(null=True, blank=True, unique=False)
groupname = models.CharField(max_length=255, blank=True, null=True, editable=False)
path = models.CharField(max_length=255, default=FILE_PATH.get('proftpd_default_data_path', ''))
read_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='allow')
write_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='deny')
delete_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='deny')
create_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='deny')
modify_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='deny')
move_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='deny')
view_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='allow')
navigate_acl = models.CharField(max_length=255, choices=FTP_ACL_CHOICES, blank=True, null=True, default='allow')
class Meta:
db_table = 'ftpacl'
ordering = ['-path']
app_label = 'ftpadmin'
def __unicode__(self):
return self.path
    # So that the items() method can be used from Django template tags
def items(self):
return [(field, field.value_to_string(self)) for field in Ftpacl._meta.fields]
def sync_username_groupname(self):
if self.username_id:
self.username = Ftpuser.objects.get( pk=int(self.username_id) ).username
else:
self.username = ''
if self.groupname_id:
self.groupname = Ftpgroup.objects.get( pk=int(self.groupname_id) ).groupname
else:
self.groupname = ''
def save(self, *args, **kwargs):
if self.username_id.isdigit():
self.username_id = int( self.username_id )
else:
self.username_id = None
if self.groupname_id.isdigit():
self.groupname_id = int( self.groupname_id )
else:
self.groupname_id = None
self.sync_username_groupname()
if check_safe_range(safe_range=FTP_USER_SAFE_HOMEDIR, c_type="startswith", v_value=self.path) != True:
raise ValueError("You must type a valid path!")
return None
self.path = fix_path(self.path)
super(Ftpacl, self).save(*args, **kwargs) | bsd-3-clause | 4,780,994,311,556,186,000 | 36.752294 | 144 | 0.614244 | false |
Splo0sh/3DCT | test/test_TDCT_correlation.py | 2 | 6800 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @Title : test_TDCT_correlation
# @Project : 3DCTv2
# @Description : pytest test
# @Author : Jan Arnold
# @Email : jan.arnold (at) coraxx.net
# @Copyright : Copyright (C) 2016 Jan Arnold
# @License : GPLv3 (see LICENSE file)
# @Credits :
# @Maintainer : Jan Arnold
# @Date : 2016/04
# @Version : 3DCT 2.3.0 module rev. 3
# @Status : stable
# @Usage : pytest
# @Notes :
# @Python_version : 2.7.11
"""
# ======================================================================================================================#
import pytest
import os
import tifffile as tf
try:
import TDCT_correlation
TDCT_error = ""
TDCT_correlation.debug = False
except Exception as e:
TDCT_error = e
def test_imgShapeRGB(image_RGB):
img = tf.imread(str(image_RGB))
assert img.shape == (941, 1024, 3)
def test_imgShapeGrey(image_Grey):
img = tf.imread(str(image_Grey))
assert img.shape == (941, 1024)
@pytest.fixture(scope='module')
def tdct_CorrelationInstance_setup(request, image_RGB):
def tdct_CorrelationInstance_teardown():
print('\ndone using TDCT_correlation instance')
request.addfinalizer(tdct_CorrelationInstance_teardown)
print('\nsetting up TDCT_correlation instance')
left = str(image_RGB)
right = str(image_RGB)
main = TDCT_correlation.Main(leftImage=left,rightImage=right)
return main
def test_TDCT_correlationImport():
if 'TDCT_correlation' not in globals():
pytest.fail("TDCT_correlation import: {0}".format(TDCT_error))
# @pytest.mark.skipif(TDCT_error != "", reason="TDCT_correlation import failed: {0}".format(TDCT_error))
# def test_TDCT_correlationInit():
# window = TDCT_correlation.MainWidget()
# assert window
@pytest.mark.skipif(TDCT_error != "", reason="TDCT_correlation import failed: {0}".format(TDCT_error))
def test_guiFile(maindir):
qtCreatorFile_main = os.path.join(maindir, "TDCT_correlation.ui")
assert os.path.isfile(qtCreatorFile_main) is True
Ui_MainWindow, QtBaseClass = TDCT_correlation.uic.loadUiType(qtCreatorFile_main)
assert Ui_MainWindow
assert QtBaseClass
@pytest.mark.skipif(TDCT_error != "", reason="TDCT_correlation import failed: {0}".format(TDCT_error))
def test_model2np(tdct_CorrelationInstance_setup):
# print dir(tdct_CorrelationInstance_setup.window)
compArray = TDCT_correlation.np.array([
[0., 0., 0.],
[50., 25., 5.],
[100., 50., 10.],
[150., 75., 15.],
[200., 100., 20.]])
for i in range(5):
items = [
TDCT_correlation.QtGui.QStandardItem(str(50*i)),
TDCT_correlation.QtGui.QStandardItem(str(25*i)),
TDCT_correlation.QtGui.QStandardItem(str(5*i))]
tdct_CorrelationInstance_setup.window.modelRight.appendRow(items)
retArray = tdct_CorrelationInstance_setup.window.model2np(tdct_CorrelationInstance_setup.window.modelRight,[0,5])
# assert TDCT_correlation.np.array_equal(retArray, compArray)
assert TDCT_correlation.np.testing.assert_array_equal(retArray, compArray) is None
@pytest.mark.skipif(TDCT_error != "", reason="TDCT_correlation import failed: {0}".format(TDCT_error))
def test_anglectrl(tdct_CorrelationInstance_setup):
testArray = {-1:359,0:0,1:1,359:359,360:0,361:1}
for k,v in testArray.iteritems():
print "Testing angle {0:03}, expecting {1:03} ... ".format(k, v),
angle = tdct_CorrelationInstance_setup.window.anglectrl(angle=k)
assert angle == v
print "OK"
@pytest.mark.skipif(TDCT_error != "", reason="TDCT_correlation import failed: {0}".format(TDCT_error))
def test_pxSize(tdct_CorrelationInstance_setup, image_RGB, image_Grey):
pixelSize = tdct_CorrelationInstance_setup.window.pxSize(str(image_RGB),z=False)
assert pixelSize == 123.
pixelSize = tdct_CorrelationInstance_setup.window.pxSize(str(image_Grey),z=False)
assert pixelSize == 4.56e-006*1e006
pixelSize = tdct_CorrelationInstance_setup.window.pxSize(str(image_RGB),z=True)
assert pixelSize == 456.
pixelSize = tdct_CorrelationInstance_setup.window.pxSize(str(image_Grey),z=True)
assert pixelSize == 123.
@pytest.mark.skipif(TDCT_error != "", reason="TDCT_correlation import failed: {0}".format(TDCT_error))
def test_norm_img(tdct_CorrelationInstance_setup):
compArray = TDCT_correlation.np.array([[127, 127, 127],[254, 254, 254]], dtype='uint8')
retArray = tdct_CorrelationInstance_setup.window.norm_img(TDCT_correlation.np.array([[1,1,1],[2,2,2]],dtype='uint8'))
assert TDCT_correlation.np.testing.assert_array_equal(retArray, compArray) is None
@pytest.mark.skipif(TDCT_error != "", reason="TDCT_correlation import failed: {0}".format(TDCT_error))
def test_blendImages(tdct_CorrelationInstance_setup):
## Blending images
img1 = TDCT_correlation.np.array([[1,1],[2,2]],dtype='uint8')
img2 = TDCT_correlation.np.array([[3,4],[4,4]],dtype='uint8')
## Blending using "screen"
compArray = TDCT_correlation.np.array([[3,4],[5,5]], dtype='uint8')
retArray = tdct_CorrelationInstance_setup.window.blendImages([img1,img2], blendmode='screen')
assert TDCT_correlation.np.testing.assert_array_equal(retArray, compArray) is None
## Blending using "minimum"
compArray = TDCT_correlation.np.array([[1,1],[2,2]], dtype='uint8')
retArray = tdct_CorrelationInstance_setup.window.blendImages([img1,img2], blendmode='minimum')
assert TDCT_correlation.np.testing.assert_array_equal(retArray, compArray) is None
## Passing no images should return a "white image" i.e. array with all pixels = 255
compArray = TDCT_correlation.np.zeros([10,10], dtype='uint8')-1
retArray = tdct_CorrelationInstance_setup.window.blendImages([], blendmode='screen')
assert TDCT_correlation.np.testing.assert_array_equal(retArray, compArray) is None
retArray = tdct_CorrelationInstance_setup.window.blendImages([], blendmode='minimum')
assert TDCT_correlation.np.testing.assert_array_equal(retArray, compArray) is None
# @pytest.fixture(scope='module')
# def resource_a_setup(request):
# print('\nresources_a_setup()')
# def resource_a_teardown():
# print('\nresources_a_teardown()')
# request.addfinalizer(resource_a_teardown)
# def test_1_that_needs_resource_a(resource_a_setup):
# print('test_1_that_needs_resource_a()')
# def test_2_that_does_not():
# print('\ntest_2_that_does_not()')
# def test_3_that_does(resource_a_setup):
# print('\ntest_3_that_does()')
##########################################
# def resource_a_setup():
# print('resources_a_setup()')
# def resource_a_teardown():
# print('resources_a_teardown()')
# class TestClass:
# @classmethod
# def setup_class(cls):
# print ('\nsetup_class()')
# resource_a_setup()
# @classmethod
# def teardown_class(cls):
# print ('\nteardown_class()')
# resource_a_teardown()
# def test_1_that_needs_resource_a(self):
# print('\ntest_1_that_needs_resource_a()')
# def test_2_that_does_not():
# print('\ntest_2_that_does_not()')
| gpl-3.0 | -2,414,214,076,446,822,400 | 34.789474 | 121 | 0.701765 | false |
samvarankashyap/googlecloudutility2 | lib/simplejson/simplejson/compat.py | 155 | 1036 | """Python 3 compatibility shims
"""
import sys
if sys.version_info[0] < 3:
PY3 = False
def b(s):
return s
def u(s):
return unicode(s, 'unicode_escape')
import cStringIO as StringIO
StringIO = BytesIO = StringIO.StringIO
text_type = unicode
binary_type = str
string_types = (basestring,)
integer_types = (int, long)
unichr = unichr
reload_module = reload
def fromhex(s):
return s.decode('hex')
else:
PY3 = True
if sys.version_info[:2] >= (3, 4):
from importlib import reload as reload_module
else:
from imp import reload as reload_module
import codecs
def b(s):
return codecs.latin_1_encode(s)[0]
def u(s):
return s
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
text_type = str
binary_type = bytes
string_types = (str,)
integer_types = (int,)
def unichr(s):
return u(chr(s))
def fromhex(s):
return bytes.fromhex(s)
long_type = integer_types[-1]
| apache-2.0 | -4,714,297,911,925,072,000 | 21.521739 | 53 | 0.593629 | false |
tprrt/linux-stable | tools/perf/python/tracepoint.py | 291 | 1504 | #! /usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# -*- python -*-
# -*- coding: utf-8 -*-
import perf
class tracepoint(perf.evsel):
def __init__(self, sys, name):
config = perf.tracepoint(sys, name)
perf.evsel.__init__(self,
type = perf.TYPE_TRACEPOINT,
config = config,
freq = 0, sample_period = 1, wakeup_events = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_RAW | perf.SAMPLE_TIME)
def main():
tp = tracepoint("sched", "sched_switch")
cpus = perf.cpu_map()
threads = perf.thread_map(-1)
evlist = perf.evlist(cpus, threads)
evlist.add(tp)
evlist.open()
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
if not isinstance(event, perf.sample_event):
continue
print "time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
event.sample_time,
event.prev_comm,
event.prev_pid,
event.prev_prio,
event.prev_state,
event.next_comm,
event.next_pid,
event.next_prio)
if __name__ == '__main__':
main()
| gpl-2.0 | 6,872,729,223,369,229,000 | 30.333333 | 134 | 0.492686 | false |
liujinliu/squirrel | src/squirrel/controllers/worker.py | 1 | 1946 | # -*- coding: utf-8 -*-
import logging
from datetime import datetime
from tornado import gen
from tornado.queues import Queue
from concurrent.futures import ThreadPoolExecutor
from db.cache import Cache
from db.persis import Persis
from squirrel.utils import USER_CACHE_MAX, DAY_FMT
q = Queue(maxsize=1000)
LOG = logging.getLogger(__name__)
thread_pool = ThreadPoolExecutor(4)
sync_records_thread = ThreadPoolExecutor(1)
def do_sync_records(user_id, timestamp):
LOG.info('sync %s data to persis storage begin, timestamp:%d'
% (user_id, timestamp))
LOG.info('got all records of %s, timestamp:%d'
% (user_id, timestamp))
cache_records = Cache.select(user_id, timestamp,
USER_CACHE_MAX*2)
records = []
list(map(lambda x: records.extend(x.get('doc', [])),
cache_records))
utc_date = datetime.utcfromtimestamp(timestamp)
dt = datetime.strftime(utc_date, DAY_FMT)
LOG.info('persis all records of %s, dt:%s'
% (user_id, dt))
Persis.insert(user_id, dt, records)
LOG.info('update record in rds user_id:%s, timestamp:%d'
% (user_id, timestamp))
LOG.info('sync %s data to persis storage finish, timestamp:%d'
% (user_id, timestamp))
@gen.coroutine
def sync_record_data():
while True:
val = yield q.get()
user_id = val.get('user_id', '')
timestamp = val.get('timestamp', 0)
try:
LOG.info('submit to sync user %s, timestamp:%d'
% (user_id, timestamp))
yield sync_records_thread.submit(do_sync_records, user_id,
timestamp)
finally:
q.task_done()
@gen.coroutine
def producer_sync_job(user_id, timestamp):
LOG.info('add %s to worker queue, timestamp:%d'
% (user_id, timestamp))
yield q.put(dict(user_id=user_id, timestamp=timestamp))
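# Illustrative wiring (sketch, not part of the original module; assumes a
# running Tornado IOLoop; spawn_callback is a real Tornado API, while the
# user id and timestamp below are placeholder values):
#
#   from tornado.ioloop import IOLoop
#   IOLoop.current().spawn_callback(sync_record_data)        # start the consumer
#   IOLoop.current().spawn_callback(producer_sync_job,
#                                   'user-42', 1461234567)   # enqueue a sync job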
| apache-2.0 | -7,385,836,377,033,772,000 | 33.75 | 70 | 0.608941 | false |
jendap/tensorflow | tensorflow/python/kernel_tests/scatter_ops_test.py | 6 | 12310 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.cached_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.VariableV1(old)
ref.initializer.run()
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
if (tf_scatter == state_ops.scatter_min or
tf_scatter == state_ops.scatter_max):
vtypes.append(np.float16)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
@test_util.run_deprecated_v1
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
@test_util.run_deprecated_v1
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
@test_util.run_deprecated_v1
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)
@test_util.run_deprecated_v1
def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
@test_util.run_deprecated_v1
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
@test_util.run_deprecated_v1
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
@test_util.run_deprecated_v1
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
@test_util.run_deprecated_v1
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
@test_util.run_deprecated_v1
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
@test_util.run_deprecated_v1
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
@test_util.run_deprecated_v1
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
@test_util.run_deprecated_v1
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
@test_util.run_deprecated_v1
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
@test_util.run_deprecated_v1
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
@test_util.run_deprecated_v1
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
@test_util.run_deprecated_v1
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
@test_util.run_deprecated_v1
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
@test_util.run_deprecated_v1
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
@test_util.run_deprecated_v1
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
@test_util.run_deprecated_v1
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
@test_util.run_deprecated_v1
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
@test_util.run_deprecated_v1
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
@test_util.run_deprecated_v1
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
@test_util.run_deprecated_v1
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
@test_util.run_deprecated_v1
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
@test_util.run_deprecated_v1
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
@test_util.run_deprecated_v1
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_deprecated_v1
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with test_util.force_gpu():
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
self.evaluate(op(ref, indices, updates))
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
self.evaluate(op(ref, indices, updates))
indices = np.array([2, 0, 6])
self.evaluate(op(ref, indices, updates))
if __name__ == '__main__':
test.main()
| apache-2.0 | 5,174,124,197,377,692,000 | 32.091398 | 80 | 0.653859 | false |
ch3lo/zookeeper | src/contrib/rest/src/python/demo_master_election.py | 115 | 2920 | #! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import time
from zkrest import ZooKeeper
class Agent(threading.Thread):
""" A basic agent that wants to become a master and exit """
root = '/election'
def __init__(self, id):
super(Agent, self).__init__()
self.zk = ZooKeeper()
self.id = id
def run(self):
print 'Starting #%s' % self.id
with self.zk.session(expire=5):
# signal agent presence
r = self.zk.create("%s/agent-" % self.root,
sequence=True, ephemeral=True)
self.me = r['path']
while True:
children = sorted([el['path'] \
for el in self.zk.get_children(self.root)])
master, previous = children[0], None
try:
index = children.index(self.me)
if index != 0:
previous = children[index-1]
except ValueError:
break
if previous is None:
self.do_master_work()
# and don't forget to send heartbeat messages
break
else:
# do slave work in another thread
pass
# wait for the previous agent or current master to exit / finish
while self.zk.exists(previous) or self.zk.exists(master):
time.sleep(0.5)
self.zk.heartbeat()
# TODO signal the slave thread to exit and wait for it
# and rerun the election loop
def do_master_work(self):
print "#%s: I'm the master: %s" % (self.id, self.me)
def main():
zk = ZooKeeper()
# create the root node used for master election
if not zk.exists('/election'):
zk.create('/election')
print 'Starting 10 agents ...'
agents = [Agent(id) for id in range(0,15)]
map(Agent.start, agents)
map(Agent.join, agents)
zk.delete('/election')
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -3,886,839,221,831,640,600 | 31.444444 | 80 | 0.573288 | false |
Erethon/synnefo | snf-branding/distribute_setup.py | 82 | 15757 | #!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
# will be used for python 2.3
def _python_cmd(*args):
args = (sys.executable,) + args
# quoting arguments if windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.10"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Distribute')
if not _python_cmd('setup.py', 'install'):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
finally:
os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Distribute egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15, no_fake=True):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
if not no_fake:
_fake_setuptools()
raise ImportError
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("distribute>="+version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of distribute (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U distribute'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
finally:
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
from setuptools.sandbox import DirectorySandbox
if not hasattr(DirectorySandbox, '_old'):
def violation(*args):
pass
DirectorySandbox._old = DirectorySandbox._violation
DirectorySandbox._violation = violation
patched = True
else:
patched = False
except ImportError:
patched = False
try:
return function(*args, **kw)
finally:
if patched:
DirectorySandbox._violation = DirectorySandbox._old
del DirectorySandbox._old
return __no_sandbox
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
if existing_content == content:
# already patched
log.warn('Already patched.')
return False
log.warn('Patching...')
_rename_path(path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
return True
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
        log.warn('Unknown installation at %s', placeholder)
return False
found = False
for file in os.listdir(placeholder):
if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
found = True
break
if not found:
log.warn('Could not locate setuptools*.egg-info')
return
log.warn('Removing elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
else:
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
if not patched:
log.warn('%s already patched.', pkg_info)
return False
# now let's move the files out of the way
for element in ('setuptools', 'pkg_resources.py', 'site.py'):
element = os.path.join(placeholder, element)
if os.path.exists(element):
_rename_path(element)
else:
log.warn('Could not find the %s element of the '
'Setuptools distribution', element)
return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
(SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
return
log.warn('Creating %s', pkg_info)
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
pth_file = os.path.join(placeholder, 'setuptools.pth')
log.warn('Creating %s', pth_file)
f = open(pth_file, 'w')
try:
f.write(os.path.join(os.curdir, setuptools_file))
finally:
f.close()
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
# let's check if it's already patched
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
if os.path.exists(pkg_info):
if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
log.warn('%s already patched.', pkg_info)
return False
_rename_path(path)
os.mkdir(path)
os.mkdir(os.path.join(path, 'EGG-INFO'))
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
                top_dir = arg.split('%s=' % option)[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
log.warn('Scanning installed packages')
try:
import pkg_resources
except ImportError:
# we're cool
log.warn('Setuptools or Distribute does not seem to be installed.')
return
ws = pkg_resources.working_set
try:
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
replacement=False))
except TypeError:
# old distribute API
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
return
# detecting if it was already faked
setuptools_location = setuptools_dist.location
log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
# setuptools is not located in them, we don't patch it
if not _under_prefix(setuptools_location):
log.warn('Not patching, --root or --prefix is installing Distribute'
' in another location')
return
# let's see if its an egg
if not setuptools_location.endswith('.egg'):
log.warn('Non-egg installation')
res = _remove_flat_installation(setuptools_location)
if not res:
return
else:
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
_same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
# let's create a fake egg replacing setuptools one
res = _patch_egg_dir(setuptools_location)
if not res:
return
log.warn('Patched done.')
_relaunch()
def _relaunch():
log.warn('Relaunching...')
# we have to relaunch the process
# pip marker to avoid a relaunch bug
if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
sys.argv[0] = 'setup.py'
args = [sys.executable] + sys.argv
sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
tarball = download_setuptools()
_install(tarball)
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 | -3,165,972,533,108,857,300 | 31.48866 | 80 | 0.599607 | false |
Edraak/edraak-platform | lms/djangoapps/mobile_api/users/tests.py | 9 | 20141 | """
Tests for users API
"""
# pylint: disable=no-member
import datetime
import ddt
import pytz
from django.conf import settings
from django.template import defaultfilters
from django.test import RequestFactory, override_settings
from django.utils import timezone
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from nose.plugins.attrib import attr
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import CertificateStatuses
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from courseware.access_response import MilestoneAccessError, StartDateError, VisibilityError
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from mobile_api.testutils import (
MobileAPITestCase,
MobileAuthTestMixin,
MobileAuthUserTestMixin,
MobileCourseAccessTestMixin
)
from openedx.core.lib.courses import course_image_url
from student.models import CourseEnrollment
from util.milestones_helpers import set_prerequisite_courses
from util.testing import UrlResetMixin
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .. import errors
from .serializers import CourseEnrollmentSerializer
@attr(shard=9)
class TestUserDetailApi(MobileAPITestCase, MobileAuthUserTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>...
"""
REVERSE_INFO = {'name': 'user-detail', 'params': ['username']}
def test_success(self):
self.login()
response = self.api_response()
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['email'], self.user.email)
@attr(shard=9)
class TestUserInfoApi(MobileAPITestCase, MobileAuthTestMixin):
"""
Tests for /api/mobile/v0.5/my_user_info
"""
def reverse_url(self, reverse_args=None, **kwargs):
return '/api/mobile/v0.5/my_user_info'
def test_success(self):
"""Verify the endpoint redirects to the user detail endpoint"""
self.login()
response = self.api_response(expected_response_code=302)
self.assertIn(self.username, response['location'])
@attr(shard=9)
@ddt.ddt
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestUserEnrollmentApi(UrlResetMixin, MobileAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ALLOW_ACCESS_TO_UNRELEASED_COURSE = True
ALLOW_ACCESS_TO_MILESTONE_COURSE = True
ALLOW_ACCESS_TO_NON_VISIBLE_COURSE = True
NEXT_WEEK = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=7)
LAST_WEEK = datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=7)
ADVERTISED_START = "Spring 2016"
ENABLED_SIGNALS = ['course_published']
DATES = {
'next_week': NEXT_WEEK,
'last_week': LAST_WEEK,
'default_start_date': DEFAULT_START_DATE,
}
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self, *args, **kwargs):
super(TestUserEnrollmentApi, self).setUp()
def verify_success(self, response):
"""
Verifies user course enrollment response for success
"""
super(TestUserEnrollmentApi, self).verify_success(response)
courses = response.data
self.assertEqual(len(courses), 1)
found_course = courses[0]['course']
self.assertIn('courses/{}/about'.format(self.course.id), found_course['course_about'])
self.assertIn('course_info/{}/updates'.format(self.course.id), found_course['course_updates'])
self.assertIn('course_info/{}/handouts'.format(self.course.id), found_course['course_handouts'])
self.assertIn('video_outlines/courses/{}'.format(self.course.id), found_course['video_outline'])
self.assertEqual(found_course['id'], unicode(self.course.id))
self.assertEqual(courses[0]['mode'], CourseMode.DEFAULT_MODE_SLUG)
self.assertEqual(courses[0]['course']['subscription_id'], self.course.clean_id(padding_char='_'))
expected_course_image_url = course_image_url(self.course)
self.assertIsNotNone(expected_course_image_url)
self.assertIn(expected_course_image_url, found_course['course_image'])
self.assertIn(expected_course_image_url, found_course['media']['course_image']['uri'])
def verify_failure(self, response, error_type=None):
self.assertEqual(response.status_code, 200)
courses = response.data
self.assertEqual(len(courses), 0)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_sort_order(self):
self.login()
num_courses = 3
courses = []
for course_index in range(num_courses):
courses.append(CourseFactory.create(mobile_available=True))
self.enroll(courses[course_index].id)
# verify courses are returned in the order of enrollment, with most recently enrolled first.
response = self.api_response()
for course_index in range(num_courses):
self.assertEqual(
response.data[course_index]['course']['id'],
unicode(courses[num_courses - course_index - 1].id)
)
@patch.dict(settings.FEATURES, {
'ENABLE_PREREQUISITE_COURSES': True,
'DISABLE_START_DATES': False,
'ENABLE_MKTG_SITE': True,
})
def test_courseware_access(self):
self.login()
course_with_prereq = CourseFactory.create(start=self.LAST_WEEK, mobile_available=True)
prerequisite_course = CourseFactory.create()
set_prerequisite_courses(course_with_prereq.id, [unicode(prerequisite_course.id)])
# Create list of courses with various expected courseware_access responses and corresponding expected codes
courses = [
course_with_prereq,
CourseFactory.create(start=self.NEXT_WEEK, mobile_available=True),
CourseFactory.create(visible_to_staff_only=True, mobile_available=True),
CourseFactory.create(start=self.LAST_WEEK, mobile_available=True, visible_to_staff_only=False),
]
expected_error_codes = [
MilestoneAccessError().error_code, # 'unfulfilled_milestones'
StartDateError(self.NEXT_WEEK).error_code, # 'course_not_started'
VisibilityError().error_code, # 'not_visible_to_user'
None,
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
# Verify courses have the correct response through error code. Last enrolled course is first course in response
response = self.api_response()
for course_index in range(len(courses)):
result = response.data[course_index]['course']['courseware_access']
self.assertEqual(result['error_code'], expected_error_codes[::-1][course_index])
if result['error_code'] is not None:
self.assertFalse(result['has_access'])
@ddt.data(
('next_week', ADVERTISED_START, ADVERTISED_START, "string"),
('next_week', None, defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('next_week', '', defaultfilters.date(NEXT_WEEK, "DATE_FORMAT"), "timestamp"),
('default_start_date', ADVERTISED_START, ADVERTISED_START, "string"),
('default_start_date', '', None, "empty"),
('default_start_date', None, None, "empty"),
)
@ddt.unpack
@patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False, 'ENABLE_MKTG_SITE': True})
def test_start_type_and_display(self, start, advertised_start, expected_display, expected_type):
"""
Tests that the correct start_type and start_display are returned in the
case the course has not started
"""
self.login()
course = CourseFactory.create(start=self.DATES[start], advertised_start=advertised_start, mobile_available=True)
self.enroll(course.id)
response = self.api_response()
self.assertEqual(response.data[0]['course']['start_type'], expected_type)
self.assertEqual(response.data[0]['course']['start_display'], expected_display)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_no_certificate(self):
self.login_and_enroll()
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertDictEqual(certificate_data, {})
def verify_pdf_certificate(self):
"""
Verifies the correct URL is returned in the response
for PDF certificates.
"""
self.login_and_enroll()
certificate_url = "http://test_certificate_url"
GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url=certificate_url,
)
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertEquals(certificate_data['url'], certificate_url)
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_disabled(self):
"""
Tests PDF certificates with CERTIFICATES_HTML_VIEW set to False.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_pdf_certificate_with_html_cert_enabled(self):
"""
Tests PDF certificates with CERTIFICATES_HTML_VIEW set to True.
"""
self.verify_pdf_certificate()
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True, 'ENABLE_MKTG_SITE': True})
def test_web_certificate(self):
CourseMode.objects.create(
course_id=self.course.id,
mode_display_name="Honor",
mode_slug=CourseMode.HONOR,
)
self.login_and_enroll()
certificates = [
{
'id': 1,
'name': 'Test Certificate Name',
'description': 'Test Certificate Description',
                'course_title': 'test_course_title',
'signatories': [],
'version': 1,
'is_active': True
}
]
self.course.certificates = {'certificates': certificates}
self.course.cert_html_view_enabled = True
self.store.update_item(self.course, self.user.id)
with mock_passing_grade():
generate_user_certificates(self.user, self.course.id)
response = self.api_response()
certificate_data = response.data[0]['certificate']
self.assertRegexpMatches(
certificate_data['url'],
r'http.*/certificates/user/{user_id}/course/{course_id}'.format(
user_id=self.user.id,
course_id=self.course.id,
)
)
@patch.dict(settings.FEATURES, {"ENABLE_DISCUSSION_SERVICE": True, 'ENABLE_MKTG_SITE': True})
def test_discussion_url(self):
self.login_and_enroll()
response = self.api_response()
response_discussion_url = response.data[0]['course']['discussion_url'] # pylint: disable=E1101
self.assertIn('/api/discussion/v1/courses/{}'.format(self.course.id), response_discussion_url)
def test_org_query(self):
self.login()
# Create list of courses with various organizations
courses = [
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True),
CourseFactory.create(org='edX', mobile_available=True, visible_to_staff_only=True),
CourseFactory.create(org='Proversity.org', mobile_available=True),
CourseFactory.create(org='MITx', mobile_available=True),
CourseFactory.create(org='HarvardX', mobile_available=True),
]
# Enroll in all the courses
for course in courses:
self.enroll(course.id)
response = self.api_response(data={'org': 'edX'})
# Test for 3 expected courses
self.assertEqual(len(response.data), 3)
# Verify only edX courses are returned
for entry in response.data:
self.assertEqual(entry['course']['org'], 'edX')
@attr(shard=9)
class CourseStatusAPITestCase(MobileAPITestCase):
"""
Base test class for /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
REVERSE_INFO = {'name': 'user-course-status', 'params': ['username', 'course_id']}
def setUp(self):
"""
Creates a basic course structure for our course
"""
super(CourseStatusAPITestCase, self).setUp()
self.section = ItemFactory.create(
parent=self.course,
category='chapter',
)
self.sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.unit = ItemFactory.create(
parent=self.sub_section,
category='vertical',
)
self.other_sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.other_unit = ItemFactory.create(
parent=self.other_sub_section,
category='vertical',
)
@attr(shard=9)
class TestCourseStatusGET(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for GET of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def test_success(self):
self.login_and_enroll()
response = self.api_response()
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.sub_section.location)
)
self.assertEqual(
response.data["last_visited_module_path"],
[unicode(module.location) for module in [self.sub_section, self.section, self.course]]
)
@attr(shard=9)
class TestCourseStatusPATCH(CourseStatusAPITestCase, MobileAuthUserTestMixin,
MobileCourseAccessTestMixin, MilestonesTestCaseMixin):
"""
Tests for PATCH of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def url_method(self, url, **kwargs):
# override implementation to use PATCH method.
return self.client.patch(url, data=kwargs.get('data', None))
def test_success(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": unicode(self.other_unit.location)})
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_module(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_nonexistent_module(self):
self.login_and_enroll()
non_existent_key = self.course.id.make_usage_key('video', 'non-existent')
response = self.api_response(data={"last_visited_module_id": non_existent_key}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODULE_ID
)
def test_no_timezone(self):
self.login_and_enroll()
past_date = datetime.datetime.now()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": past_date.isoformat()
},
expected_response_code=400
)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
def _date_sync(self, date, initial_unit, update_unit, expected_subsection):
"""
Helper for test cases that use a modification to decide whether
to update the course status
"""
self.login_and_enroll()
# save something so we have an initial date
self.api_response(data={"last_visited_module_id": unicode(initial_unit.location)})
# now actually update it
response = self.api_response(
data={
"last_visited_module_id": unicode(update_unit.location),
"modification_date": date.isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(expected_subsection.location)
)
def test_old_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=-100)
self._date_sync(date, self.unit, self.other_unit, self.sub_section)
def test_new_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=100)
self._date_sync(date, self.unit, self.other_unit, self.other_sub_section)
def test_no_initial_date(self):
self.login_and_enroll()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": timezone.now().isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"],
unicode(self.other_sub_section.location)
)
def test_invalid_date(self):
self.login_and_enroll()
response = self.api_response(data={"modification_date": "abc"}, expected_response_code=400)
self.assertEqual(
response.data,
errors.ERROR_INVALID_MODIFICATION_DATE
)
@attr(shard=9)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
class TestCourseEnrollmentSerializer(MobileAPITestCase, MilestonesTestCaseMixin):
"""
Test the course enrollment serializer
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(TestCourseEnrollmentSerializer, self).setUp()
self.login_and_enroll()
self.request = RequestFactory().get('/')
self.request.user = self.user
def test_success(self):
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['name'], self.course.display_name)
self.assertEqual(serialized['course']['number'], self.course.id.course)
self.assertEqual(serialized['course']['org'], self.course.id.org)
# Assert utm parameters
expected_utm_parameters = {
'twitter': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=twitter',
'facebook': 'utm_campaign=social-sharing-db&utm_medium=social&utm_source=facebook'
}
self.assertEqual(serialized['course']['course_sharing_utm_parameters'], expected_utm_parameters)
def test_with_display_overrides(self):
self.course.display_coursenumber = "overridden_number"
self.course.display_organization = "overridden_org"
self.store.update_item(self.course, self.user.id)
serialized = CourseEnrollmentSerializer(
CourseEnrollment.enrollments_for_user(self.user)[0],
context={'request': self.request},
).data
self.assertEqual(serialized['course']['number'], self.course.display_coursenumber)
self.assertEqual(serialized['course']['org'], self.course.display_organization)
| agpl-3.0 | 2,916,031,628,582,995,500 | 37.882239 | 120 | 0.640187 | false |
ilyes14/scikit-learn | sklearn/utils/class_weight.py | 63 | 7227 | # Authors: Andreas Mueller
# Manoj Kumar
# License: BSD 3 clause
import warnings
import numpy as np
from ..externals import six
from ..utils.fixes import in1d
from .fixes import bincount
def compute_class_weight(class_weight, classes, y):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, 'balanced' or None
If 'balanced', class weights will be given by
``n_samples / (n_classes * np.bincount(y))``.
If a dictionary is given, keys are classes and values
are corresponding class weights.
If None is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
``np.unique(y_org)`` with ``y_org`` the original class labels.
y : array-like, shape (n_samples,)
        Array of original class labels per sample.
Returns
-------
class_weight_vect : ndarray, shape (n_classes,)
Array with class_weight_vect[i] the weight for i-th class
References
----------
The "balanced" heuristic is inspired by
    Logistic Regression in Rare Events Data, King, Zeng, 2001.
"""
# Import error caused by circular imports.
from ..preprocessing import LabelEncoder
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight in ['auto', 'balanced']:
# Find the weight of each class as present in y.
le = LabelEncoder()
y_ind = le.fit_transform(y)
if not all(np.in1d(classes, le.classes_)):
raise ValueError("classes should have valid labels that are in y")
# inversely proportional to the number of samples in the class
if class_weight == 'auto':
recip_freq = 1. / bincount(y_ind)
weight = recip_freq[le.transform(classes)] / np.mean(recip_freq)
warnings.warn("The class_weight='auto' heuristic is deprecated in"
" favor of a new heuristic class_weight='balanced'."
" 'auto' will be removed in 0.18", DeprecationWarning)
else:
recip_freq = len(y) / (len(le.classes_) *
bincount(y_ind).astype(np.float64))
weight = recip_freq[le.transform(classes)]
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if i >= len(classes) or classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
return weight
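# A small worked illustration, assuming this module is imported as
# sklearn.utils.class_weight; with three samples of class 0 and one of class 1,
# the 'balanced' heuristic n_samples / (n_classes * np.bincount(y)) gives:
#
#   >>> import numpy as np
#   >>> from sklearn.utils.class_weight import compute_class_weight
#   >>> compute_class_weight('balanced', np.array([0, 1]), np.array([0, 0, 0, 1]))
#   array([ 0.66666667,  2.        ])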
def compute_sample_weight(class_weight, y, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
``n_samples / (n_classes * np.bincount(y))``.
For multi-output, the weights of each column of y will be multiplied.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Array of original class labels per sample.
indices : array-like, shape (n_subsample,), or None
Array of indices to be used in a subsample. Can be of length less than
n_samples in the case of a subsample, or equal to n_samples in the
case of a bootstrap subsample with repeated indices. If None, the
sample weight will be calculated over the full sample. Only "auto" is
supported for class_weight if this is provided.
Returns
-------
sample_weight_vect : ndarray, shape (n_samples,)
Array with sample weights as applied to the original y
"""
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if isinstance(class_weight, six.string_types):
if class_weight not in ['balanced', 'auto']:
raise ValueError('The only valid preset for class_weight is '
'"balanced". Given "%s".' % class_weight)
elif (indices is not None and
not isinstance(class_weight, six.string_types)):
raise ValueError('The only valid class_weight for subsampling is '
'"balanced". Given "%s".' % class_weight)
elif n_outputs > 1:
if (not hasattr(class_weight, "__iter__") or
isinstance(class_weight, dict)):
raise ValueError("For multi-output, class_weight should be a "
"list of dicts, or a valid string.")
if len(class_weight) != n_outputs:
raise ValueError("For multi-output, number of elements in "
"class_weight should match number of outputs.")
expanded_class_weight = []
for k in range(n_outputs):
y_full = y[:, k]
classes_full = np.unique(y_full)
classes_missing = None
if class_weight in ['balanced', 'auto'] or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
# Get class weights for the subsample, covering all classes in
# case some labels that were present in the original data are
# missing from the sample.
y_subsample = y[indices, k]
classes_subsample = np.unique(y_subsample)
weight_k = np.choose(np.searchsorted(classes_subsample,
classes_full),
compute_class_weight(class_weight_k,
classes_subsample,
y_subsample),
mode='clip')
classes_missing = set(classes_full) - set(classes_subsample)
else:
weight_k = compute_class_weight(class_weight_k,
classes_full,
y_full)
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
# Make missing classes' weight zero
weight_k[in1d(y_full, list(classes_missing))] = 0.
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight,
axis=0,
dtype=np.float64)
return expanded_class_weight
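# Continuing the illustration above: each sample receives the balanced weight
# of its own class, so y = [0, 0, 0, 1] yields
#
#   >>> compute_sample_weight('balanced', [0, 0, 0, 1])
#   array([ 0.66666667,  0.66666667,  0.66666667,  2.        ])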
| bsd-3-clause | -8,156,360,381,310,762,000 | 38.928177 | 80 | 0.576311 | false |
rest-of/the-deck | lambda/lib/python2.7/site-packages/setuptools/command/install.py | 496 | 4685 | from distutils.errors import DistutilsArgError
import inspect
import glob
import warnings
import platform
import distutils.command.install as orig
import setuptools
# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for
# now. See https://bitbucket.org/pypa/setuptools/issue/199/
_install = orig.install
class install(orig.install):
"""Use easy_install to install the package, w/dependencies"""
user_options = orig.install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = orig.install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
orig.install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
def finalize_options(self):
orig.install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return orig.install.handle_extra_path(self)
# Ignore extra_path when installing an egg (or being run by another
# command without --root or --single-version-externally-managed
self.path_file = None
self.extra_dirs = ''
def run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return orig.install.run(self)
if not self._called_from_setup(inspect.currentframe()):
# Run in backward-compatibility mode to support bdist_* commands.
orig.install.run(self)
else:
self.do_egg_install()
@staticmethod
def _called_from_setup(run_frame):
"""
Attempt to detect whether run() was called from setup() or by another
command. If called by setup(), the parent caller will be the
'run_command' method in 'distutils.dist', and *its* caller will be
the 'run_commands' method. If called any other way, the
immediate caller *might* be 'run_command', but it won't have been
called by 'run_commands'. Return True in that case or if a call stack
is unavailable. Return False otherwise.
"""
if run_frame is None:
msg = "Call stack not available. bdist_* commands may fail."
warnings.warn(msg)
if platform.python_implementation() == 'IronPython':
msg = "For best results, pass -X:Frames to enable call stack."
warnings.warn(msg)
return True
res = inspect.getouterframes(run_frame)[2]
caller, = res[:1]
info = inspect.getframeinfo(caller)
caller_module = caller.f_globals.get('__name__', '')
return (
caller_module == 'distutils.dist'
and info.function == 'run_commands'
)
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run()
setuptools.bootstrap_install_from = None
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
[cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
install.new_commands
)
| mit | -132,627,986,381,934,820 | 36.48 | 79 | 0.629456 | false |
frank10704/DF_GCS_W | MissionPlanner-master/packages/IronPython.StdLib.2.7.5-beta1/content/Lib/ftplib.py | 50 | 36764 | """An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
# Modified by Giampaolo Rodola' to add TLS support.
#
import os
import sys
# Import SOCKS module if it exists, else standard socket module socket
try:
import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
except ImportError:
import socket
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["FTP","Netrc"]
# Magic number from <socket.h>
MSG_OOB = 0x1 # Process data out of band
# The standard FTP server control port
FTP_PORT = 21
# Exception raised when an error or invalid response is received
class Error(Exception): pass
class error_reply(Error): pass # unexpected [123]xx reply
class error_temp(Error): pass # 4xx errors
class error_perm(Error): pass # 5xx errors
class error_proto(Error): pass # response does not begin with [1-5]
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, IOError, EOFError)
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class FTP:
'''An FTP client class.
To create a connection, call the class using these arguments:
host, user, passwd, acct, timeout
The first four arguments are all strings, and have default value ''.
timeout must be numeric and defaults to None if not passed,
meaning that no timeout will be set on any ftp socket(s)
If a timeout is passed, then this is now the default timeout for all ftp
socket operations for this instance.
Then use self.connect() with optional host and port argument.
To download a file, use ftp.retrlines('RETR ' + filename),
or ftp.retrbinary() with slightly different arguments.
To upload a file, use ftp.storlines() or ftp.storbinary(),
which have an open file as argument (see their definitions
below for details).
The download/upload functions first issue appropriate TYPE
and PORT or PASV commands.
'''
debugging = 0
host = ''
port = FTP_PORT
sock = None
file = None
welcome = None
passiveserver = 1
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard ftp port
# Optional arguments are host (for connect()),
# and user, passwd, acct (for login())
def __init__(self, host='', user='', passwd='', acct='',
timeout=_GLOBAL_DEFAULT_TIMEOUT):
self.timeout = timeout
if host:
self.connect(host)
if user:
self.login(user, passwd, acct)
def connect(self, host='', port=0, timeout=-999):
'''Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
'''
if host != '':
self.host = host
if port > 0:
self.port = port
if timeout != -999:
self.timeout = timeout
self.sock = socket.create_connection((self.host, self.port), self.timeout)
self.af = self.sock.family
self.file = self.sock.makefile('rb')
self.welcome = self.getresp()
return self.welcome
def getwelcome(self):
'''Get the welcome message from the server.
(this is read and squirreled away by connect())'''
if self.debugging:
print '*welcome*', self.sanitize(self.welcome)
return self.welcome
def set_debuglevel(self, level):
'''Set the debugging level.
The required argument level means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF'''
self.debugging = level
debug = set_debuglevel
def set_pasv(self, val):
'''Use passive or active mode for data transfers.
With a false argument, use the normal PORT mode,
With a true argument, use the PASV command.'''
self.passiveserver = val
# Internal: "sanitize" a string for printing
def sanitize(self, s):
if s[:5] == 'pass ' or s[:5] == 'PASS ':
i = len(s)
while i > 5 and s[i-1] in '\r\n':
i = i-1
s = s[:5] + '*'*(i-5) + s[i:]
return repr(s)
# Internal: send one line to the server, appending CRLF
def putline(self, line):
line = line + CRLF
if self.debugging > 1: print '*put*', self.sanitize(line)
self.sock.sendall(line)
# Internal: send one command to the server (through putline())
def putcmd(self, line):
if self.debugging: print '*cmd*', self.sanitize(line)
self.putline(line)
# Internal: return one line from the server, stripping CRLF.
# Raise EOFError if the connection is closed
def getline(self):
line = self.file.readline()
if self.debugging > 1:
print '*get*', self.sanitize(line)
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
# Internal: get a response from the server, which may possibly
# consist of multiple lines. Return a single string with no
# trailing CRLF. If the response consists of multiple lines,
# these are separated by '\n' characters in the string
def getmultiline(self):
line = self.getline()
if line[3:4] == '-':
code = line[:3]
while 1:
nextline = self.getline()
line = line + ('\n' + nextline)
if nextline[:3] == code and \
nextline[3:4] != '-':
break
return line
# Internal: get a response from the server.
# Raise various errors if the response indicates an error
def getresp(self):
resp = self.getmultiline()
if self.debugging: print '*resp*', self.sanitize(resp)
self.lastresp = resp[:3]
c = resp[:1]
if c in ('1', '2', '3'):
return resp
if c == '4':
raise error_temp, resp
if c == '5':
raise error_perm, resp
raise error_proto, resp
def voidresp(self):
"""Expect a response beginning with '2'."""
resp = self.getresp()
if resp[:1] != '2':
raise error_reply, resp
return resp
def abort(self):
'''Abort a file transfer. Uses out-of-band data.
This does not follow the procedure from the RFC to send Telnet
IP and Synch; that doesn't seem to work with the servers I've
tried. Instead, just send the ABOR command as OOB data.'''
line = 'ABOR' + CRLF
if self.debugging > 1: print '*put urgent*', self.sanitize(line)
self.sock.sendall(line, MSG_OOB)
resp = self.getmultiline()
if resp[:3] not in ('426', '225', '226'):
raise error_proto, resp
def sendcmd(self, cmd):
'''Send a command and return the response.'''
self.putcmd(cmd)
return self.getresp()
def voidcmd(self, cmd):
"""Send a command and expect a response beginning with '2'."""
self.putcmd(cmd)
return self.voidresp()
def sendport(self, host, port):
'''Send a PORT command with the current host and the given
port number.
'''
hbytes = host.split('.')
pbytes = [repr(port//256), repr(port%256)]
bytes = hbytes + pbytes
cmd = 'PORT ' + ','.join(bytes)
return self.voidcmd(cmd)
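    # For example, a (hypothetical) local address of '10.0.0.5' with data port
    # 5007 (== 19*256 + 143) is announced as the command 'PORT 10,0,0,5,19,143'.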
def sendeprt(self, host, port):
'''Send a EPRT command with the current host and the given port number.'''
af = 0
if self.af == socket.AF_INET:
af = 1
if self.af == socket.AF_INET6:
af = 2
if af == 0:
raise error_proto, 'unsupported address family'
fields = ['', repr(af), host, repr(port), '']
cmd = 'EPRT ' + '|'.join(fields)
return self.voidcmd(cmd)
def makeport(self):
'''Create a new socket and send a PORT command for it.'''
msg = "getaddrinfo returns an empty list"
sock = None
for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.bind(sa)
except socket.error, msg:
if sock:
sock.close()
sock = None
continue
break
if not sock:
raise socket.error, msg
sock.listen(1)
port = sock.getsockname()[1] # Get proper port
host = self.sock.getsockname()[0] # Get proper host
if self.af == socket.AF_INET:
resp = self.sendport(host, port)
else:
resp = self.sendeprt(host, port)
if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(self.timeout)
return sock
def makepasv(self):
if self.af == socket.AF_INET:
host, port = parse227(self.sendcmd('PASV'))
else:
host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
return host, port
def ntransfercmd(self, cmd, rest=None):
"""Initiate a transfer over the data connection.
If the transfer is active, send a port command and the
transfer command, and accept the connection. If the server is
passive, send a pasv command, connect to it, and start the
transfer command. Either way, return the socket for the
connection and the expected size of the transfer. The
expected size may be None if it could not be determined.
Optional `rest' argument can be a string that is sent as the
argument to a REST command. This is essentially a server
marker used to tell the server to skip over any data up to the
given marker.
"""
size = None
if self.passiveserver:
host, port = self.makepasv()
conn = socket.create_connection((host, port), self.timeout)
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# Some servers apparently send a 200 reply to
# a LIST or STOR command, before the 150 reply
# (and way before the 226 reply). This seems to
# be in violation of the protocol (which only allows
# 1xx or error messages for LIST), so we just discard
# this response.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
else:
sock = self.makeport()
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# See above.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
conn, sockaddr = sock.accept()
if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
conn.settimeout(self.timeout)
if resp[:3] == '150':
# this is conditional in case we received a 125
size = parse150(resp)
return conn, size
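    # A sketch of the usual resume pattern built on ntransfercmd(); it assumes
    # an already connected, logged-in instance `ftp` and a hypothetical file:
    #
    #   ftp.voidcmd('TYPE I')                        # binary mode before REST/RETR
    #   conn, size = ftp.ntransfercmd('RETR big.iso', rest=1024)
    #   chunk = conn.recv(8192)                      # data starting at offset 1024
    #   conn.close()
    #   ftp.voidresp()                               # collect the final 226 reply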
def transfercmd(self, cmd, rest=None):
"""Like ntransfercmd() but returns only the socket."""
return self.ntransfercmd(cmd, rest)[0]
def login(self, user = '', passwd = '', acct = ''):
'''Login, default anonymous.'''
if not user: user = 'anonymous'
if not passwd: passwd = ''
if not acct: acct = ''
if user == 'anonymous' and passwd in ('', '-'):
# If there is no anonymous ftp password specified
# then we'll just use anonymous@
# We don't send any other thing because:
# - We want to remain anonymous
# - We want to stop SPAM
# - We don't want to let ftp sites to discriminate by the user,
# host or country.
passwd = passwd + 'anonymous@'
resp = self.sendcmd('USER ' + user)
if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
if resp[0] != '2':
raise error_reply, resp
return resp
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
"""Retrieve data in binary mode. A new port is created for you.
Args:
cmd: A RETR command.
callback: A single parameter callable to be called on each
block of data read.
blocksize: The maximum number of bytes to read from the
socket at one time. [default: 8192]
rest: Passed to transfercmd(). [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
"""Retrieve data in line mode. A new port is created for you.
Args:
cmd: A RETR, LIST, NLST, or MLSD command.
callback: An optional single parameter callable that is called
for each line with the trailing CRLF stripped.
[default: print_line()]
Returns:
The response code.
"""
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
while 1:
line = fp.readline()
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
"""Store a file in binary mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a read(num_bytes) method.
blocksize: The maximum data size to read from fp and send over
the connection at once. [default: 8192]
callback: An optional single parameter callable that is called on
on each block of data after it is sent. [default: None]
rest: Passed to transfercmd(). [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
conn.close()
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
"""Store a file in line mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a readline() method.
callback: An optional single parameter callable that is called on
on each line after it is sent. [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
while 1:
buf = fp.readline()
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
buf = buf + CRLF
conn.sendall(buf)
if callback: callback(buf)
conn.close()
return self.voidresp()
def acct(self, password):
'''Send new account name.'''
cmd = 'ACCT ' + password
return self.voidcmd(cmd)
def nlst(self, *args):
'''Return a list of files in a given directory (default the current).'''
cmd = 'NLST'
for arg in args:
cmd = cmd + (' ' + arg)
files = []
self.retrlines(cmd, files.append)
return files
def dir(self, *args):
'''List a directory in long form.
By default list current directory to stdout.
Optional last argument is callback function; all
non-empty arguments before it are concatenated to the
LIST command. (This *should* only be used for a pathname.)'''
cmd = 'LIST'
func = None
if args[-1:] and type(args[-1]) != type(''):
args, func = args[:-1], args[-1]
for arg in args:
if arg:
cmd = cmd + (' ' + arg)
self.retrlines(cmd, func)
def rename(self, fromname, toname):
'''Rename a file.'''
resp = self.sendcmd('RNFR ' + fromname)
if resp[0] != '3':
raise error_reply, resp
return self.voidcmd('RNTO ' + toname)
def delete(self, filename):
'''Delete a file.'''
resp = self.sendcmd('DELE ' + filename)
if resp[:3] in ('250', '200'):
return resp
else:
raise error_reply, resp
def cwd(self, dirname):
'''Change to a directory.'''
if dirname == '..':
try:
return self.voidcmd('CDUP')
except error_perm, msg:
if msg.args[0][:3] != '500':
raise
elif dirname == '':
dirname = '.' # does nothing, but could return error
cmd = 'CWD ' + dirname
return self.voidcmd(cmd)
def size(self, filename):
'''Retrieve the size of a file.'''
# The SIZE command is defined in RFC-3659
resp = self.sendcmd('SIZE ' + filename)
if resp[:3] == '213':
s = resp[3:].strip()
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
def mkd(self, dirname):
'''Make a directory, return its full pathname.'''
resp = self.sendcmd('MKD ' + dirname)
return parse257(resp)
def rmd(self, dirname):
'''Remove a directory.'''
return self.voidcmd('RMD ' + dirname)
def pwd(self):
'''Return current working directory.'''
resp = self.sendcmd('PWD')
return parse257(resp)
def quit(self):
'''Quit, and close the connection.'''
resp = self.voidcmd('QUIT')
self.close()
return resp
def close(self):
'''Close the connection without assuming anything about it.'''
if self.file:
self.file.close()
self.sock.close()
self.file = self.sock = None
try:
import ssl
except ImportError:
pass
else:
class FTP_TLS(FTP):
'''A FTP subclass which adds TLS support to FTP as described
in RFC-4217.
Connect as usual to port 21 implicitly securing the FTP control
connection before authenticating.
Securing the data connection requires user to explicitly ask
for it by calling prot_p() method.
Usage example:
>>> from ftplib import FTP_TLS
>>> ftps = FTP_TLS('ftp.python.org')
>>> ftps.login() # login anonymously previously securing control channel
'230 Guest login ok, access restrictions apply.'
>>> ftps.prot_p() # switch to secure data connection
'200 Protection level set to P'
>>> ftps.retrlines('LIST') # list directory content securely
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftps.quit()
'221 Goodbye.'
>>>
'''
ssl_version = ssl.PROTOCOL_TLSv1
def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
certfile=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
self.keyfile = keyfile
self.certfile = certfile
self._prot_p = False
FTP.__init__(self, host, user, passwd, acct, timeout)
def login(self, user='', passwd='', acct='', secure=True):
if secure and not isinstance(self.sock, ssl.SSLSocket):
self.auth()
return FTP.login(self, user, passwd, acct)
def auth(self):
'''Set up secure control connection by using TLS/SSL.'''
if isinstance(self.sock, ssl.SSLSocket):
raise ValueError("Already using TLS")
if self.ssl_version == ssl.PROTOCOL_TLSv1:
resp = self.voidcmd('AUTH TLS')
else:
resp = self.voidcmd('AUTH SSL')
self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
ssl_version=self.ssl_version)
self.file = self.sock.makefile(mode='rb')
return resp
def prot_p(self):
'''Set up secure data connection.'''
# PROT defines whether or not the data channel is to be protected.
# Though RFC-2228 defines four possible protection levels,
# RFC-4217 only recommends two, Clear and Private.
# Clear (PROT C) means that no security is to be used on the
# data-channel, Private (PROT P) means that the data-channel
# should be protected by TLS.
# PBSZ command MUST still be issued, but must have a parameter of
# '0' to indicate that no buffering is taking place and the data
# connection should not be encapsulated.
self.voidcmd('PBSZ 0')
resp = self.voidcmd('PROT P')
self._prot_p = True
return resp
def prot_c(self):
'''Set up clear text data connection.'''
resp = self.voidcmd('PROT C')
self._prot_p = False
return resp
# --- Overridden FTP methods
def ntransfercmd(self, cmd, rest=None):
conn, size = FTP.ntransfercmd(self, cmd, rest)
if self._prot_p:
conn = ssl.wrap_socket(conn, self.keyfile, self.certfile,
ssl_version=self.ssl_version)
return conn, size
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
try:
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
try:
while 1:
line = fp.readline()
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
try:
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
try:
while 1:
buf = fp.readline()
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
buf = buf + CRLF
conn.sendall(buf)
if callback: callback(buf)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
__all__.append('FTP_TLS')
all_errors = (Error, IOError, EOFError, ssl.SSLError)
_150_re = None
def parse150(resp):
'''Parse the '150' response for a RETR request.
Returns the expected transfer size or None; size is not guaranteed to
be present in the 150 message.
'''
if resp[:3] != '150':
raise error_reply, resp
global _150_re
if _150_re is None:
import re
_150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
m = _150_re.match(resp)
if not m:
return None
s = m.group(1)
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
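# Illustration (server wording varies and the size clause is optional):
#
#   parse150('150 Opening BINARY mode data connection for x.bin (4096 bytes).')
#   --> 4096
#   parse150('150 Here comes the directory listing.')
#   --> None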
_227_re = None
def parse227(resp):
'''Parse the '227' response for a PASV request.
Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '227':
raise error_reply, resp
global _227_re
if _227_re is None:
import re
_227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
m = _227_re.search(resp)
if not m:
raise error_proto, resp
numbers = m.groups()
host = '.'.join(numbers[:4])
port = (int(numbers[4]) << 8) + int(numbers[5])
return host, port
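# Illustration with a made-up PASV reply; the port is p1*256 + p2:
#
#   parse227('227 Entering Passive Mode (192,168,1,2,19,143)')
#   --> ('192.168.1.2', 5007)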
def parse229(resp, peer):
'''Parse the '229' response for a EPSV request.
Raises error_proto if it does not contain '(|||port|)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '229':
raise error_reply, resp
left = resp.find('(')
if left < 0: raise error_proto, resp
right = resp.find(')', left + 1)
if right < 0:
raise error_proto, resp # should contain '(|||port|)'
if resp[left + 1] != resp[right - 1]:
raise error_proto, resp
parts = resp[left + 1:right].split(resp[left+1])
if len(parts) != 5:
raise error_proto, resp
host = peer[0]
port = int(parts[3])
return host, port
def parse257(resp):
'''Parse the '257' response for a MKD or PWD request.
This is a response to a MKD or PWD request: a directory name.
Returns the directoryname in the 257 reply.'''
if resp[:3] != '257':
raise error_reply, resp
if resp[3:5] != ' "':
return '' # Not compliant to RFC 959, but UNIX ftpd does this
dirname = ''
i = 5
n = len(resp)
while i < n:
c = resp[i]
i = i+1
if c == '"':
if i >= n or resp[i] != '"':
break
i = i+1
dirname = dirname + c
return dirname
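# Illustration with a made-up 257 reply; a doubled '"' inside the quoted
# pathname is folded back into a single quote:
#
#   parse257('257 "/tmp/my ""odd"" dir" created')
#   --> '/tmp/my "odd" dir'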
def print_line(line):
'''Default retrlines callback to print a line.'''
print line
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
'''Copy file from one FTP-instance to another.'''
if not targetname: targetname = sourcename
type = 'TYPE ' + type
source.voidcmd(type)
target.voidcmd(type)
sourcehost, sourceport = parse227(source.sendcmd('PASV'))
target.sendport(sourcehost, sourceport)
# RFC 959: the user must "listen" [...] BEFORE sending the
# transfer request.
# So: STOR before RETR, because here the target is a "user".
treply = target.sendcmd('STOR ' + targetname)
if treply[:3] not in ('125', '150'): raise error_proto # RFC 959
sreply = source.sendcmd('RETR ' + sourcename)
if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959
source.voidresp()
target.voidresp()
class Netrc:
"""Class to parse & provide access to 'netrc' format files.
See the netrc(4) man page for information on the file format.
WARNING: This class is obsolete -- use module netrc instead.
"""
__defuser = None
__defpasswd = None
__defacct = None
def __init__(self, filename=None):
if filename is None:
if "HOME" in os.environ:
filename = os.path.join(os.environ["HOME"],
".netrc")
else:
raise IOError, \
"specify file to load or set $HOME"
self.__hosts = {}
self.__macros = {}
fp = open(filename, "r")
in_macro = 0
while 1:
line = fp.readline()
if not line: break
if in_macro and line.strip():
macro_lines.append(line)
continue
elif in_macro:
self.__macros[macro_name] = tuple(macro_lines)
in_macro = 0
words = line.split()
host = user = passwd = acct = None
default = 0
i = 0
while i < len(words):
w1 = words[i]
if i+1 < len(words):
w2 = words[i + 1]
else:
w2 = None
if w1 == 'default':
default = 1
elif w1 == 'machine' and w2:
host = w2.lower()
i = i + 1
elif w1 == 'login' and w2:
user = w2
i = i + 1
elif w1 == 'password' and w2:
passwd = w2
i = i + 1
elif w1 == 'account' and w2:
acct = w2
i = i + 1
elif w1 == 'macdef' and w2:
macro_name = w2
macro_lines = []
in_macro = 1
break
i = i + 1
if default:
self.__defuser = user or self.__defuser
self.__defpasswd = passwd or self.__defpasswd
self.__defacct = acct or self.__defacct
if host:
if host in self.__hosts:
ouser, opasswd, oacct = \
self.__hosts[host]
user = user or ouser
passwd = passwd or opasswd
acct = acct or oacct
self.__hosts[host] = user, passwd, acct
fp.close()
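    # A hypothetical ~/.netrc showing the constructs the parser above
    # understands: per-machine credentials, a 'default' entry, and a 'macdef'
    # macro that extends until the first blank line.
    #
    #   machine ftp.example.org login alice password s3cret account research
    #   default login anonymous password anonymous@
    #   macdef init
    #           cd /pub
    #           ls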
def get_hosts(self):
"""Return a list of hosts mentioned in the .netrc file."""
return self.__hosts.keys()
def get_account(self, host):
"""Returns login information for the named host.
The return value is a triple containing userid,
password, and the accounting field.
"""
host = host.lower()
user = passwd = acct = None
if host in self.__hosts:
user, passwd, acct = self.__hosts[host]
user = user or self.__defuser
passwd = passwd or self.__defpasswd
acct = acct or self.__defacct
return user, passwd, acct
def get_macros(self):
"""Return a list of all defined macro names."""
return self.__macros.keys()
def get_macro(self, macro):
"""Return a sequence of lines which define a named macro."""
return self.__macros[macro]
def test():
'''Test program.
Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
-d dir
-l list
-p password
'''
if len(sys.argv) < 2:
print test.__doc__
sys.exit(0)
debugging = 0
rcfile = None
while sys.argv[1] == '-d':
debugging = debugging+1
del sys.argv[1]
if sys.argv[1][:2] == '-r':
# get name of alternate ~/.netrc file:
rcfile = sys.argv[1][2:]
del sys.argv[1]
host = sys.argv[1]
ftp = FTP(host)
ftp.set_debuglevel(debugging)
userid = passwd = acct = ''
try:
netrc = Netrc(rcfile)
except IOError:
if rcfile is not None:
sys.stderr.write("Could not open account file"
" -- using anonymous login.")
else:
try:
userid, passwd, acct = netrc.get_account(host)
except KeyError:
# no account for host
sys.stderr.write(
"No account -- using anonymous login.")
ftp.login(userid, passwd, acct)
for file in sys.argv[2:]:
if file[:2] == '-l':
ftp.dir(file[2:])
elif file[:2] == '-d':
cmd = 'CWD'
if file[2:]: cmd = cmd + ' ' + file[2:]
resp = ftp.sendcmd(cmd)
elif file == '-p':
ftp.set_pasv(not ftp.passiveserver)
else:
ftp.retrbinary('RETR ' + file, \
sys.stdout.write, 1024)
ftp.quit()
if __name__ == '__main__':
test()
| gpl-3.0 | -6,062,112,633,803,773,000 | 33.486486 | 98 | 0.519476 | false |
BlueBrain/NeuroM | neurom/features/__init__.py | 1 | 6149 | # Copyright (c) 2020, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""NeuroM, lightweight and fast.
Examples:
Obtain some morphometrics
>>> import neurom
>>> from neurom import features
>>> nrn = neurom.load_neuron('path/to/neuron')
>>> ap_seg_len = features.get('segment_lengths', nrn, neurite_type=neurom.APICAL_DENDRITE)
>>> ax_sec_len = features.get('section_lengths', nrn, neurite_type=neurom.AXON)
"""
import numpy as np
from neurom.core.types import NeuriteType, tree_type_checker
from neurom.core.neuron import iter_neurites
from neurom.exceptions import NeuroMError
from neurom.utils import deprecated
NEURITEFEATURES = dict()
NEURONFEATURES = dict()
@deprecated(
'`register_neurite_feature`',
'Please use the decorator `neurom.features.register.feature` to register custom features')
def register_neurite_feature(name, func):
"""Register a feature to be applied to neurites.
.. warning:: This feature has been deprecated in 1.6.0
Arguments:
name: name of the feature, used for access via get() function.
func: single parameter function of a neurite.
"""
def _fun(neurites, neurite_type=NeuriteType.all):
"""Wrap neurite function from outer scope and map into list."""
return list(func(n) for n in iter_neurites(neurites, filt=tree_type_checker(neurite_type)))
_register_feature('NEURITEFEATURES', name, _fun, shape=(...,))
def _find_feature_func(feature_name):
"""Returns the python function used when getting a feature with `neurom.get(feature_name)`."""
for feature_dict in (NEURITEFEATURES, NEURONFEATURES):
if feature_name in feature_dict:
return feature_dict[feature_name]
raise NeuroMError(f'Unable to find feature: {feature_name}')
def _get_feature_value_and_func(feature_name, obj, **kwargs):
"""Obtain a feature from a set of morphology objects.
Arguments:
        feature_name(string): feature to extract
obj: a neuron, population or neurite tree
kwargs: parameters to forward to underlying worker functions
Returns:
A tuple (feature, func) of the feature value and its function
"""
feat = _find_feature_func(feature_name)
res = feat(obj, **kwargs)
if len(feat.shape) != 0:
res = np.array(list(res))
return res, feat
def get(feature_name, obj, **kwargs):
"""Obtain a feature from a set of morphology objects.
Features can be either Neurite features or Neuron features. For the list of Neurite features
see :mod:`neurom.features.neuritefunc`. For the list of Neuron features see
:mod:`neurom.features.neuronfunc`.
Arguments:
feature_name(string): feature to extract
obj: a neuron, a neuron population or a neurite tree
kwargs: parameters to forward to underlying worker functions
Returns:
features as a 1D, 2D or 3D numpy array.
"""
return _get_feature_value_and_func(feature_name, obj, **kwargs)[0]
def _register_feature(namespace, name, func, shape):
"""Register a feature to be applied.
Upon registration, an attribute 'shape' containing the expected
shape of the function return is added to 'func'.
Arguments:
namespace(string): a namespace (must be 'NEURITEFEATURES' or 'NEURONFEATURES')
name(string): name of the feature, used to access the feature via `neurom.features.get()`.
func(callable): single parameter function of a neurite.
shape(tuple): the expected shape of the feature values
"""
setattr(func, 'shape', shape)
assert namespace in {'NEURITEFEATURES', 'NEURONFEATURES'}
feature_dict = globals()[namespace]
if name in feature_dict:
raise NeuroMError('Attempt to hide registered feature %s' % name)
feature_dict[name] = func
def feature(shape, namespace=None, name=None):
"""Feature decorator to automatically register the feature in the appropriate namespace.
Arguments:
shape(tuple): the expected shape of the feature values
namespace(string): a namespace (must be 'NEURITEFEATURES' or 'NEURONFEATURES')
name(string): name of the feature, used to access the feature via `neurom.features.get()`.
"""
def inner(func):
_register_feature(namespace, name or func.__name__, func, shape)
return func
return inner
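# A minimal sketch of how the decorator above is meant to be used (the feature
# name and body here are illustrative, not part of NeuroM):
#
#   @feature(shape=(...,), namespace='NEURITEFEATURES', name='my_feature')
#   def my_feature(neurites, neurite_type=NeuriteType.all):
#       return [1.0 for _ in iter_neurites(neurites,
#                                          filt=tree_type_checker(neurite_type))]
#
# after which its values are reachable via `features.get('my_feature', neuron)`.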
# These imports are necessary in order to register the features
from neurom.features import neuritefunc, neuronfunc # noqa, pylint: disable=wrong-import-position
| bsd-3-clause | -1,972,831,414,719,588,600 | 38.928571 | 99 | 0.714425 | false |
ionelmc/django-easyfilters | tests/test_app/tests/test_ranges.py | 2 | 3677 | from decimal import Decimal
import unittest
from django_easyfilters.ranges import auto_ranges
class TestRanges(unittest.TestCase):
def test_auto_ranges_simple(self):
"""
Test that auto_ranges produces 'nice' looking automatic ranges.
"""
# An easy case - max_items is just what we want
ranges1 = auto_ranges(Decimal('15.0'), Decimal('20.0'), 5)
self.assertEqual(ranges1,
[(Decimal('15.0'), Decimal('16.0')),
(Decimal('16.0'), Decimal('17.0')),
(Decimal('17.0'), Decimal('18.0')),
(Decimal('18.0'), Decimal('19.0')),
(Decimal('19.0'), Decimal('20.0'))])
def test_auto_ranges_flexible_max_items(self):
# max_items is a bit bigger than what we want,
# but we should be flexible if there is an easy target.
ranges1 = auto_ranges(Decimal('15.0'), Decimal('20.0'), 6)
self.assertEqual(ranges1,
[(Decimal('15.0'), Decimal('16.0')),
(Decimal('16.0'), Decimal('17.0')),
(Decimal('17.0'), Decimal('18.0')),
(Decimal('18.0'), Decimal('19.0')),
(Decimal('19.0'), Decimal('20.0'))])
def test_auto_ranges_round_limits(self):
# start and end limits should be rounded to something nice
# Check with 5-10, 50-100, 15-20, 150-200
ranges1 = auto_ranges(Decimal('15.1'), Decimal('19.9'), 5)
self.assertEqual(ranges1,
[(Decimal('15.0'), Decimal('16.0')),
(Decimal('16.0'), Decimal('17.0')),
(Decimal('17.0'), Decimal('18.0')),
(Decimal('18.0'), Decimal('19.0')),
(Decimal('19.0'), Decimal('20.0'))])
ranges2 = auto_ranges(Decimal('151'), Decimal('199'), 5)
self.assertEqual(ranges2,
[(Decimal('150'), Decimal('160')),
(Decimal('160'), Decimal('170')),
(Decimal('170'), Decimal('180')),
(Decimal('180'), Decimal('190')),
(Decimal('190'), Decimal('200'))])
ranges3 = auto_ranges(Decimal('5.1'), Decimal('9.9'), 5)
self.assertEqual(ranges3,
[(Decimal('5.0'), Decimal('6.0')),
(Decimal('6.0'), Decimal('7.0')),
(Decimal('7.0'), Decimal('8.0')),
(Decimal('8.0'), Decimal('9.0')),
(Decimal('9.0'), Decimal('10.0'))])
ranges4 = auto_ranges(Decimal('51'), Decimal('99'), 5)
self.assertEqual(ranges4,
[(Decimal('50'), Decimal('60')),
(Decimal('60'), Decimal('70')),
(Decimal('70'), Decimal('80')),
(Decimal('80'), Decimal('90')),
(Decimal('90'), Decimal('100'))])
ranges5 = auto_ranges(Decimal('3'), Decimal('6'), 5)
self.assertEqual(ranges5,
[(Decimal('3'), Decimal('4')),
(Decimal('4'), Decimal('5')),
(Decimal('5'), Decimal('6'))])
def test_auto_ranges_type(self):
"""
auto_ranges should return the same type of thing it is passed
"""
r = auto_ranges(1, 10, 10)
self.assertEqual(type(r[0][0]), int)
r2 = auto_ranges(Decimal('1'), Decimal('10'), 10)
self.assertEqual(type(r2[0][0]), Decimal)
| mit | -6,502,209,669,346,150,000 | 42.77381 | 71 | 0.452271 | false |
transifex/openformats | openformats/tests/formats/structuredkeyvaluejson/test_keyvaluejson.py | 1 | 19720 | # -*- coding: utf-8 -*-
import unittest
import six
from openformats.formats.json import StructuredJsonHandler
from openformats.exceptions import ParseError
from openformats.strings import OpenString
from openformats.tests.formats.common import CommonFormatTestMixin
from openformats.tests.utils.strings import (generate_random_string,
bytes_to_string)
class StructuredJsonTestCase(CommonFormatTestMixin, unittest.TestCase):
HANDLER_CLASS = StructuredJsonHandler
TESTFILE_BASE = "openformats/tests/formats/structuredkeyvaluejson/files"
def setUp(self):
super(StructuredJsonTestCase, self).setUp()
self.handler = StructuredJsonHandler()
self.random_string = generate_random_string()
self.pluralized_string = "{ item_count, plural, one {You have {file_count} file.} other {You have {file_count} files.} }" # noqa
self.random_openstring = OpenString("a",
self.random_string, order=0)
self.random_hash = self.random_openstring.template_replacement
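        # Entries handled by StructuredJsonHandler in these tests follow the
        # shape below (the values are illustrative):
        #   {"key": {"string": "...", "developer_comment": "...",
        #            "character_limit": 150, "context": "..."}}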
def test_simple(self):
template, stringset = self.handler.parse('{"a": {"string":"%s"}}' %
self.random_string)
compiled = self.handler.compile(template, [self.random_openstring])
self.assertEqual(template,
'{"a": {"string":"%s"}}' % self.random_hash)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].__dict__,
self.random_openstring.__dict__)
self.assertEqual(compiled,
'{"a": {"string":"%s"}}' % self.random_string)
def test_dots_in_key(self):
first_level_key = "a.b"
source = '{"%s": {"c": {"string": "%s"}}}' % (first_level_key, self.random_string)
openstring = OpenString(
"{}.c".format(self.handler._escape_key(first_level_key)),
self.random_string, order=0
)
random_hash = openstring.template_replacement
template, stringset = self.handler.parse(source)
compiled = self.handler.compile(template, [openstring])
self.assertEqual(template,
'{"a.b": {"c": {"string": "%s"}}}' % random_hash)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].__dict__, openstring.__dict__)
self.assertEqual(compiled, source)
def test_escaped_character_in_key(self):
first_level_key = "a\/b"
source = '{"%s": {"c": {"string": "%s"}}}' % (first_level_key, self.random_string)
openstring = OpenString(
"{}.c".format(self.handler._escape_key(first_level_key)),
self.random_string, order=0
)
random_hash = openstring.template_replacement
template, stringset = self.handler.parse(source)
compiled = self.handler.compile(template, [openstring])
self.assertEqual(template,
'{"a\/b": {"c": {"string": "%s"}}}' % random_hash)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].__dict__, openstring.__dict__)
self.assertEqual(compiled, source)
def test_embedded_dicts(self):
source = '{"a": {"b": {"string": "%s"}}}' % self.random_string
openstring = OpenString("a.b", self.random_string, order=0)
random_hash = openstring.template_replacement
template, stringset = self.handler.parse(source)
compiled = self.handler.compile(template, [openstring])
self.assertEqual(template,
'{"a": {"b": {"string": "%s"}}}' % random_hash)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].__dict__, openstring.__dict__)
self.assertEqual(compiled, source)
def test_compile_ignores_removed_strings_for_dicts(self):
# The original JsonHandler, when compiling back from a template,
# removes any strings that are not passed as an argument in the
# compile() function. StructuredJsonHandler on the other hand, simply
        # ignores those key-values and leaves them as is in the template. This
# test ensures that this is the case.
# For more information, see _copy_until_and_remove_section() in both
# handlers.
string1 = self.random_string
string2 = generate_random_string()
openstring1 = self.random_openstring
openstring2 = OpenString("b", string2, order=1)
hash1 = self.random_hash
hash2 = openstring2.template_replacement
source = ('{"a": {"string":"%s"}, "b": {"string":"%s"}}' %
(string1, string2))
template, stringset = self.handler.parse(source)
compiled = self.handler.compile(template, [openstring1])
self.assertEqual(template,
'{"a": {"string":"%s"}, "b": {"string":"%s"}}' %
(hash1, hash2))
self.assertEqual(len(stringset), 2)
self.assertEqual(stringset[0].__dict__, openstring1.__dict__)
self.assertEqual(stringset[1].__dict__, openstring2.__dict__)
self.assertEqual(
compiled,
'{"a": {"string":"%s"}, "b": {"string":"%s"}}' % (string1, hash2)
)
def test_invalid_json(self):
with self.assertRaises(ParseError) as context:
self.handler.parse(u'invalid_json')
self.assertIn(six.text_type(context.exception),
("Expecting value: line 1 column 1 (char 0)",
"No JSON object could be decoded"))
def test_invalid_json_type(self):
template, stringset = self.handler.parse('[false]')
self.assertEqual(stringset, [])
self.assertEqual(template, '[false]')
template, stringset = self.handler.parse('{"false": false}')
self.assertEqual(stringset, [])
self.assertEqual(template, '{"false": false}')
def test_not_json_container(self):
self._test_parse_error('"hello"',
'Was expecting whitespace or one of `[{` on '
'line 1, found `"` instead')
self._test_parse_error('3',
"Was expecting whitespace or one of `[{` on "
"line 1, found `3` instead")
self._test_parse_error('false',
"Was expecting whitespace or one of `[{` on "
"line 1, found `f` instead")
def test_skipping_stuff_within_strings(self):
source = '{"a": {"string":"b, ,c"}}'
template, stringset = self.handler.parse(source)
compiled = self.handler.compile(template, stringset)
self.assertEqual(compiled, source)
def test_duplicate_keys(self):
self._test_parse_error('{"a": {"string": "hello"}, "a": {"string": "hello"}}', # noqa
"Duplicate string key ('a') in line 1")
def test_display_json_errors(self):
self._test_parse_error('["]',
"Unterminated string starting at: line 1 "
"column 2 (char 1)")
def test_unescape(self):
cases = (
# simple => simple
([u's', u'i', u'm', u'p', u'l', u'e'],
[u's', u'i', u'm', u'p', u'l', u'e']),
# hεllo => hεllo
([u'h', u'ε', u'l', u'l', u'o'],
[u'h', u'ε', u'l', u'l', u'o']),
# h\u03b5llo => hεllo
([u'h', u'\\', u'u', u'0', u'3', u'b', u'5', u'l', u'l', u'o'],
[u'h', u'ε', u'l', u'l', u'o']),
# a\"b => a"b
([u'a', u'\\', u'"', u'b'], [u'a', u'"', u'b']),
# a\/b => a/b
([u'a', u'\\', u'/', u'b'], [u'a', u'/', u'b']),
# a\/b => a?b, ? = BACKSPACE
([u'a', u'\\', u'b', u'b'], [u'a', u'\b', u'b']),
# a\fb => a?b, ? = FORMFEED
([u'a', u'\\', u'f', u'b'], [u'a', u'\f', u'b']),
# a\nb => a?b, ? = NEWLINE
([u'a', u'\\', u'n', u'b'], [u'a', u'\n', u'b']),
# a\rb => a?b, ? = CARRIAGE_RETURN
([u'a', u'\\', u'r', u'b'], [u'a', u'\r', u'b']),
# a\tb => a?b, ? = TAB
([u'a', u'\\', u't', u'b'], [u'a', u'\t', u'b']),
)
for raw, rich in cases:
self.assertEqual(StructuredJsonHandler.unescape(
bytes_to_string(raw)), bytes_to_string(rich)
)
def test_escape(self):
cases = (
# simple => simple
([u's', u'i', u'm', u'p', u'l', u'e'],
[u's', u'i', u'm', u'p', u'l', u'e']),
# hεllo => hεllo
([u'h', u'ε', u'l', u'l', u'o'],
[u'h', u'ε', u'l', u'l', u'o']),
# h\u03b5llo => h\\u03b5llo
([u'h', u'\\', u'u', u'0', u'3', u'b', u'5', u'l', u'l', u'o'],
[u'h', u'\\', u'\\', u'u', u'0', u'3', u'b', u'5', u'l', u'l',
u'o']),
# a"b =>a\"b
([u'a', u'"', u'b'], [u'a', u'\\', u'"', u'b']),
# a/b =>a/b
([u'a', u'/', u'b'], [u'a', u'/', u'b']),
# a?b =>a\/b, ? = BACKSPACE
([u'a', u'\b', u'b'], [u'a', u'\\', u'b', u'b']),
# a?b =>a\fb, ? = FORMFEED
([u'a', u'\f', u'b'], [u'a', u'\\', u'f', u'b']),
# a?b =>a\nb, ? = NEWLINE
([u'a', u'\n', u'b'], [u'a', u'\\', u'n', u'b']),
# a?b =>a\rb, ? = CARRIAGE_RETURN
([u'a', u'\r', u'b'], [u'a', u'\\', u'r', u'b']),
# a?b => a\tb, ? = TAB
([u'a', u'\t', u'b'], [u'a', u'\\', u't', u'b']),
)
for rich, raw in cases:
self.assertEqual(StructuredJsonHandler.escape(
bytes_to_string(rich)), bytes_to_string(raw)
)
# PLURALS
def test_invalid_plural_format(self):
# Test various cases of messed-up braces
self._test_parse_error_message(
'{ "total_files": {"string": "{ item_count, plural, one {You have {file_count file.} other {You have {file_count} files.} }" }}', # noqa
'Invalid format of pluralized entry with key: "total_files"'
)
self._test_parse_error_message(
'{ "total_files": {"string": "{ item_count, plural, one {You have file_count} file.} other {You have {file_count} files.} }" }}', # noqa
'Invalid format of pluralized entry with key: "total_files"'
)
self._test_parse_error_message(
'{ "total_files": {"string": "{ item_count, plural, one {You have {file_count} file. other {You have {file_count} files.} }" }}', # noqa
'Invalid format of pluralized entry with key: "total_files"'
)
self._test_parse_error_message(
'{ "total_files": {"string": "{ item_count, plural, one {You have {file_count} file}. other {You have file_count} files.} }" }}', # noqa
'Invalid format of pluralized entry with key: "total_files"'
)
def test_invalid_plural_rules(self):
# Only the following strings are allowed as plural rules:
# zero, one, few, many, other
        # Anything else, including their TX int equivalents, is invalid.
self._test_parse_error_message(
'{ "total_files": {"string": "{ item_count, plural, 1 {file} 5 {{file_count} files} }" }}', # noqa
'Invalid plural rule(s): "1, 5" in pluralized entry with key: total_files' # noqa
)
self._test_parse_error_message(
'{ "total_files": {"string": "{ item_count, plural, once {file} mother {{file_count} files} }" }}', # noqa
'Invalid plural rule(s): "once, mother" in pluralized entry with key: total_files' # noqa
)
self._test_parse_error_message(
'{ "total_files": {"string": "{ item_count, plural, =3 {file} other {{file_count} files} }" }}', # noqa
'Invalid plural rule(s): "=3" in pluralized entry with key: total_files' # noqa
)
def test_irrelevant_whitespace_ignored(self):
# Whitespace between the various parts of the message format structure
# should be ignored.
expected_translations = {0: 'Empty', 5: '{count} files'}
self._test_translations_equal(
'{'
' "k": {"string": "{ cnt, plural, zero {Empty} other {{count} files} }"}' # noqa
'}',
expected_translations
)
self._test_translations_equal(
'{'
' "k": {"string": "{cnt,plural,zero{Empty}other{{count} files} }"}' # noqa
'}',
expected_translations
)
self._test_translations_equal(
'{ "k": {"string": "{ cnt, plural, zero {Empty} other {{count} files} } " }}', # noqa
expected_translations
)
self._test_translations_equal(
' {'
' "k": {"string": "{cnt,plural,zero{Empty}other{{count} files} }"}' # noqa
'} ',
expected_translations
)
self._test_translations_equal(
'{'
' "k": {"string": " {cnt, plural, zero {Empty} other {{count} files} }"}' # noqa
'}',
expected_translations
)
self._test_translations_equal(
'{'
' "k": {"string": "{cnt , plural , zero {Empty} other {{count} files} }"}' # noqa
'}',
expected_translations
)
# Escaped new lines should be allowed
self._test_translations_equal(
'{'
' "k": {"string": "{cnt, plural,\\n zero {Empty} other {{count} files} \\n}"}' # noqa
'}',
expected_translations
)
# Rendering a template with escaped new lines should work. However,
# these characters cannot be inside the pluralized string, because the
# template would be very hard to create in that case (e.g. not allowed
        # in: 'zero {Empty} \n other {{count} files}').
source = '{"a": {"string": "{cnt, plural,\\n one {0} other {{count} files} \\n}"}}' # noqa
template, stringset = self.handler.parse(source)
compiled = self.handler.compile(template, stringset)
self.assertEqual(compiled, source)
def test_non_supported_icu_argument(self):
# Non-supported ICU arguments (everything other than `plural`)
# should make a string be treated as non-pluralized
string = '{"k": {"string" :"{ gender_of_host, select, female {{host} appeared} male {{host} appeared} }"}}' # noqa
_, stringset = self.handler.parse(string)
self.assertEqual(
stringset[0].string,
'{ gender_of_host, select, female {{host} appeared} male {{host} appeared} }' # noqa
)
def test_nesting_with_plurals(self):
expected_translations = {0: 'Empty', 5: '{count} files'}
self._test_translations_equal(
'{ "k": { "a": {"string" :"{ cnt, plural, zero {Empty} other {{count} files} }", "b": "c" } }}', # noqa
expected_translations
)
def test_whitespace_in_translations_not_ignored(self):
        # Whitespace inside the translation strings themselves should be
        # preserved, not stripped.
self._test_translations_equal(
'{"k": {"string": "{ cnt, plural, zero { Empty} other {{count} files} }"}}', # noqa
{0: ' Empty', 5: '{count} files'}
)
self._test_translations_equal(
'{"k": {"string": "{ cnt, plural, zero { Empty } other {{count} files } }"}}', # noqa
{0: ' Empty ', 5: '{count} files '}
)
def test_openstring_structure(self):
_, stringset = self.handler.parse(
'{"a": {"string":"%s", "developer_comment": "developer_comment",'
'"character_limit": 150, "context": "context"}}'
% self.random_string
)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].string, self.random_string)
self.assertEqual(stringset[0].developer_comment, "developer_comment")
self.assertEqual(stringset[0].character_limit, 150)
self.assertEqual(stringset[0].context, "context")
def test_openstring_structure_with_nested_format(self):
_, stringset = self.handler.parse(
'{"a": {"level": {"string":"%s", "developer_comment": "developer_comment",' # noqa
'"character_limit": 150, "context": "context"}}}'
% self.random_string
)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].string, self.random_string)
self.assertEqual(stringset[0].developer_comment, "developer_comment")
self.assertEqual(stringset[0].developer_comment, "developer_comment")
self.assertEqual(stringset[0].character_limit, 150)
self.assertEqual(stringset[0].context, "context")
def test_openstring_structure_with_default_values(self):
_, stringset = self.handler.parse(
'{"a": {"string":"%s"}}' % self.random_string
)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].string, self.random_string)
self.assertEqual(stringset[0].developer_comment, "")
self.assertEqual(stringset[0].character_limit, None)
self.assertEqual(stringset[0].context, "")
def test_pluralized_openstring_structure(self):
_, stringset = self.handler.parse(
'{"a": {"string":"%s", "developer_comment": "developer_comment",'
'"character_limit": 150, "context": "context"}}'
% self.pluralized_string
)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].developer_comment, "developer_comment")
self.assertEqual(stringset[0].character_limit, 150)
self.assertEqual(stringset[0].context, "context")
def test_pluralized_openstring_structure_with_nested_format(self):
_, stringset = self.handler.parse(
'{"a": {"level": {"string":"%s", "developer_comment": "developer_comment",' # noqa
'"character_limit": 150, "context": "context"}}}'
% self.pluralized_string
)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].developer_comment, "developer_comment")
self.assertEqual(stringset[0].developer_comment, "developer_comment")
self.assertEqual(stringset[0].character_limit, 150)
self.assertEqual(stringset[0].context, "context")
def test_pluralized_openstring_structure_with_default_values(self):
_, stringset = self.handler.parse(
'{"a": {"string":"%s"}}' % self.pluralized_string
)
self.assertEqual(len(stringset), 1)
self.assertEqual(stringset[0].developer_comment, "")
self.assertEqual(stringset[0].character_limit, None)
self.assertEqual(stringset[0].context, "")
def _test_parse_error_message(self, source, msg_substr):
error_raised = False
try:
self.handler.parse(source)
except ParseError as e:
self.assertIn(msg_substr, six.text_type(e))
error_raised = True
self.assertTrue(error_raised)
def _test_translations_equal(self, source, translations_by_rule):
template, stringset = self.handler.parse(source)
for rule_int in six.iterkeys(translations_by_rule):
self.assertEqual(
translations_by_rule[rule_int],
stringset[0].string[rule_int]
)
| gpl-3.0 | -984,731,356,585,395,100 | 43.693878 | 149 | 0.53445 | false |
DARKPOP/external_chromium_org | ppapi/generators/idl_thunk.py | 44 | 20936 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C++ style thunks """
import glob
import os
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_proto import CGen, GetNodeComments, CommentLines, Comment
from idl_generator import Generator, GeneratorByFile
Option('thunkroot', 'Base directory of output',
default=os.path.join('..', 'thunk'))
class TGenError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
class ThunkBodyMetadata(object):
"""Metadata about thunk body. Used for selecting which headers to emit."""
def __init__(self):
self._apis = set()
self._builtin_includes = set()
self._includes = set()
def AddApi(self, api):
self._apis.add(api)
def Apis(self):
return self._apis
def AddInclude(self, include):
self._includes.add(include)
def Includes(self):
return self._includes
def AddBuiltinInclude(self, include):
self._builtin_includes.add(include)
def BuiltinIncludes(self):
return self._builtin_includes
def _GetBaseFileName(filenode):
"""Returns the base name for output files, given the filenode.
Examples:
'dev/ppb_find_dev.h' -> 'ppb_find_dev'
'trusted/ppb_buffer_trusted.h' -> 'ppb_buffer_trusted'
"""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
return name
def _GetHeaderFileName(filenode):
"""Returns the name for the header for this file."""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
if path:
header = "ppapi/c/%s/%s.h" % (path, name)
else:
header = "ppapi/c/%s.h" % name
return header
def _GetThunkFileName(filenode, relpath):
"""Returns the thunk file name."""
path = os.path.split(filenode.GetProperty('NAME'))[0]
name = _GetBaseFileName(filenode)
# We don't reattach the path for thunk.
if relpath: name = os.path.join(relpath, name)
name = '%s%s' % (name, '_thunk.cc')
return name
def _StripFileName(filenode):
"""Strips path and dev, trusted, and private suffixes from the file name."""
api_basename = _GetBaseFileName(filenode)
if api_basename.endswith('_dev'):
api_basename = api_basename[:-len('_dev')]
if api_basename.endswith('_trusted'):
api_basename = api_basename[:-len('_trusted')]
if api_basename.endswith('_private'):
api_basename = api_basename[:-len('_private')]
return api_basename
def _StripApiName(api_name):
"""Strips Dev, Private, and Trusted suffixes from the API name."""
if api_name.endswith('Trusted'):
api_name = api_name[:-len('Trusted')]
if api_name.endswith('_Dev'):
api_name = api_name[:-len('_Dev')]
if api_name.endswith('_Private'):
api_name = api_name[:-len('_Private')]
return api_name
def _MakeEnterLine(filenode, interface, member, arg, handle_errors, callback,
meta):
"""Returns an EnterInstance/EnterResource string for a function."""
api_name = _StripApiName(interface.GetName()) + '_API'
if member.GetProperty('api'): # Override API name.
manually_provided_api = True
# TODO(teravest): Automatically guess the API header file.
api_name = member.GetProperty('api')
else:
manually_provided_api = False
if arg[0] == 'PP_Instance':
if callback is None:
arg_string = arg[1]
else:
arg_string = '%s, %s' % (arg[1], callback)
if interface.GetProperty('singleton') or member.GetProperty('singleton'):
if not manually_provided_api:
meta.AddApi('ppapi/thunk/%s_api.h' % _StripFileName(filenode))
return 'EnterInstanceAPI<%s> enter(%s);' % (api_name, arg_string)
else:
return 'EnterInstance enter(%s);' % arg_string
elif arg[0] == 'PP_Resource':
enter_type = 'EnterResource<%s>' % api_name
if not manually_provided_api:
meta.AddApi('ppapi/thunk/%s_api.h' % _StripFileName(filenode))
if callback is None:
return '%s enter(%s, %s);' % (enter_type, arg[1],
str(handle_errors).lower())
else:
return '%s enter(%s, %s, %s);' % (enter_type, arg[1],
callback,
str(handle_errors).lower())
else:
raise TGenError("Unknown type for _MakeEnterLine: %s" % arg[0])
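# For a resource-based member this helper emits a line of roughly this form
# (the interface and argument names here are illustrative):
#   EnterResource<PPB_Foo_API> enter(resource, true);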
def _GetShortName(interface, filter_suffixes):
"""Return a shorter interface name that matches Is* and Create* functions."""
parts = interface.GetName().split('_')[1:]
tail = parts[len(parts) - 1]
if tail in filter_suffixes:
parts = parts[:-1]
return ''.join(parts)
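# For instance, a hypothetical interface named 'PPB_Foo_Dev' yields 'Foo' here,
# so members named IsFoo() or CreateFoo() are matched by the checks below.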
def _IsTypeCheck(interface, node, args):
"""Returns true if node represents a type-checking function."""
if len(args) == 0 or args[0][0] != 'PP_Resource':
return False
return node.GetName() == 'Is%s' % _GetShortName(interface, ['Dev', 'Private'])
def _GetCreateFuncName(interface):
"""Returns the creation function name for an interface."""
return 'Create%s' % _GetShortName(interface, ['Dev'])
def _GetDefaultFailureValue(t):
"""Returns the default failure value for a given type.
Returns None if no default failure value exists for the type.
"""
values = {
'PP_Bool': 'PP_FALSE',
'PP_Resource': '0',
'struct PP_Var': 'PP_MakeUndefined()',
'float': '0.0f',
'int32_t': 'enter.retval()',
'uint16_t': '0',
'uint32_t': '0',
'uint64_t': '0',
'void*': 'NULL'
}
if t in values:
return values[t]
return None
def _MakeCreateMemberBody(interface, member, args):
"""Returns the body of a Create() function.
Args:
interface - IDLNode for the interface
member - IDLNode for member function
args - List of arguments for the Create() function
"""
if args[0][0] == 'PP_Resource':
body = 'Resource* object =\n'
body += ' PpapiGlobals::Get()->GetResourceTracker()->'
body += 'GetResource(%s);\n' % args[0][1]
body += 'if (!object)\n'
body += ' return 0;\n'
body += 'EnterResourceCreation enter(object->pp_instance());\n'
elif args[0][0] == 'PP_Instance':
body = 'EnterResourceCreation enter(%s);\n' % args[0][1]
else:
raise TGenError('Unknown arg type for Create(): %s' % args[0][0])
body += 'if (enter.failed())\n'
body += ' return 0;\n'
arg_list = ', '.join([a[1] for a in args])
if member.GetProperty('create_func'):
create_func = member.GetProperty('create_func')
else:
create_func = _GetCreateFuncName(interface)
body += 'return enter.functions()->%s(%s);' % (create_func,
arg_list)
return body
def _GetOutputParams(member, release):
"""Returns output parameters (and their types) for a member function.
Args:
member - IDLNode for the member function
release - Release to get output parameters for
Returns:
A list of name strings for all output parameters of the member
function.
"""
out_params = []
callnode = member.GetOneOf('Callspec')
if callnode:
cgen = CGen()
for param in callnode.GetListOf('Param'):
mode = cgen.GetParamMode(param)
if mode == 'out':
# We use the 'store' mode when getting the parameter type, since we
# need to call sizeof() for memset().
_, pname, _, _ = cgen.GetComponents(param, release, 'store')
out_params.append(pname)
return out_params
def _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
include_version, meta):
"""Returns the body of a typical function.
Args:
filenode - IDLNode for the file
release - release to generate body for
node - IDLNode for the interface
member - IDLNode for the member function
rtype - Return type for the member function
args - List of 4-tuple arguments for the member function
include_version - whether to include the version in the invocation
meta - ThunkBodyMetadata for header hints
"""
if len(args) == 0:
# Calling into the "Shared" code for the interface seems like a reasonable
# heuristic when we don't have any arguments; some thunk code follows this
# convention today.
meta.AddApi('ppapi/shared_impl/%s_shared.h' % _StripFileName(filenode))
return 'return %s::%s();' % (_StripApiName(node.GetName()) + '_Shared',
member.GetName())
is_callback_func = args[len(args) - 1][0] == 'struct PP_CompletionCallback'
if is_callback_func:
call_args = args[:-1] + [('', 'enter.callback()', '', '')]
meta.AddInclude('ppapi/c/pp_completion_callback.h')
else:
call_args = args
if args[0][0] == 'PP_Instance':
call_arglist = ', '.join(a[1] for a in call_args)
function_container = 'functions'
elif args[0][0] == 'PP_Resource':
call_arglist = ', '.join(a[1] for a in call_args[1:])
function_container = 'object'
else:
# Calling into the "Shared" code for the interface seems like a reasonable
# heuristic when the first argument isn't a PP_Instance or a PP_Resource;
# some thunk code follows this convention today.
meta.AddApi('ppapi/shared_impl/%s_shared.h' % _StripFileName(filenode))
return 'return %s::%s(%s);' % (_StripApiName(node.GetName()) + '_Shared',
member.GetName(),
', '.join(a[1] for a in args))
function_name = member.GetName()
if include_version:
version = node.GetVersion(release).replace('.', '_')
function_name += version
invocation = 'enter.%s()->%s(%s)' % (function_container,
function_name,
call_arglist)
handle_errors = not (member.GetProperty('report_errors') == 'False')
out_params = _GetOutputParams(member, release)
if is_callback_func:
body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
handle_errors, args[len(args) - 1][1], meta)
failure_value = member.GetProperty('on_failure')
if failure_value is None:
failure_value = 'enter.retval()'
failure_return = 'return %s;' % failure_value
success_return = 'return enter.SetResult(%s);' % invocation
elif rtype == 'void':
body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
handle_errors, None, meta)
failure_return = 'return;'
success_return = '%s;' % invocation # We don't return anything for void.
else:
body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
handle_errors, None, meta)
failure_value = member.GetProperty('on_failure')
if failure_value is None:
failure_value = _GetDefaultFailureValue(rtype)
if failure_value is None:
raise TGenError('There is no default value for rtype %s. '
'Maybe you should provide an on_failure attribute '
'in the IDL file.' % rtype)
failure_return = 'return %s;' % failure_value
success_return = 'return %s;' % invocation
if member.GetProperty('always_set_output_parameters'):
body += 'if (enter.failed()) {\n'
for param in out_params:
body += ' memset(%s, 0, sizeof(*%s));\n' % (param, param)
body += ' %s\n' % failure_return
body += '}\n'
body += '%s' % success_return
meta.AddBuiltinInclude('string.h')
else:
body += 'if (enter.failed())\n'
body += ' %s\n' % failure_return
body += '%s' % success_return
return body
def DefineMember(filenode, node, member, release, include_version, meta):
"""Returns a definition for a member function of an interface.
Args:
filenode - IDLNode for the file
node - IDLNode for the interface
member - IDLNode for the member function
release - release to generate
include_version - include the version in emitted function name.
meta - ThunkMetadata for header hints
Returns:
A string with the member definition.
"""
cgen = CGen()
rtype, name, arrays, args = cgen.GetComponents(member, release, 'return')
log_body = '\"%s::%s()\";' % (node.GetName(), member.GetName())
if len(log_body) > 69: # Prevent lines over 80 characters.
body = 'VLOG(4) <<\n'
body += ' %s\n' % log_body
else:
body = 'VLOG(4) << %s\n' % log_body
if _IsTypeCheck(node, member, args):
body += '%s\n' % _MakeEnterLine(filenode, node, member, args[0], False,
None, meta)
body += 'return PP_FromBool(enter.succeeded());'
elif member.GetName() == 'Create' or member.GetName() == 'CreateTrusted':
body += _MakeCreateMemberBody(node, member, args)
else:
body += _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
include_version, meta)
signature = cgen.GetSignature(member, release, 'return', func_as_ptr=False,
include_version=include_version)
return '%s\n%s\n}' % (cgen.Indent('%s {' % signature, tabs=0),
cgen.Indent(body, tabs=1))
def _IsNewestMember(member, members, releases):
"""Returns true if member is the newest node with its name in members.
Currently, every node in the AST only has one version. This means that we
will have two sibling nodes with the same name to represent different
versions.
See http://crbug.com/157017 .
Special handling is required for nodes which share their name with others,
but aren't the newest version in the IDL.
Args:
member - The member which is checked if it's newest
members - The list of members to inspect
releases - The set of releases to check for versions in.
"""
build_list = member.GetUniqueReleases(releases)
release = build_list[0] # Pick the oldest release.
same_name_siblings = filter(
lambda n: str(n) == str(member) and n != member, members)
for s in same_name_siblings:
sibling_build_list = s.GetUniqueReleases(releases)
sibling_release = sibling_build_list[0]
if sibling_release > release:
return False
return True
class TGen(GeneratorByFile):
def __init__(self):
Generator.__init__(self, 'Thunk', 'tgen', 'Generate the C++ thunk.')
def GenerateFile(self, filenode, releases, options):
savename = _GetThunkFileName(filenode, GetOption('thunkroot'))
my_min, my_max = filenode.GetMinMax(releases)
if my_min > releases[-1] or my_max < releases[0]:
if os.path.isfile(savename):
print "Removing stale %s for this range." % filenode.GetName()
os.remove(os.path.realpath(savename))
return False
do_generate = filenode.GetProperty('generate_thunk')
if not do_generate:
return False
thunk_out = IDLOutFile(savename)
body, meta = self.GenerateBody(thunk_out, filenode, releases, options)
# TODO(teravest): How do we handle repeated values?
if filenode.GetProperty('thunk_include'):
meta.AddInclude(filenode.GetProperty('thunk_include'))
self.WriteHead(thunk_out, filenode, releases, options, meta)
thunk_out.Write('\n\n'.join(body))
self.WriteTail(thunk_out, filenode, releases, options)
return thunk_out.Close()
def WriteHead(self, out, filenode, releases, options, meta):
__pychecker__ = 'unusednames=options'
cgen = CGen()
cright_node = filenode.GetChildren()[0]
assert(cright_node.IsA('Copyright'))
out.Write('%s\n' % cgen.Copyright(cright_node, cpp_style=True))
# Wrap the From ... modified ... comment if it would be >80 characters.
from_text = 'From %s' % (
filenode.GetProperty('NAME').replace(os.sep,'/'))
modified_text = 'modified %s.' % (
filenode.GetProperty('DATETIME'))
if len(from_text) + len(modified_text) < 74:
out.Write('// %s %s\n\n' % (from_text, modified_text))
else:
out.Write('// %s,\n// %s\n\n' % (from_text, modified_text))
if meta.BuiltinIncludes():
for include in sorted(meta.BuiltinIncludes()):
out.Write('#include <%s>\n' % include)
out.Write('\n')
# TODO(teravest): Don't emit includes we don't need.
includes = ['ppapi/c/pp_errors.h',
'ppapi/shared_impl/tracked_callback.h',
'ppapi/thunk/enter.h',
'ppapi/thunk/ppapi_thunk_export.h']
includes.append(_GetHeaderFileName(filenode))
for api in meta.Apis():
includes.append('%s' % api.lower())
for i in meta.Includes():
includes.append(i)
for include in sorted(includes):
out.Write('#include "%s"\n' % include)
out.Write('\n')
out.Write('namespace ppapi {\n')
out.Write('namespace thunk {\n')
out.Write('\n')
out.Write('namespace {\n')
out.Write('\n')
def GenerateBody(self, out, filenode, releases, options):
"""Generates a member function lines to be written and metadata.
Returns a tuple of (body, meta) where:
body - a list of lines with member function bodies
meta - a ThunkMetadata instance for hinting which headers are needed.
"""
__pychecker__ = 'unusednames=options'
out_members = []
meta = ThunkBodyMetadata()
for node in filenode.GetListOf('Interface'):
# Skip if this node is not in this release
if not node.InReleases(releases):
print "Skipping %s" % node
continue
# Generate Member functions
if node.IsA('Interface'):
members = node.GetListOf('Member')
for child in members:
build_list = child.GetUniqueReleases(releases)
# We have to filter out releases this node isn't in.
build_list = filter(lambda r: child.InReleases([r]), build_list)
if len(build_list) == 0:
continue
release = build_list[-1]
include_version = not _IsNewestMember(child, members, releases)
member = DefineMember(filenode, node, child, release, include_version,
meta)
if not member:
continue
out_members.append(member)
return (out_members, meta)
def WriteTail(self, out, filenode, releases, options):
__pychecker__ = 'unusednames=options'
cgen = CGen()
version_list = []
out.Write('\n\n')
for node in filenode.GetListOf('Interface'):
build_list = node.GetUniqueReleases(releases)
for build in build_list:
version = node.GetVersion(build).replace('.', '_')
thunk_name = 'g_' + node.GetName().lower() + '_thunk_' + \
version
thunk_type = '_'.join((node.GetName(), version))
version_list.append((thunk_type, thunk_name))
declare_line = 'const %s %s = {' % (thunk_type, thunk_name)
if len(declare_line) > 80:
declare_line = 'const %s\n %s = {' % (thunk_type, thunk_name)
out.Write('%s\n' % declare_line)
generated_functions = []
members = node.GetListOf('Member')
for child in members:
rtype, name, arrays, args = cgen.GetComponents(
child, build, 'return')
if child.InReleases([build]):
if not _IsNewestMember(child, members, releases):
version = child.GetVersion(
child.first_release[build]).replace('.', '_')
name += '_' + version
generated_functions.append(name)
out.Write(',\n'.join([' &%s' % f for f in generated_functions]))
out.Write('\n};\n\n')
out.Write('} // namespace\n')
out.Write('\n')
for thunk_type, thunk_name in version_list:
thunk_decl = ('PPAPI_THUNK_EXPORT const %s* Get%s_Thunk() {\n' %
(thunk_type, thunk_type))
if len(thunk_decl) > 80:
thunk_decl = ('PPAPI_THUNK_EXPORT const %s*\n Get%s_Thunk() {\n' %
(thunk_type, thunk_type))
out.Write(thunk_decl)
out.Write(' return &%s;\n' % thunk_name)
out.Write('}\n')
out.Write('\n')
out.Write('} // namespace thunk\n')
out.Write('} // namespace ppapi\n')
tgen = TGen()
def Main(args):
# Default invocation will verify the golden files are unchanged.
failed = 0
if not args:
args = ['--wnone', '--diff', '--test', '--thunkroot=.']
ParseOptions(args)
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_thunk', '*.idl')
filenames = glob.glob(idldir)
ast = ParseFiles(filenames)
if tgen.GenerateRange(ast, ['M13', 'M14', 'M15'], {}):
print "Golden file for M13-M15 failed."
failed = 1
else:
print "Golden file for M13-M15 passed."
return failed
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | -2,837,584,663,561,505,300 | 34.424704 | 80 | 0.62094 | false |
jaor/bigmler | bigmler/options/execute.py | 2 | 7974 | # -*- coding: utf-8 -*-
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer execute option
"""
def get_execute_options(defaults=None):
"""Execute-related options
"""
if defaults is None:
defaults = {}
options = {
# A BigML script is provided
'--script': {
"action": 'store',
"dest": 'script',
"default": defaults.get('script', None),
"help": "BigML script Id."},
# A BigML json file containing a script structure
'--script-file': {
'action': 'store',
'dest': 'script_file',
'default': defaults.get('script_file', None),
'help': "BigML script JSON structure file."},
# The path to a file containing script ids.
'--scripts': {
'action': 'store',
'dest': 'scripts',
'default': defaults.get('scripts', None),
'help': ("Path to a file containing script/ids. Just"
" one script per line"
" (e.g., script/50a20697035d0706da0004a4).")},
# A BigML library is provided
'--library': {
"action": 'store',
"dest": 'library',
"default": defaults.get('library', None),
"help": "BigML library Id."},
# A BigML json file containing a library structure
'--library-file': {
'action': 'store',
'dest': 'library_file',
'default': defaults.get('library_file', None),
'help': "BigML library JSON structure file."},
# The path to a file containing library ids.
'--libraries': {
'action': 'store',
'dest': 'libraries',
'default': defaults.get('libraries', None),
'help': ("Path to a file containing libraries/ids. Just"
" one library per line"
" (e.g., library/50a20697035d0706da0004a4).")},
# A BigML execution is provided
'--execution': {
"action": 'store',
"dest": 'execution',
"default": defaults.get('execution', None),
"help": "BigML execution Id."},
# A BigML json file containing a execution structure
'--execution-file': {
'action': 'store',
'dest': 'execution_file',
'default': defaults.get('execution_file', None),
'help': "BigML execution JSON structure file."},
# The path to a file containing execution ids.
'--executions': {
'action': 'store',
'dest': 'executions',
'default': defaults.get('executions', None),
'help': ("Path to a file containing execution/ids. Just"
" one execution per line"
" (e.g., execution/50a20697035d0706da0004a4).")},
# Path to the file that contains Whizzml source code
'--code-file': {
"action": 'store',
'dest': 'code_file',
'default': defaults.get('code_file', None),
'help': ("Path to the file that contains Whizzml source code.")},
# Path to the file that contains Whizzml source code
'--code': {
"action": 'store',
'dest': 'code',
'default': defaults.get('code', None),
'help': ("String of Whizzml source code.")},
# Name of the file to output the code.
"--output": {
'action': 'store',
'dest': 'output',
'default': defaults.get('output', None),
'help': "Path to the file to output the execution results."},
        # Comma-separated list of library IDs to be included as imports
# in scripts or other libraries.
"--imports": {
'action': 'store',
'dest': 'imports',
'default': defaults.get('imports', None),
'help': ("Comma-separated list of libraries IDs to be"
" included as imports in scripts or other libraries.")},
# File that contains the paths to the import code files, one per line.
"--embedded-imports": {
'action': 'store',
'dest': 'embedded_imports',
'default': defaults.get('embedded_imports', None),
'help': ("File that contains the paths to the import code files,"
" one per line.")},
# Path to the JSON file with the values of the execution parms.
"--inputs": {
'action': 'store',
'dest': 'inputs',
'default': defaults.get('inputs', None),
'help': ("Path to the JSON file with the description of "
"the execution inputs")},
# Path to the JSON file with the description of the execution parms for
# a list of scripts
"--input-maps": {
'action': 'store',
'dest': 'input_maps',
'default': defaults.get('input_maps', None),
'help': ("Path to the JSON file with the description of "
"the execution inputs for a list of scripts")},
# Path to the JSON file with the description of the input parms.
"--declare-inputs": {
'action': 'store',
'dest': 'declare_inputs',
'default': defaults.get('declare_inputs', None),
'help': ("Path to the JSON file with the description of "
"the input parameters")},
# Path to the JSON file with the names of the output parameters.
"--outputs": {
'action': 'store',
'dest': 'outputs',
'default': defaults.get('outputs', None),
'help': ("Path to the JSON file with the names of the output"
" parameters")},
# Path to the JSON file with the description of the script output
# parameters.
"--declare-outputs": {
'action': 'store',
'dest': 'declare_outputs',
'default': defaults.get('declare_outputs', None),
'help': ("Path to the JSON file with the description of "
"the script outputs")},
# Path to the JSON file with the creation defaults for
# the script parameters.
"--creation-defaults": {
'action': 'store',
'dest': 'creation_defaults',
'default': defaults.get('creation_defaults', None),
'help': ("Path to the JSON file with the default "
"configurations for created resources.")},
# Don't execute the script. Compile only.
'--no-execute': {
"action": 'store_true',
"dest": 'no_execute',
"default": defaults.get('no_execute', False),
"help": "Don't execute the script. Compile only.'"},
# Execute the script. (opposed to --no-execute).
'--execute': {
"action": 'store_false',
"dest": 'no_execute',
"default": defaults.get('no_execute', False),
"help": "Execute the script. (Opposed to --no-execute)."},
# Create a library instead of a script.
'--to-library': {
"action": 'store_true',
"dest": 'to_library',
"default": defaults.get('to_library', False),
"help": "Compile the code as a library."}
}
return options
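# A hypothetical command line exercising a few of the flags defined above
# (the script id and file names are made up):
#   bigmler execute --script script/50a20697035d0706da0004a4 \
#                   --inputs inputs.json --output results.txt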
| apache-2.0 | 8,454,337,657,930,820,000 | 36.791469 | 79 | 0.528969 | false |
coteyr/home-assistant | homeassistant/components/switch/wink.py | 1 | 1109 | """
homeassistant.components.switch.wink
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Wink switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.wink/
"""
import logging
from homeassistant.components.wink import WinkToggleDevice
from homeassistant.const import CONF_ACCESS_TOKEN
REQUIREMENTS = ['python-wink==0.6.2']
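# A minimal configuration sketch for this platform (the token value is only a
# placeholder, not a real credential):
#
#   switch:
#     platform: wink
#     access_token: YOUR_WINK_BEARER_TOKEN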
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Wink platform. """
import pywink
if discovery_info is None:
token = config.get(CONF_ACCESS_TOKEN)
if token is None:
logging.getLogger(__name__).error(
"Missing wink access_token. "
"Get one at https://winkbearertoken.appspot.com/")
return
pywink.set_bearer_token(token)
add_devices(WinkToggleDevice(switch) for switch in pywink.get_switches())
add_devices(WinkToggleDevice(switch) for switch in
pywink.get_powerstrip_outlets())
add_devices(WinkToggleDevice(switch) for switch in pywink.get_sirens())
| mit | 7,189,512,233,521,884,000 | 30.685714 | 77 | 0.669973 | false |
alon/polinax | libs/external_libs/markdown-1.7/markdown.py | 21 | 61612 | #!/usr/bin/env python
version = "1.7"
version_info = (1,7,0,"rc-2")
__revision__ = "$Rev: 72 $"
"""
Python-Markdown
===============
Converts Markdown to HTML. Basic usage as a module:
import markdown
md = Markdown()
html = md.convert(your_text_string)
See http://www.freewisdom.org/projects/python-markdown/ for more
information and instructions on how to extend the functionality of the
script. (You might want to read that before you try modifying this
file.)
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org) and [Waylan
Limberg](http://achinghead.com/).
Contact: yuri [at] freewisdom.org
waylan [at] gmail.com
License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD
"""
import re, sys, codecs
from logging import getLogger, StreamHandler, Formatter, \
DEBUG, INFO, WARN, ERROR, CRITICAL
MESSAGE_THRESHOLD = CRITICAL
# Configure debug message logger (the hard way - to support python 2.3)
logger = getLogger('MARKDOWN')
logger.setLevel(DEBUG) # This is restricted by handlers later
console_hndlr = StreamHandler()
formatter = Formatter('%(name)s-%(levelname)s: "%(message)s"')
console_hndlr.setFormatter(formatter)
console_hndlr.setLevel(MESSAGE_THRESHOLD)
logger.addHandler(console_hndlr)
def message(level, text):
''' A wrapper method for logging debug messages. '''
logger.log(level, text)
# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------
TAB_LENGTH = 4 # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = 1 # this_or_that does not become this<i>or</i>that
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
# from Hebrew to Nko (includes Arabic, Syriac and Thaana)
(u'\u2D30', u'\u2D7F'),
# Tifinagh
)
# Unicode Reference Table:
# 0590-05FF - Hebrew
# 0600-06FF - Arabic
# 0700-074F - Syriac
# 0750-077F - Arabic Supplement
# 0780-07BF - Thaana
# 07C0-07FF - Nko
BOMS = { 'utf-8': (codecs.BOM_UTF8, ),
'utf-16': (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE),
#'utf-32': (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)
}
def removeBOM(text, encoding):
convert = isinstance(text, unicode)
for bom in BOMS[encoding]:
bom = convert and bom.decode(encoding) or bom
if text.startswith(bom):
return text.lstrip(bom)
return text
# The following constant specifies the name used in the usage
# statement displayed for python versions lower than 2.3. (With
# python2.3 and higher the usage statement is generated by optparse
# and uses the actual name of the executable called.)
EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
# --------------- CONSTANTS YOU _SHOULD NOT_ HAVE TO CHANGE ----------
# a template for html placeholders
HTML_PLACEHOLDER_PREFIX = "qaodmasdkwaspemas"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%dajkqlsmdqpakldnzsdfls"
BLOCK_LEVEL_ELEMENTS = ['p', 'div', 'blockquote', 'pre', 'table',
'dl', 'ol', 'ul', 'script', 'noscript',
'form', 'fieldset', 'iframe', 'math', 'ins',
'del', 'hr', 'hr/', 'style']
def isBlockLevel (tag):
return ( (tag in BLOCK_LEVEL_ELEMENTS) or
(tag[0] == 'h' and tag[1] in "0123456789") )
"""
======================================================================
========================== NANODOM ===================================
======================================================================
The three classes below implement some of the most basic DOM
methods. I use this instead of minidom because I need a simpler
functionality and do not want to require additional libraries.
Importantly, NanoDom does not do normalization, which is what we
want. It also adds extra white space when converting DOM to string.
"""
ENTITY_NORMALIZATION_EXPRESSIONS = [ (re.compile("&"), "&"),
(re.compile("<"), "<"),
(re.compile(">"), ">")]
ENTITY_NORMALIZATION_EXPRESSIONS_SOFT = [ (re.compile("&(?!\#)"), "&"),
(re.compile("<"), "<"),
(re.compile(">"), ">"),
(re.compile("\""), """)]
def getBidiType(text):
if not text: return None
ch = text[0]
if not isinstance(ch, unicode) or not ch.isalpha():
return None
else:
for min, max in RTL_BIDI_RANGES:
if ( ch >= min and ch <= max ):
return "rtl"
else:
return "ltr"
class Document:
def __init__ (self):
self.bidi = "ltr"
def appendChild(self, child):
self.documentElement = child
child.isDocumentElement = True
child.parent = self
self.entities = {}
def setBidi(self, bidi):
if bidi:
self.bidi = bidi
def createElement(self, tag, textNode=None):
el = Element(tag)
el.doc = self
if textNode:
el.appendChild(self.createTextNode(textNode))
return el
def createTextNode(self, text):
node = TextNode(text)
node.doc = self
return node
def createEntityReference(self, entity):
if entity not in self.entities:
self.entities[entity] = EntityReference(entity)
return self.entities[entity]
def createCDATA(self, text):
node = CDATA(text)
node.doc = self
return node
def toxml (self):
return self.documentElement.toxml()
def normalizeEntities(self, text, avoidDoubleNormalizing=False):
if avoidDoubleNormalizing:
regexps = ENTITY_NORMALIZATION_EXPRESSIONS_SOFT
else:
regexps = ENTITY_NORMALIZATION_EXPRESSIONS
for regexp, substitution in regexps:
text = regexp.sub(substitution, text)
return text
def find(self, test):
return self.documentElement.find(test)
def unlink(self):
self.documentElement.unlink()
self.documentElement = None
class CDATA:
type = "cdata"
def __init__ (self, text):
self.text = text
def handleAttributes(self):
pass
def toxml (self):
return "<![CDATA[" + self.text + "]]>"
class Element:
type = "element"
def __init__ (self, tag):
self.nodeName = tag
self.attributes = []
self.attribute_values = {}
self.childNodes = []
self.bidi = None
self.isDocumentElement = False
def setBidi(self, bidi):
if bidi:
orig_bidi = self.bidi
if not self.bidi or self.isDocumentElement:
# Once the bidi is set don't change it (except for doc element)
self.bidi = bidi
self.parent.setBidi(bidi)
def unlink(self):
for child in self.childNodes:
if child.type == "element":
child.unlink()
self.childNodes = None
def setAttribute(self, attr, value):
if not attr in self.attributes:
self.attributes.append(attr)
self.attribute_values[attr] = value
def insertChild(self, position, child):
self.childNodes.insert(position, child)
child.parent = self
def removeChild(self, child):
self.childNodes.remove(child)
def replaceChild(self, oldChild, newChild):
position = self.childNodes.index(oldChild)
self.removeChild(oldChild)
self.insertChild(position, newChild)
def appendChild(self, child):
self.childNodes.append(child)
child.parent = self
def handleAttributes(self):
pass
def find(self, test, depth=0):
""" Returns a list of descendants that pass the test function """
matched_nodes = []
for child in self.childNodes:
if test(child):
matched_nodes.append(child)
if child.type == "element":
matched_nodes += child.find(test, depth+1)
return matched_nodes
def toxml(self):
if ENABLE_ATTRIBUTES:
for child in self.childNodes:
child.handleAttributes()
buffer = ""
if self.nodeName in ['h1', 'h2', 'h3', 'h4']:
buffer += "\n"
elif self.nodeName in ['li']:
buffer += "\n "
# Process children FIRST, then do the attributes
childBuffer = ""
if self.childNodes or self.nodeName in ['blockquote']:
childBuffer += ">"
for child in self.childNodes:
childBuffer += child.toxml()
if self.nodeName == 'p':
childBuffer += "\n"
elif self.nodeName == 'li':
childBuffer += "\n "
childBuffer += "</%s>" % self.nodeName
else:
childBuffer += "/>"
buffer += "<" + self.nodeName
if self.nodeName in ['p', 'li', 'ul', 'ol',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
if not self.attribute_values.has_key("dir"):
if self.bidi:
bidi = self.bidi
else:
bidi = self.doc.bidi
if bidi=="rtl":
self.setAttribute("dir", "rtl")
for attr in self.attributes:
value = self.attribute_values[attr]
value = self.doc.normalizeEntities(value,
avoidDoubleNormalizing=True)
buffer += ' %s="%s"' % (attr, value)
# Now let's actually append the children
buffer += childBuffer
if self.nodeName in ['p', 'br ', 'li', 'ul', 'ol',
'h1', 'h2', 'h3', 'h4'] :
buffer += "\n"
return buffer
class TextNode:
type = "text"
attrRegExp = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}
def __init__ (self, text):
self.value = text
def attributeCallback(self, match):
self.parent.setAttribute(match.group(1), match.group(2))
def handleAttributes(self):
self.value = self.attrRegExp.sub(self.attributeCallback, self.value)
def toxml(self):
text = self.value
self.parent.setBidi(getBidiType(text))
if not text.startswith(HTML_PLACEHOLDER_PREFIX):
if self.parent.nodeName == "p":
text = text.replace("\n", "\n ")
elif (self.parent.nodeName == "li"
and self.parent.childNodes[0]==self):
text = "\n " + text.replace("\n", "\n ")
text = self.doc.normalizeEntities(text)
return text
class EntityReference:
type = "entity_ref"
def __init__(self, entity):
self.entity = entity
def handleAttributes(self):
pass
def toxml(self):
return "&" + self.entity + ";"
"""
======================================================================
========================== PRE-PROCESSORS ============================
======================================================================
Preprocessors munge source text before we start doing anything too
complicated.
There are two types of preprocessors: TextPreprocessor and Preprocessor.
"""
class TextPreprocessor:
'''
TextPreprocessors are run before the text is broken into lines.
Each TextPreprocessor implements a "run" method that takes a pointer to a
text string of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new string.
TextPreprocessors must extend markdown.TextPreprocessor.
'''
def run(self, text):
pass
class Preprocessor:
'''
Preprocessors are run after the text is broken into lines.
Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.
Preprocessors must extend markdown.Preprocessor.
'''
def run(self, lines):
pass
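
# Illustrative sketch (not part of the original module): a minimal line-based
# preprocessor that follows the contract described above. It merely strips
# trailing whitespace from every line; a real preprocessor would be placed in
# Markdown.preprocessors (see the Markdown class further below).
class _RstripPreprocessor(Preprocessor):

    def run(self, lines):
        return [line.rstrip() for line in lines]
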
class HtmlBlockPreprocessor(TextPreprocessor):
"""Removes html blocks from the source text and stores it."""
def _get_left_tag(self, block):
return block[1:].replace(">", " ", 1).split()[0].lower()
def _get_right_tag(self, left_tag, block):
return block.rstrip()[-len(left_tag)-2:-1].lower()
def _equal_tags(self, left_tag, right_tag):
if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
elif left_tag == right_tag[1:] \
and right_tag[0] != "<":
return True
else:
return False
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
def run(self, text):
new_blocks = []
text = text.split("\n\n")
items = []
left_tag = ''
right_tag = ''
in_tag = False # flag
for block in text:
if block.startswith("\n"):
block = block[1:]
if not in_tag:
if block.startswith("<"):
left_tag = self._get_left_tag(block)
right_tag = self._get_right_tag(left_tag, block)
if not (isBlockLevel(left_tag) \
or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
if self._is_oneliner(left_tag):
new_blocks.append(block.strip())
continue
if block[1] == "!":
# is a comment block
left_tag = "--"
right_tag = self._get_right_tag(left_tag, block)
# keep checking conditions below and maybe just append
if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag):
new_blocks.append(
self.stash.store(block.strip()))
continue
else: #if not block[1] == "!":
# if is block level tag and is not complete
items.append(block.strip())
in_tag = True
continue
new_blocks.append(block)
else:
items.append(block.strip())
right_tag = self._get_right_tag(left_tag, block)
if self._equal_tags(left_tag, right_tag):
# if find closing tag
in_tag = False
new_blocks.append(
self.stash.store('\n\n'.join(items)))
items = []
if items:
new_blocks.append(self.stash.store('\n\n'.join(items)))
new_blocks.append('\n')
return "\n\n".join(new_blocks)
HTML_BLOCK_PREPROCESSOR = HtmlBlockPreprocessor()
class HeaderPreprocessor(Preprocessor):
"""
Replaces underlined headers with hashed headers to avoid
the need for lookahead later.
"""
def run (self, lines):
i = -1
while i+1 < len(lines):
i = i+1
if not lines[i].strip():
continue
if lines[i].startswith("#"):
lines.insert(i+1, "\n")
if (i+1 <= len(lines)
and lines[i+1]
and lines[i+1][0] in ['-', '=']):
underline = lines[i+1].strip()
if underline == "="*len(underline):
lines[i] = "# " + lines[i].strip()
lines[i+1] = ""
elif underline == "-"*len(underline):
lines[i] = "## " + lines[i].strip()
lines[i+1] = ""
return lines
HEADER_PREPROCESSOR = HeaderPreprocessor()
class LinePreprocessor(Preprocessor):
"""Deals with HR lines (needs to be done before processing lists)"""
blockquote_re = re.compile(r'^(> )+')
def run (self, lines):
for i in range(len(lines)):
prefix = ''
m = self.blockquote_re.search(lines[i])
if m : prefix = m.group(0)
if self._isLine(lines[i][len(prefix):]):
lines[i] = prefix + self.stash.store("<hr />", safe=True)
return lines
def _isLine(self, block):
"""Determines if a block should be replaced with an <HR>"""
if block.startswith(" "): return 0 # a code block
text = "".join([x for x in block if not x.isspace()])
if len(text) <= 2:
return 0
for pattern in ['isline1', 'isline2', 'isline3']:
m = RE.regExp[pattern].match(text)
if (m and m.group(1)):
return 1
else:
return 0
LINE_PREPROCESSOR = LinePreprocessor()
class ReferencePreprocessor(Preprocessor):
'''
Removes reference definitions from the text and stores them for later use.
'''
def run (self, lines):
new_text = [];
for line in lines:
m = RE.regExp['reference-def'].match(line)
if m:
id = m.group(2).strip().lower()
t = m.group(4).strip() # potential title
if not t:
self.references[id] = (m.group(3), t)
elif (len(t) >= 2
and (t[0] == t[-1] == "\""
or t[0] == t[-1] == "\'"
or (t[0] == "(" and t[-1] == ")") ) ):
self.references[id] = (m.group(3), t[1:-1])
else:
new_text.append(line)
else:
new_text.append(line)
return new_text #+ "\n"
REFERENCE_PREPROCESSOR = ReferencePreprocessor()
"""
======================================================================
========================== INLINE PATTERNS ===========================
======================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() - returns a regular expression
pattern.handleMatch(m, doc) - takes a match object and returns
a NanoDom node (as a part of the provided
doc) or None
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline patterns must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)$'. For the built-in expressions,
Pattern takes care of adding the "^(.*)" and "(.*)$".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'\`([^\`]*)\`' # `e= m*c^2`
DOUBLE_BACKTICK_RE = r'\`\`(.*)\`\`' # ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'\*([^\*]*)\*' # *emphasis*
STRONG_RE = r'\*\*(.*)\*\*' # **strong**
STRONG_EM_RE = r'\*\*\*([^_]*)\*\*\*' # ***strong***
if SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\S)_(\S[^_]*)_' # _emphasis_
else:
EMPHASIS_2_RE = r'_([^_]*)_' # _emphasis_
STRONG_2_RE = r'__([^_]*)__' # __strong__
STRONG_EM_2_RE = r'___([^_]*)___' # ___strong___
LINK_RE = NOIMG + BRK + r'\s*\(([^\)]*)\)' # [text](url)
LINK_ANGLED_RE = NOIMG + BRK + r'\s*\(<([^\)]*)>\)' # [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\(([^\)]*)\)' # 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'( \* )' # stand-alone * or _
AUTOLINK_RE = r'<(http://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <[email protected]>
#HTML_RE = r'(\<[^\>]*\>)' # <...>
HTML_RE = r'(\<[a-zA-Z/][^\>]*\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
class Pattern:
def __init__ (self, pattern):
self.pattern = pattern
self.compiled_re = re.compile("^(.*)%s(.*)$" % pattern, re.DOTALL)
def getCompiledRegExp (self):
return self.compiled_re
BasePattern = Pattern # for backward compatibility
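
# Illustrative sketch (not part of the original module): a custom inline pattern
# for a hypothetical ~~strikethrough~~ syntax. Pattern.__init__ wraps the
# regular expression in "^(.*)" and "(.*)$", so the markup's own capture group
# is m.group(2) inside handleMatch. The pattern is defined here purely for
# illustration and is never added to Markdown.inlinePatterns.
STRIKETHROUGH_RE = r'\~\~([^\~]*)\~\~'   # hypothetical syntax, not real Markdown

class _StrikethroughPattern (Pattern):

    def handleMatch(self, m, doc):
        el = doc.createElement('del')
        el.appendChild(doc.createTextNode(m.group(2)))
        return el
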
class SimpleTextPattern (Pattern):
def handleMatch(self, m, doc):
return doc.createTextNode(m.group(2))
class SimpleTagPattern (Pattern):
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m, doc):
el = doc.createElement(self.tag)
el.appendChild(doc.createTextNode(m.group(2)))
return el
class SubstituteTagPattern (SimpleTagPattern):
def handleMatch (self, m, doc):
return doc.createElement(self.tag)
class BacktickPattern (Pattern):
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m, doc):
el = doc.createElement(self.tag)
text = m.group(2).strip()
#text = text.replace("&", "&")
el.appendChild(doc.createTextNode(text))
return el
class DoubleTagPattern (SimpleTagPattern):
def handleMatch(self, m, doc):
tag1, tag2 = self.tag.split(",")
el1 = doc.createElement(tag1)
el2 = doc.createElement(tag2)
el1.appendChild(el2)
el2.appendChild(doc.createTextNode(m.group(2)))
return el1
class HtmlPattern (Pattern):
def handleMatch (self, m, doc):
rawhtml = m.group(2)
inline = True
place_holder = self.stash.store(rawhtml)
return doc.createTextNode(place_holder)
class LinkPattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('a')
el.appendChild(doc.createTextNode(m.group(2)))
parts = m.group(9).split('"')
# We should now have [], [href], or [href, title]
if parts:
el.setAttribute('href', parts[0].strip())
else:
el.setAttribute('href', "")
if len(parts) > 1:
# we also got a title
title = '"' + '"'.join(parts[1:]).strip()
title = dequote(title) #.replace('"', """)
el.setAttribute('title', title)
return el
class ImagePattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('img')
src_parts = m.group(9).split()
if src_parts:
el.setAttribute('src', src_parts[0])
else:
el.setAttribute('src', "")
if len(src_parts) > 1:
el.setAttribute('title', dequote(" ".join(src_parts[1:])))
if ENABLE_ATTRIBUTES:
text = doc.createTextNode(m.group(2))
el.appendChild(text)
text.handleAttributes()
truealt = text.value
el.childNodes.remove(text)
else:
truealt = m.group(2)
el.setAttribute('alt', truealt)
return el
class ReferencePattern (Pattern):
def handleMatch(self, m, doc):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
if not self.references.has_key(id): # ignore undefined refs
return None
href, title = self.references[id]
text = m.group(2)
return self.makeTag(href, title, text, doc)
def makeTag(self, href, title, text, doc):
el = doc.createElement('a')
el.setAttribute('href', href)
if title:
el.setAttribute('title', title)
el.appendChild(doc.createTextNode(text))
return el
class ImageReferencePattern (ReferencePattern):
def makeTag(self, href, title, text, doc):
el = doc.createElement('img')
el.setAttribute('src', href)
if title:
el.setAttribute('title', title)
el.setAttribute('alt', text)
return el
class AutolinkPattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('a')
el.setAttribute('href', m.group(2))
el.appendChild(doc.createTextNode(m.group(2)))
return el
class AutomailPattern (Pattern):
def handleMatch(self, m, doc):
el = doc.createElement('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
for letter in email:
entity = doc.createEntityReference("#%d" % ord(letter))
el.appendChild(entity)
mailto = "mailto:" + email
mailto = "".join(['&#%d;' % ord(letter) for letter in mailto])
el.setAttribute('href', mailto)
return el
ESCAPE_PATTERN = SimpleTextPattern(ESCAPE_RE)
NOT_STRONG_PATTERN = SimpleTextPattern(NOT_STRONG_RE)
BACKTICK_PATTERN = BacktickPattern(BACKTICK_RE)
DOUBLE_BACKTICK_PATTERN = BacktickPattern(DOUBLE_BACKTICK_RE)
STRONG_PATTERN = SimpleTagPattern(STRONG_RE, 'strong')
STRONG_PATTERN_2 = SimpleTagPattern(STRONG_2_RE, 'strong')
EMPHASIS_PATTERN = SimpleTagPattern(EMPHASIS_RE, 'em')
EMPHASIS_PATTERN_2 = SimpleTagPattern(EMPHASIS_2_RE, 'em')
STRONG_EM_PATTERN = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
STRONG_EM_PATTERN_2 = DoubleTagPattern(STRONG_EM_2_RE, 'strong,em')
LINE_BREAK_PATTERN = SubstituteTagPattern(LINE_BREAK_RE, 'br ')
LINE_BREAK_PATTERN_2 = SubstituteTagPattern(LINE_BREAK_2_RE, 'br ')
LINK_PATTERN = LinkPattern(LINK_RE)
LINK_ANGLED_PATTERN = LinkPattern(LINK_ANGLED_RE)
IMAGE_LINK_PATTERN = ImagePattern(IMAGE_LINK_RE)
IMAGE_REFERENCE_PATTERN = ImageReferencePattern(IMAGE_REFERENCE_RE)
REFERENCE_PATTERN = ReferencePattern(REFERENCE_RE)
HTML_PATTERN = HtmlPattern(HTML_RE)
ENTITY_PATTERN = HtmlPattern(ENTITY_RE)
AUTOLINK_PATTERN = AutolinkPattern(AUTOLINK_RE)
AUTOMAIL_PATTERN = AutomailPattern(AUTOMAIL_RE)
"""
======================================================================
========================== POST-PROCESSORS ===========================
======================================================================
Markdown also allows post-processors, which are similar to
preprocessors in that they need to implement a "run" method. However,
they are run after core processing.
There are two types of post-processors: Postprocessor and TextPostprocessor
"""
class Postprocessor:
'''
Postprocessors are run before the dom is converted back into text.
Each Postprocessor implements a "run" method that takes a pointer to a
NanoDom document, modifies it as necessary and returns a NanoDom
document.
Postprocessors must extend markdown.Postprocessor.
There are currently no standard post-processors, but the footnote
extension uses one.
'''
def run(self, dom):
pass
class TextPostprocessor:
'''
TextPostprocessors are run after the dom is converted back into text.
Each TextPostprocessor implements a "run" method that takes a pointer to a
text string, modifies it as necessary and returns a text string.
TextPostprocessors must extend markdown.TextPostprocessor.
'''
def run(self, text):
pass
class RawHtmlTextPostprocessor(TextPostprocessor):
def __init__(self):
pass
def run(self, text):
for i in range(self.stash.html_counter):
html, safe = self.stash.rawHtmlBlocks[i]
if self.safeMode and not safe:
if str(self.safeMode).lower() == 'escape':
html = self.escape(html)
elif str(self.safeMode).lower() == 'remove':
html = ''
else:
html = HTML_REMOVED_TEXT
text = text.replace("<p>%s\n</p>" % (HTML_PLACEHOLDER % i),
html + "\n")
text = text.replace(HTML_PLACEHOLDER % i, html)
return text
def escape(self, html):
''' Basic html escaping '''
html = html.replace('&', '&')
html = html.replace('<', '<')
html = html.replace('>', '>')
return html.replace('"', '"')
RAWHTMLTEXTPOSTPROCESSOR = RawHtmlTextPostprocessor()
"""
======================================================================
========================== MISC AUXILIARY CLASSES ====================
======================================================================
"""
class HtmlStash:
"""This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders."""
def __init__ (self):
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[]
def store(self, html, safe=False):
"""Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
@param html: an html segment
@param safe: label an html segment as safe for safemode
@returns: a placeholder string """
self.rawHtmlBlocks.append((html, safe))
placeholder = HTML_PLACEHOLDER % self.html_counter
self.html_counter += 1
return placeholder
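
# Illustrative sketch (not part of the original module): typical HtmlStash use.
# HtmlBlockPreprocessor stores a raw HTML block and leaves the returned
# placeholder in the text; RawHtmlTextPostprocessor later swaps the placeholder
# back for the stored HTML.
def _html_stash_sketch():
    stash = HtmlStash()
    placeholder = stash.store("<div>raw html</div>", safe=True)
    html, safe = stash.rawHtmlBlocks[0]
    return placeholder, html, safe
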
class BlockGuru:
def _findHead(self, lines, fn, allowBlank=0):
"""Functional magic to help determine boundaries of indented
blocks.
@param lines: an array of strings
@param fn: a function that returns a substring of a string
if the string matches the necessary criteria
@param allowBlank: specifies whether it's ok to have blank
lines between matching functions
@returns: a list of post-processed items and the unused
remainder of the original list"""
items = []
item = -1
i = 0 # to keep track of where we are
for line in lines:
if not line.strip() and not allowBlank:
return items, lines[i:]
if not line.strip() and allowBlank:
# If we see a blank line, this _might_ be the end
i += 1
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next = lines[j]
break
else:
# There is no more text => this is the end
break
# Check if the next non-blank line is still a part of the list
part = fn(next)
if part:
items.append("")
continue
else:
break # found end of the list
part = fn(line)
if part:
items.append(part)
i += 1
continue
else:
return items, lines[i:]
else:
i += 1
return items, lines[i:]
def detabbed_fn(self, line):
""" An auxiliary method to be passed to _findHead """
m = RE.regExp['tabbed'].match(line)
if m:
return m.group(4)
else:
return None
def detectTabbed(self, lines):
return self._findHead(lines, self.detabbed_fn,
allowBlank = 1)
def print_error(string):
"""Print an error string to stderr"""
sys.stderr.write(string +'\n')
def dequote(string):
""" Removes quotes from around a string """
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
"""
======================================================================
========================== CORE MARKDOWN =============================
======================================================================
This stuff is ugly, so if you are thinking of extending the syntax,
see first if you can do it via pre-processors, post-processors,
inline patterns or a combination of the three.
"""
class CorePatterns:
"""This class is scheduled for removal as part of a refactoring
effort."""
patterns = {
'header': r'(#*)([^#]*)(#*)', # # A title
'reference-def': r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)',
# [Google]: http://www.google.com/
'containsline': r'([-]*)$|^([=]*)', # -----, =====, etc.
'ol': r'[ ]{0,3}[\d]*\.\s+(.*)', # 1. text
'ul': r'[ ]{0,3}[*+-]\s+(.*)', # "* text"
'isline1': r'(\**)', # ***
'isline2': r'(\-*)', # ---
'isline3': r'(\_*)', # ___
'tabbed': r'((\t)|( ))(.*)', # an indented line
'quoted': r'> ?(.*)', # a quoted block ("> ...")
}
def __init__ (self):
self.regExp = {}
for key in self.patterns.keys():
self.regExp[key] = re.compile("^%s$" % self.patterns[key],
re.DOTALL)
self.regExp['containsline'] = re.compile(r'^([-]*)$|^([=]*)$', re.M)
RE = CorePatterns()
class Markdown:
""" Markdown formatter class for creating an html document from
Markdown text """
def __init__(self, source=None, # deprecated
extensions=[],
extension_configs=None,
safe_mode = False):
"""Creates a new Markdown instance.
@param source: The text in Markdown format. Deprecated!
@param extensions: A list of extensions.
@param extension_configs: Configuration settings for extensions.
@param safe_mode: Disallow raw html. """
self.source = source
if source is not None:
message(WARN, "The `source` arg of Markdown.__init__() is depreciated and will be removed in the future. Use `instance.convert(source)` instead.")
self.safeMode = safe_mode
self.blockGuru = BlockGuru()
self.registeredExtensions = []
self.stripTopLevelTags = 1
self.docType = ""
self.textPreprocessors = [HTML_BLOCK_PREPROCESSOR]
self.preprocessors = [HEADER_PREPROCESSOR,
LINE_PREPROCESSOR,
# A footnote preprocessor will
# get inserted here
REFERENCE_PREPROCESSOR]
self.postprocessors = [] # a footnote postprocessor will get
# inserted later
self.textPostprocessors = [# a footnote postprocessor will get
# inserted here
RAWHTMLTEXTPOSTPROCESSOR]
self.prePatterns = []
self.inlinePatterns = [DOUBLE_BACKTICK_PATTERN,
BACKTICK_PATTERN,
ESCAPE_PATTERN,
REFERENCE_PATTERN,
LINK_ANGLED_PATTERN,
LINK_PATTERN,
IMAGE_LINK_PATTERN,
IMAGE_REFERENCE_PATTERN,
AUTOLINK_PATTERN,
AUTOMAIL_PATTERN,
LINE_BREAK_PATTERN_2,
LINE_BREAK_PATTERN,
HTML_PATTERN,
ENTITY_PATTERN,
NOT_STRONG_PATTERN,
STRONG_EM_PATTERN,
STRONG_EM_PATTERN_2,
STRONG_PATTERN,
STRONG_PATTERN_2,
EMPHASIS_PATTERN,
EMPHASIS_PATTERN_2
# The order of the handlers matters!!!
]
self.registerExtensions(extensions = extensions,
configs = extension_configs)
self.reset()
def registerExtensions(self, extensions, configs):
if not configs:
configs = {}
for ext in extensions:
extension_module_name = "mdx_" + ext
try:
module = __import__(extension_module_name)
except:
message(CRITICAL,
"couldn't load extension %s (looking for %s module)"
% (ext, extension_module_name) )
else:
if configs.has_key(ext):
configs_for_ext = configs[ext]
else:
configs_for_ext = []
extension = module.makeExtension(configs_for_ext)
extension.extendMarkdown(self, globals())
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
def reset(self):
"""Resets all state variables so that we can start
with a new text."""
self.references={}
self.htmlStash = HtmlStash()
HTML_BLOCK_PREPROCESSOR.stash = self.htmlStash
LINE_PREPROCESSOR.stash = self.htmlStash
REFERENCE_PREPROCESSOR.references = self.references
HTML_PATTERN.stash = self.htmlStash
ENTITY_PATTERN.stash = self.htmlStash
REFERENCE_PATTERN.references = self.references
IMAGE_REFERENCE_PATTERN.references = self.references
RAWHTMLTEXTPOSTPROCESSOR.stash = self.htmlStash
RAWHTMLTEXTPOSTPROCESSOR.safeMode = self.safeMode
for extension in self.registeredExtensions:
extension.reset()
def _transform(self):
"""Transforms the Markdown text into a XHTML body document
@returns: A NanoDom Document """
# Setup the document
self.doc = Document()
self.top_element = self.doc.createElement("span")
self.top_element.appendChild(self.doc.createTextNode('\n'))
self.top_element.setAttribute('class', 'markdown')
self.doc.appendChild(self.top_element)
# Fixup the source text
text = self.source
text = text.replace("\r\n", "\n").replace("\r", "\n")
text += "\n\n"
text = text.expandtabs(TAB_LENGTH)
# Split into lines and run the preprocessors that will work with
# self.lines
self.lines = text.split("\n")
# Run the pre-processors on the lines
for prep in self.preprocessors :
self.lines = prep.run(self.lines)
# Create a NanoDom tree from the lines and attach it to Document
buffer = []
for line in self.lines:
if line.startswith("#"):
self._processSection(self.top_element, buffer)
buffer = [line]
else:
buffer.append(line)
self._processSection(self.top_element, buffer)
#self._processSection(self.top_element, self.lines)
# Not sure why I put this in but let's leave it for now.
self.top_element.appendChild(self.doc.createTextNode('\n'))
# Run the post-processors
for postprocessor in self.postprocessors:
postprocessor.run(self.doc)
return self.doc
def _processSection(self, parent_elem, lines,
inList = 0, looseList = 0):
"""Process a section of a source document, looking for high
level structural elements like lists, block quotes, code
segments, html blocks, etc. Some of those then get stripped
of their high level markup (e.g. get unindented) and the
lower-level markup is processed recursively.
@param parent_elem: A NanoDom element to which the content
will be added
@param lines: a list of lines
@param inList: a level
@returns: None"""
# Loop through lines until none left.
while lines:
# Check if this section starts with a list, a blockquote or
# a code block
processFn = { 'ul': self._processUList,
'ol': self._processOList,
'quoted': self._processQuote,
'tabbed': self._processCodeBlock}
for regexp in ['ul', 'ol', 'quoted', 'tabbed']:
m = RE.regExp[regexp].match(lines[0])
if m:
processFn[regexp](parent_elem, lines, inList)
return
# We are NOT looking at one of the high-level structures like
# lists or blockquotes. So, it's just a regular paragraph
# (though perhaps nested inside a list or something else). If
# we are NOT inside a list, we just need to look for a blank
# line to find the end of the block. If we ARE inside a
# list, however, we need to consider that a sublist does not
# need to be separated by a blank line. Rather, the following
# markup is legal:
#
# * The top level list item
#
# Another paragraph of the list. This is where we are now.
# * Underneath we might have a sublist.
#
if inList:
start, lines = self._linesUntil(lines, (lambda line:
RE.regExp['ul'].match(line)
or RE.regExp['ol'].match(line)
or not line.strip()))
self._processSection(parent_elem, start,
inList - 1, looseList = looseList)
inList = inList-1
else: # Ok, so it's just a simple block
paragraph, lines = self._linesUntil(lines, lambda line:
not line.strip())
if len(paragraph) and paragraph[0].startswith('#'):
self._processHeader(parent_elem, paragraph)
elif paragraph:
self._processParagraph(parent_elem, paragraph,
inList, looseList)
if lines and not lines[0].strip():
lines = lines[1:] # skip the first (blank) line
def _processHeader(self, parent_elem, paragraph):
m = RE.regExp['header'].match(paragraph[0])
if m:
level = len(m.group(1))
h = self.doc.createElement("h%d" % level)
parent_elem.appendChild(h)
for item in self._handleInline(m.group(2).strip()):
h.appendChild(item)
else:
message(CRITICAL, "We've got a problem header!")
def _processParagraph(self, parent_elem, paragraph, inList, looseList):
list = self._handleInline("\n".join(paragraph))
if ( parent_elem.nodeName == 'li'
and not (looseList or parent_elem.childNodes)):
# If this is the first paragraph inside "li", don't
# put <p> around it - append the paragraph bits directly
# onto parent_elem
el = parent_elem
else:
# Otherwise make a "p" element
el = self.doc.createElement("p")
parent_elem.appendChild(el)
for item in list:
el.appendChild(item)
def _processUList(self, parent_elem, lines, inList):
self._processList(parent_elem, lines, inList,
listexpr='ul', tag = 'ul')
def _processOList(self, parent_elem, lines, inList):
self._processList(parent_elem, lines, inList,
listexpr='ol', tag = 'ol')
def _processList(self, parent_elem, lines, inList, listexpr, tag):
"""Given a list of document lines starting with a list item,
finds the end of the list, breaks it up, and recursively
processes each list item and the remainder of the text file.
@param parent_elem: A dom element to which the content will be added
@param lines: a list of lines
@param inList: a level
@returns: None"""
ul = self.doc.createElement(tag) # ul might actually be '<ol>'
parent_elem.appendChild(ul)
looseList = 0
# Make a list of list items
items = []
item = -1
i = 0 # a counter to keep track of where we are
for line in lines:
loose = 0
if not line.strip():
# If we see a blank line, this _might_ be the end of the list
i += 1
loose = 1
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next = lines[j]
break
else:
# There is no more text => end of the list
break
# Check if the next non-blank line is still a part of the list
if ( RE.regExp['ul'].match(next) or
RE.regExp['ol'].match(next) or
RE.regExp['tabbed'].match(next) ):
# get rid of any white space in the line
items[item].append(line.strip())
looseList = loose or looseList
continue
else:
break # found end of the list
# Now we need to detect list items (at the current level)
# while also detabbing child elements if necessary
for expr in ['ul', 'ol', 'tabbed']:
m = RE.regExp[expr].match(line)
if m:
if expr in ['ul', 'ol']: # We are looking at a new item
#if m.group(1) :
# Removed the check to allow for a blank line
# at the beginning of the list item
items.append([m.group(1)])
item += 1
elif expr == 'tabbed': # This line needs to be detabbed
items[item].append(m.group(4)) #after the 'tab'
i += 1
break
else:
items[item].append(line) # Just regular continuation
i += 1 # added on 2006.02.25
else:
i += 1
# Add the dom elements
for item in items:
li = self.doc.createElement("li")
ul.appendChild(li)
self._processSection(li, item, inList + 1, looseList = looseList)
# Process the remaining part of the section
self._processSection(parent_elem, lines[i:], inList)
def _linesUntil(self, lines, condition):
""" A utility function to break a list of lines upon the
first line that satisfies a condition. The condition
argument should be a predicate function.
"""
i = -1
for line in lines:
i += 1
if condition(line): break
else:
i += 1
return lines[:i], lines[i:]
def _processQuote(self, parent_elem, lines, inList):
"""Given a list of document lines starting with a quote finds
the end of the quote, unindents it and recursively
processes the body of the quote and the remainder of the
text file.
@param parent_elem: DOM element to which the content will be added
@param lines: a list of lines
@param inList: a level
@returns: None """
dequoted = []
i = 0
blank_line = False # allow one blank line between paragraphs
for line in lines:
m = RE.regExp['quoted'].match(line)
if m:
dequoted.append(m.group(1))
i += 1
blank_line = False
elif not blank_line and line.strip() != '':
dequoted.append(line)
i += 1
elif not blank_line and line.strip() == '':
dequoted.append(line)
i += 1
blank_line = True
else:
break
blockquote = self.doc.createElement('blockquote')
parent_elem.appendChild(blockquote)
self._processSection(blockquote, dequoted, inList)
self._processSection(parent_elem, lines[i:], inList)
def _processCodeBlock(self, parent_elem, lines, inList):
"""Given a list of document lines starting with a code block
finds the end of the block, puts it into the dom verbatim
wrapped in ("<pre><code>") and recursively processes the
remainder of the text file.
@param parent_elem: DOM element to which the content will be added
@param lines: a list of lines
@param inList: a level
@returns: None"""
detabbed, theRest = self.blockGuru.detectTabbed(lines)
pre = self.doc.createElement('pre')
code = self.doc.createElement('code')
parent_elem.appendChild(pre)
pre.appendChild(code)
text = "\n".join(detabbed).rstrip()+"\n"
#text = text.replace("&", "&")
code.appendChild(self.doc.createTextNode(text))
self._processSection(parent_elem, theRest, inList)
def _handleInline (self, line, patternIndex=0):
"""Transform a Markdown line with inline elements to an XHTML
fragment.
This function uses auxiliary objects called inline patterns.
See notes on inline patterns above.
@param line: A line of Markdown text
@param patternIndex: The index of the inlinePattern to start with
@return: A list of NanoDom nodes """
parts = [line]
while patternIndex < len(self.inlinePatterns):
i = 0
while i < len(parts):
x = parts[i]
if isinstance(x, (str, unicode)):
result = self._applyPattern(x, \
self.inlinePatterns[patternIndex], \
patternIndex)
if result:
i -= 1
parts.remove(x)
for y in result:
parts.insert(i+1,y)
i += 1
patternIndex += 1
for i in range(len(parts)):
x = parts[i]
if isinstance(x, (str, unicode)):
parts[i] = self.doc.createTextNode(x)
return parts
def _applyPattern(self, line, pattern, patternIndex):
""" Given a pattern name, this function checks if the line
fits the pattern, creates the necessary elements, and returns
back a list consisting of NanoDom elements and/or strings.
@param line: the text to be processed
@param pattern: the pattern to be checked
@returns: the appropriate newly created NanoDom element if the
pattern matches, None otherwise.
"""
# match the line to pattern's pre-compiled reg exp.
# if no match, move on.
m = pattern.getCompiledRegExp().match(line)
if not m:
return None
# if we got a match let the pattern make us a NanoDom node
# if it doesn't, move on
node = pattern.handleMatch(m, self.doc)
# check if any of this nodes have children that need processing
if isinstance(node, Element):
if not node.nodeName in ["code", "pre"]:
for child in node.childNodes:
if isinstance(child, TextNode):
result = self._handleInline(child.value, patternIndex+1)
if result:
if result == [child]:
continue
result.reverse()
#to make insertion easier
position = node.childNodes.index(child)
node.removeChild(child)
for item in result:
if isinstance(item, (str, unicode)):
if len(item) > 0:
node.insertChild(position,
self.doc.createTextNode(item))
else:
node.insertChild(position, item)
if node:
# Note: returned in reverse order; inserting each element at the same
# index in _handleInline restores left-to-right order.
return ( m.groups()[-1], # the text to the right of the match
node, # the new node
m.group(1)) # the text to the left of the match
else:
return None
def convert (self, source = None):
"""Return the document in XHTML format.
@returns: A serialized XHTML body."""
if source is not None: #Allow blank string
self.source = source
if not self.source:
return u""
try:
self.source = unicode(self.source)
except UnicodeDecodeError:
message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
return u""
for pp in self.textPreprocessors:
self.source = pp.run(self.source)
doc = self._transform()
xml = doc.toxml()
# Return everything but the top level tag
if self.stripTopLevelTags:
xml = xml.strip()[23:-7] + "\n"
for pp in self.textPostprocessors:
xml = pp.run(xml)
return (self.docType + xml).strip()
def __str__(self):
''' Report info about instance. Markdown always returns unicode. '''
if self.source is None:
status = 'in which no source text has been assigned.'
else:
status = 'which contains %d chars and %d line(s) of source.'%\
(len(self.source), self.source.count('\n')+1)
return 'An instance of "%s" %s'% (self.__class__, status)
__unicode__ = convert # markdown should always return a unicode string
# ====================================================================
def markdownFromFile(input = None,
output = None,
extensions = [],
encoding = None,
message_threshold = CRITICAL,
safe = False):
global console_hndlr
console_hndlr.setLevel(message_threshold)
message(DEBUG, "input file: %s" % input)
if not encoding:
encoding = "utf-8"
input_file = codecs.open(input, mode="r", encoding=encoding)
text = input_file.read()
input_file.close()
text = removeBOM(text, encoding)
new_text = markdown(text, extensions, safe_mode = safe)
if output:
output_file = codecs.open(output, "w", encoding=encoding)
output_file.write(new_text)
output_file.close()
else:
sys.stdout.write(new_text.encode(encoding))
def markdown(text,
extensions = [],
safe_mode = False):
message(DEBUG, "in markdown.markdown(), received text:\n%s" % text)
extension_names = []
extension_configs = {}
for ext in extensions:
pos = ext.find("(")
if pos == -1:
extension_names.append(ext)
else:
name = ext[:pos]
extension_names.append(name)
pairs = [x.split("=") for x in ext[pos+1:-1].split(",")]
configs = [(x.strip(), y.strip()) for (x, y) in pairs]
extension_configs[name] = configs
md = Markdown(extensions=extension_names,
extension_configs=extension_configs,
safe_mode = safe_mode)
return md.convert(text)
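
# Illustrative sketch (not part of the original module): calling the wrapper
# above. Extension options may be embedded in the extension name, e.g. a
# hypothetical "myext(KEY=value)"; the text before "(" selects the mdx_myext
# module that registerExtensions() tries to import.
def _markdown_usage_sketch():
    return markdown("Hello *world*", extensions=[], safe_mode=False)
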
class Extension:
def __init__(self, configs = {}):
self.config = configs
def getConfig(self, key):
if self.config.has_key(key):
return self.config[key][0]
else:
return ""
def getConfigInfo(self):
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
self.config[key][0] = value
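
# Illustrative sketch (not part of the original module): the smallest possible
# extension. registerExtensions() imports a module named "mdx_<name>", calls its
# makeExtension(configs) factory, then calls extendMarkdown(md, md_globals) on
# the result. Config entries are [value, description] pairs read via getConfig().
class _NoopExtension (Extension):

    def __init__(self, configs=[]):
        self.config = {'MARKER': ['@@@', 'Hypothetical setting, illustration only']}
        for key, value in configs:
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        # A real extension would register preprocessors, patterns or
        # postprocessors on md here.
        pass
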
OPTPARSE_WARNING = """
Python 2.3 or higher required for advanced command line options.
For lower versions of Python use:
%s INPUT_FILE > OUTPUT_FILE
""" % EXECUTABLE_NAME_FOR_USAGE
def parse_options():
try:
optparse = __import__("optparse")
except:
if len(sys.argv) == 2:
return {'input': sys.argv[1],
'output': None,
'message_threshold': CRITICAL,
'safe': False,
'extensions': [],
'encoding': None }
else:
print OPTPARSE_WARNING
return None
parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
parser.add_option("-f", "--file", dest="filename",
help="write output to OUTPUT_FILE",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="encoding for input and output files",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=60, dest="verbose",
help="suppress all messages")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="print info messages")
parser.add_option("-s", "--safe", dest="safe", default=False,
metavar="SAFE_MODE",
help="same mode ('replace', 'remove' or 'escape' user's HTML tag)")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="print debug messages")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "load extension EXTENSION", metavar="EXTENSION")
(options, args) = parser.parse_args()
if not len(args) == 1:
parser.print_help()
return None
else:
input_file = args[0]
if not options.extensions:
options.extensions = []
return {'input': input_file,
'output': options.filename,
'message_threshold': options.verbose,
'safe': options.safe,
'extensions': options.extensions,
'encoding': options.encoding }
if __name__ == '__main__':
""" Run Markdown from the command line. """
options = parse_options()
#if os.access(inFile, os.R_OK):
if not options:
sys.exit(0)
markdownFromFile(**options)
| gpl-2.0 | -5,551,722,385,668,865,000 | 30.939865 | 158 | 0.518454 | false |
KamranMackey/readthedocs.org | fabfile.py | 35 | 1521 | from fabric.api import lcd, local
from fabric.decorators import runs_once
import os
fabfile_dir = os.path.dirname(__file__)
def update_theme():
theme_dir = os.path.join(fabfile_dir, 'readthedocs', 'templates', 'sphinx')
if not os.path.exists('/tmp/sphinx_rtd_theme'):
local('git clone https://github.com/snide/sphinx_rtd_theme.git /tmp/sphinx_rtd_theme')
with lcd('/tmp/sphinx_rtd_theme'):
local('git remote update')
local('git reset --hard origin/master ')
local('cp -r /tmp/sphinx_rtd_theme/sphinx_rtd_theme %s' % theme_dir)
local('cp -r /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/fonts/ %s' % os.path.join(fabfile_dir, 'media', 'font'))
local('cp /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/css/badge_only.css %s' % os.path.join(fabfile_dir, 'media', 'css'))
local('cp /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/css/theme.css %s' %
os.path.join(fabfile_dir, 'media', 'css', 'sphinx_rtd_theme.css'))
def i18n():
with lcd('readthedocs'):
local('rm -rf rtd_tests/tests/builds/')
local('tx pull')
local('./manage.py makemessages --all')
#local('tx push -s')
local('./manage.py compilemessages')
def i18n_docs():
with lcd('docs'):
# Update our translations
local('tx pull -a')
local('sphinx-intl build')
# Push new ones
local('make gettext')
local('tx push -s')
@runs_once
def spider():
local('patu.py -d1 readthedocs.org')
| mit | -5,699,806,473,283,415,000 | 33.568182 | 131 | 0.619987 | false |
niun/motty | motty/com.py | 1 | 7626 | import logging
from logging import debug, info, warning, error, critical
import serial
from serial.tools import list_ports
import time
import threading
from collections.abc import Iterable
from motty.config import Config
class HistoryObserver(object):
def __init__(self):
pass
def onNewHistoryEntry(self, entry, history_index):
pass
def onUpdateHistoryEntry(self, entry, history_index, update_start_index):
pass
def onRemoveOldHistory(self, history_index, count):
pass
def notify(self, event_name, entry=None, history_index=None, update_start_index=0, count=0):
if event_name == "new":
self.onNewHistoryEntry(entry, history_index)
elif event_name == "update":
self.onUpdateHistoryEntry(entry, history_index, update_start_index)
elif event_name == "pop":
self.onRemoveOldHistory(history_index, count)
class HistoryEntry(object):
"""Holds a timed data stream record"""
def __init__(self, direction, port, ts=[], data=b""):
""":param direction: "rx" or "tx" for received or transmitted data
:param port: system name of the port, data was received / transmitted
:param ts: list of timestamp tuples (index of corresponding byte, ts)
:param data: bytes string"""
self.direction = direction
self.port = port
self.ts = ts
self.data = data
def is_terminated(self, termbytes, timeout):
if (termbytes == b""):
return True
if (termbytes != None) and (self.data[-len(termbytes):] == termbytes):
return True
else:
# termbytes deactivated
# or no termbytes at end of data
# => check for timeout which would terminate as well:
if (timeout != None) and (timeout >= 0):
now = time.time()
if (now - self.endtime()) > timeout:
return True
return False
def update(self, ts, data):
offset = len(self.data)
self.ts += [(i+offset, s) for i, s in ts]
self.data += data
def starttime(self):
"""returns timestamp for the timestamped byte with the lowest index"""
# assuming the first timestamp is the earliest:
return self.ts[0][1]
def endtime(self):
"""returns latest timestamp"""
# assuming the last timestamp is the latest:
return self.ts[-1][1]
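
# Illustrative sketch (not part of the original module): terminator handling.
# An entry counts as terminated once its data ends with the configured
# terminator bytes, or once the timeout since its last timestamp has elapsed.
def _history_entry_sketch():
    entry = HistoryEntry("rx", "ttyUSB0", ts=[(0, time.time())], data=b"hello")
    entry.update([(0, time.time())], b" world\n")
    return entry.is_terminated(b"\n", None)   # True: data ends with b"\n"
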
class History(object):
"""class used by the Communication class to hold received and send data"""
def __init__(self, maxentries=None, terminator=None, timeout=None, observers=[]):
self.maxentries = maxentries
self.terminator = terminator
self.timeout = timeout
assert(isinstance(observers, Iterable))
self.observers = observers
self.entries = []
def registerObserver(self, observer):
self.observers.append(observer)
def notifyObservers(self, event_name, **kwargs):
for observer in self.observers:
observer.notify(event_name, **kwargs)
def find(self, port, direction):
i = len(self.entries)
while i > 0:
i -= 1;
entry = self.entries[i]
if entry.port == port and entry.direction == direction:
return (i, entry)
return (0, None)
def update(self, entry):
"""append a new entry to the last existing one for the same port and
direction, if that entry was not terminated"""
i, match = self.find(entry.port, entry.direction)
if match and not match.is_terminated(self.terminator, self.timeout):
start = len(entry.data)
match.update(entry.ts, entry.data)
self.notifyObservers("update", entry=match, history_index=i, update_start_index=start)
else:
self.entries.append(entry)
i = len(self.entries) - 1
self.notifyObservers("new", entry=entry, history_index=i)
if self.entries[i].is_terminated(self.terminator, self.timeout):
self.entries.pop(i)
self.notifyObservers("pop",history_index=i, count=1)
class Port(serial.Serial):
"""Extends the pySerial.Serial class with some attributes to maintain
multiple open ports"""
def __init__(self, port, description="", rx=False, tx=False, **kwargs):
serial.Serial.__init__(self, port=port, **kwargs)
object.__setattr__(self, 'rx', rx)
object.__setattr__(self, 'tx', tx)
self.description = description or port
self.update_state()
def __setattr__(self, name, value):
serial.Serial.__setattr__(self, name, value)
if name in ("rx", "tx"):
self.update_state()
def update_state(self):
if self.rx or self.tx:
self.open()
else:
self.close()
def open(self):
if not self.isOpen():
serial.Serial.open(self)
def close(self):
if self.isOpen():
serial.Serial.flush(self)
serial.Serial.close(self)
def send(self, data):
self.write(data)
class Communication(threading.Thread):
"""manage communication with multiple serial ports in a seperate thread"""
def __init__(self, history):
threading.Thread.__init__(self)
cfg = Config()
self.ports = {}
for port, desc, _unused01 in list_ports.comports():
if cfg.ports[port]:
desc = cfg.ports[port]['description'] or desc
baudrate = cfg.ports[port]['baud'] or 9600
listen = cfg.ports[port]['listen']
listen = True if listen == None else listen
try:
self.ports[port] = Port(port, desc, rx=listen, tx=True, baudrate=baudrate)
except serial.SerialException as emsg:
warning("Unable to open port {}. {}".format(port,emsg))
assert(isinstance(history, History))
self.history = history
def run(self):
while (self.alive):
for port in iter(self.ports.values()):
if port.isOpen(): # TODO and port.rx:
ts = (0, time.time())
try:
data = port.read(port.inWaiting())
if len(data) > 0:
debug("[{}]<={} ".format(port.name,len(data)))
except serial.SerialException as e:
debug("E[{}]: {}".format(port.name,e))
data = b""
if len(data) > 0:
self.history.update(HistoryEntry("rx", port.name, [ts], data))
for port in iter(self.ports.values()):
port.close()
def start(self):
self.alive = True
threading.Thread.start(self)
def join(self, timeout=None):
self.alive = False
threading.Thread.join(self, timeout)
def send(self, port_name: str, data: bytes):
"""Send data on the serial port with name port_name
:param port_name: Name of the serial port given by the OS
:param data: bytes string to send
:note: Not Thread-safe. Calling only from one other thread
is safe because Communication.send() is blocking
"""
if port_name and data:
ts = (0, time.time())
num = self.ports[port_name].write(data)
self.history.update(HistoryEntry("tx", port_name, [ts], data))
else:
debug("Did not send anything (port: '{}', data: '{}')".format(port_name, data))
| mit | -6,095,279,312,184,106,000 | 33.821918 | 98 | 0.575793 | false |
Donkyhotay/MoonPy | twisted/mail/maildir.py | 4 | 15496 | # -*- test-case-name: twisted.mail.test.test_mail -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Maildir-style mailbox support
"""
import os
import stat
import socket
import time
from zope.interface import implements
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from twisted.python.compat import set
from twisted.mail import pop3
from twisted.mail import smtp
from twisted.protocols import basic
from twisted.persisted import dirdbm
from twisted.python import log, failure
from twisted.python.hashlib import md5
from twisted.mail import mail
from twisted.internet import interfaces, defer, reactor
from twisted import cred
import twisted.cred.portal
import twisted.cred.credentials
import twisted.cred.checkers
import twisted.cred.error
INTERNAL_ERROR = '''\
From: Twisted.mail Internals
Subject: An Error Occurred
An internal server error has occurred. Please contact the
server administrator.
'''
class _MaildirNameGenerator:
"""
Utility class to generate a unique maildir name
@ivar _clock: An L{IReactorTime} provider which will be used to learn
the current time to include in names returned by L{generate} so that
they sort properly.
"""
n = 0
p = os.getpid()
s = socket.gethostname().replace('/', r'\057').replace(':', r'\072')
def __init__(self, clock):
self._clock = clock
def generate(self):
"""
Return a string which is intended to be unique across all calls to this
function (across all processes, reboots, etc).
Strings returned by earlier calls to this method will compare less
than strings returned by later calls as long as the clock provided
doesn't go backwards.
"""
self.n = self.n + 1
t = self._clock.seconds()
seconds = str(int(t))
microseconds = '%07d' % (int((t - int(t)) * 10e6),)
return '%s.M%sP%sQ%s.%s' % (seconds, microseconds,
self.p, self.n, self.s)
_generateMaildirName = _MaildirNameGenerator(reactor).generate
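
# Illustrative sketch (not part of the original module): names produced by one
# generator sort in creation order (e.g. "1292868000.M0000001P1234Q1.host"),
# because the seconds/microseconds fields come first and the Q<n> counter grows.
def _maildir_name_sketch():
    gen = _MaildirNameGenerator(reactor)
    first = gen.generate()
    second = gen.generate()
    return first < second   # True as long as the clock does not run backwards
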
def initializeMaildir(dir):
if not os.path.isdir(dir):
os.mkdir(dir, 0700)
for subdir in ['new', 'cur', 'tmp', '.Trash']:
os.mkdir(os.path.join(dir, subdir), 0700)
for subdir in ['new', 'cur', 'tmp']:
os.mkdir(os.path.join(dir, '.Trash', subdir), 0700)
# touch
open(os.path.join(dir, '.Trash', 'maildirfolder'), 'w').close()
class MaildirMessage(mail.FileMessage):
size = None
def __init__(self, address, fp, *a, **kw):
header = "Delivered-To: %s\n" % address
fp.write(header)
self.size = len(header)
mail.FileMessage.__init__(self, fp, *a, **kw)
def lineReceived(self, line):
mail.FileMessage.lineReceived(self, line)
self.size += len(line)+1
def eomReceived(self):
self.finalName = self.finalName+',S=%d' % self.size
return mail.FileMessage.eomReceived(self)
class AbstractMaildirDomain:
"""Abstract maildir-backed domain.
"""
alias = None
root = None
def __init__(self, service, root):
"""Initialize.
"""
self.root = root
def userDirectory(self, user):
"""Get the maildir directory for a given user
Override to specify where to save mails for users.
Return None for non-existing users.
"""
return None
##
## IAliasableDomain
##
def setAliasGroup(self, alias):
self.alias = alias
##
## IDomain
##
def exists(self, user, memo=None):
"""Check for existence of user in the domain
"""
if self.userDirectory(user.dest.local) is not None:
return lambda: self.startMessage(user)
try:
a = self.alias[user.dest.local]
except:
raise smtp.SMTPBadRcpt(user)
else:
aliases = a.resolve(self.alias, memo)
if aliases:
return lambda: aliases
log.err("Bad alias configuration: " + str(user))
raise smtp.SMTPBadRcpt(user)
def startMessage(self, user):
"""Save a message for a given user
"""
if isinstance(user, str):
name, domain = user.split('@', 1)
else:
name, domain = user.dest.local, user.dest.domain
dir = self.userDirectory(name)
fname = _generateMaildirName()
filename = os.path.join(dir, 'tmp', fname)
fp = open(filename, 'w')
return MaildirMessage('%s@%s' % (name, domain), fp, filename,
os.path.join(dir, 'new', fname))
def willRelay(self, user, protocol):
return False
def addUser(self, user, password):
raise NotImplementedError
def getCredentialsCheckers(self):
raise NotImplementedError
##
## end of IDomain
##
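
# Illustrative sketch (not part of the original module): the smallest concrete
# domain. It accepts any local part and keeps each user's maildir directly
# under the domain root.
class _FlatMaildirDomain(AbstractMaildirDomain):

    def userDirectory(self, user):
        dir = os.path.join(self.root, user)
        if not os.path.exists(dir):
            initializeMaildir(dir)
        return dir
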
class _MaildirMailboxAppendMessageTask:
implements(interfaces.IConsumer)
osopen = staticmethod(os.open)
oswrite = staticmethod(os.write)
osclose = staticmethod(os.close)
osrename = staticmethod(os.rename)
def __init__(self, mbox, msg):
self.mbox = mbox
self.defer = defer.Deferred()
self.openCall = None
if not hasattr(msg, "read"):
msg = StringIO.StringIO(msg)
self.msg = msg
# This is needed, as this startup phase might call defer.errback and zero out self.defer
# By doing it on the reactor iteration appendMessage is able to use .defer without problems.
reactor.callLater(0, self.startUp)
def startUp(self):
self.createTempFile()
if self.fh != -1:
self.filesender = basic.FileSender()
self.filesender.beginFileTransfer(self.msg, self)
def registerProducer(self, producer, streaming):
self.myproducer = producer
self.streaming = streaming
if not streaming:
self.prodProducer()
def prodProducer(self):
self.openCall = None
if self.myproducer is not None:
self.openCall = reactor.callLater(0, self.prodProducer)
self.myproducer.resumeProducing()
def unregisterProducer(self):
self.myproducer = None
self.streaming = None
self.osclose(self.fh)
self.moveFileToNew()
def write(self, data):
try:
self.oswrite(self.fh, data)
except:
self.fail()
def fail(self, err=None):
if err is None:
err = failure.Failure()
if self.openCall is not None:
self.openCall.cancel()
self.defer.errback(err)
self.defer = None
def moveFileToNew(self):
while True:
newname = os.path.join(self.mbox.path, "new", _generateMaildirName())
try:
self.osrename(self.tmpname, newname)
break
except OSError, (err, estr):
import errno
# if the newname exists, retry with a new newname.
if err != errno.EEXIST:
self.fail()
newname = None
break
if newname is not None:
self.mbox.list.append(newname)
self.defer.callback(None)
self.defer = None
def createTempFile(self):
attr = (os.O_RDWR | os.O_CREAT | os.O_EXCL
| getattr(os, "O_NOINHERIT", 0)
| getattr(os, "O_NOFOLLOW", 0))
tries = 0
self.fh = -1
while True:
self.tmpname = os.path.join(self.mbox.path, "tmp", _generateMaildirName())
try:
self.fh = self.osopen(self.tmpname, attr, 0600)
return None
except OSError:
tries += 1
if tries > 500:
self.defer.errback(RuntimeError("Could not create tmp file for %s" % self.mbox.path))
self.defer = None
return None
class MaildirMailbox(pop3.Mailbox):
"""Implement the POP3 mailbox semantics for a Maildir mailbox
"""
AppendFactory = _MaildirMailboxAppendMessageTask
def __init__(self, path):
"""Initialize with name of the Maildir mailbox
"""
self.path = path
self.list = []
self.deleted = {}
initializeMaildir(path)
for name in ('cur', 'new'):
for file in os.listdir(os.path.join(path, name)):
self.list.append((file, os.path.join(path, name, file)))
self.list.sort()
self.list = [e[1] for e in self.list]
def listMessages(self, i=None):
"""Return a list of lengths of all files in new/ and cur/
"""
if i is None:
ret = []
for mess in self.list:
if mess:
ret.append(os.stat(mess)[stat.ST_SIZE])
else:
ret.append(0)
return ret
return self.list[i] and os.stat(self.list[i])[stat.ST_SIZE] or 0
def getMessage(self, i):
"""Return an open file-pointer to a message
"""
return open(self.list[i])
def getUidl(self, i):
"""Return a unique identifier for a message
This is done using the basename of the filename.
It is globally unique because this is how Maildirs are designed.
"""
# Returning the actual filename is a mistake. Hash it.
base = os.path.basename(self.list[i])
return md5(base).hexdigest()
def deleteMessage(self, i):
"""Delete a message
This only moves a message to the .Trash/ subfolder,
so it can be undeleted by an administrator.
"""
trashFile = os.path.join(
self.path, '.Trash', 'cur', os.path.basename(self.list[i])
)
os.rename(self.list[i], trashFile)
self.deleted[self.list[i]] = trashFile
self.list[i] = 0
def undeleteMessages(self):
"""Undelete any deleted messages it is possible to undelete
This moves any messages from .Trash/ subfolder back to their
original position, and empties out the deleted dictionary.
"""
for (real, trash) in self.deleted.items():
try:
os.rename(trash, real)
except OSError, (err, estr):
import errno
# If the file has been deleted from disk, oh well!
if err != errno.ENOENT:
raise
# This is a pass
else:
try:
self.list[self.list.index(0)] = real
except ValueError:
self.list.append(real)
self.deleted.clear()
def appendMessage(self, txt):
"""Appends a message into the mailbox."""
task = self.AppendFactory(self, txt)
return task.defer
class StringListMailbox:
"""
L{StringListMailbox} is an in-memory mailbox.
@ivar msgs: A C{list} of C{str} giving the contents of each message in the
mailbox.
@ivar _delete: A C{set} of the indexes of messages which have been deleted
since the last C{sync} call.
"""
implements(pop3.IMailbox)
def __init__(self, msgs):
self.msgs = msgs
self._delete = set()
def listMessages(self, i=None):
"""
Return the length of the message at the given offset, or a list of all
message lengths.
"""
if i is None:
return [self.listMessages(i) for i in range(len(self.msgs))]
if i in self._delete:
return 0
return len(self.msgs[i])
def getMessage(self, i):
"""
Return an in-memory file-like object for the message content at the
given offset.
"""
return StringIO.StringIO(self.msgs[i])
def getUidl(self, i):
"""
Return a hash of the contents of the message at the given offset.
"""
return md5(self.msgs[i]).hexdigest()
def deleteMessage(self, i):
"""
Mark the given message for deletion.
"""
self._delete.add(i)
def undeleteMessages(self):
"""
Reset deletion tracking, undeleting any messages which have been
deleted since the last call to C{sync}.
"""
self._delete = set()
def sync(self):
"""
Discard the contents of any message marked for deletion and reset
deletion tracking.
"""
for index in self._delete:
self.msgs[index] = ""
self._delete = set()
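
# Illustrative sketch (not part of the original module): StringListMailbox keeps
# POP3 delete/undelete semantics in memory -- deletions only become permanent
# after sync().
def _string_list_mailbox_sketch():
    box = StringListMailbox(["first message", "second message"])
    box.deleteMessage(0)
    hidden = box.listMessages(0)        # 0 while the message is marked deleted
    box.undeleteMessages()
    restored = box.listMessages(0)      # back to len("first message")
    return hidden, restored, box.getUidl(1)
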
class MaildirDirdbmDomain(AbstractMaildirDomain):
"""A Maildir Domain where membership is checked by a dirdbm file
"""
implements(cred.portal.IRealm, mail.IAliasableDomain)
portal = None
_credcheckers = None
def __init__(self, service, root, postmaster=0):
"""Initialize
The first argument is where the Domain directory is rooted.
The second is whether non-existing addresses are simply
forwarded to postmaster instead of being bounced outright.
The directory structure of a MaildirDirdbmDomain is:
/passwd <-- a dirdbm file
/USER/{cur,new,del} <-- each user has these three directories
"""
AbstractMaildirDomain.__init__(self, service, root)
dbm = os.path.join(root, 'passwd')
if not os.path.exists(dbm):
os.makedirs(dbm)
self.dbm = dirdbm.open(dbm)
self.postmaster = postmaster
def userDirectory(self, name):
"""Get the directory for a user
If the user exists in the dirdbm file, return the directory
os.path.join(root, name), creating it if necessary.
Otherwise, returns postmaster's mailbox instead if bounces
go to postmaster, otherwise return None
"""
if not self.dbm.has_key(name):
if not self.postmaster:
return None
name = 'postmaster'
dir = os.path.join(self.root, name)
if not os.path.exists(dir):
initializeMaildir(dir)
return dir
##
## IDomain
##
def addUser(self, user, password):
self.dbm[user] = password
# Ensure it is initialized
self.userDirectory(user)
def getCredentialsCheckers(self):
if self._credcheckers is None:
self._credcheckers = [DirdbmDatabase(self.dbm)]
return self._credcheckers
##
## IRealm
##
def requestAvatar(self, avatarId, mind, *interfaces):
if pop3.IMailbox not in interfaces:
raise NotImplementedError("No interface")
if avatarId == cred.checkers.ANONYMOUS:
mbox = StringListMailbox([INTERNAL_ERROR])
else:
mbox = MaildirMailbox(os.path.join(self.root, avatarId))
return (
pop3.IMailbox,
mbox,
lambda: None
)
class DirdbmDatabase:
implements(cred.checkers.ICredentialsChecker)
credentialInterfaces = (
cred.credentials.IUsernamePassword,
cred.credentials.IUsernameHashedPassword
)
def __init__(self, dbm):
self.dirdbm = dbm
def requestAvatarId(self, c):
if c.username in self.dirdbm:
if c.checkPassword(self.dirdbm[c.username]):
return c.username
raise cred.error.UnauthorizedLogin()
| gpl-3.0 | 8,267,360,452,181,871,000 | 28.972921 | 105 | 0.583441 | false |
coreos/depot_tools | third_party/boto/roboto/awsqueryrequest.py | 70 | 18654 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import os
import boto
import optparse
import copy
import boto.exception
import boto.roboto.awsqueryservice
import bdb
import traceback
try:
import epdb as debugger
except ImportError:
import pdb as debugger
def boto_except_hook(debugger_flag, debug_flag):
def excepthook(typ, value, tb):
if typ is bdb.BdbQuit:
sys.exit(1)
sys.excepthook = sys.__excepthook__
if debugger_flag and sys.stdout.isatty() and sys.stdin.isatty():
if debugger.__name__ == 'epdb':
debugger.post_mortem(tb, typ, value)
else:
debugger.post_mortem(tb)
elif debug_flag:
            traceback.print_tb(tb)
sys.exit(1)
else:
print value
sys.exit(1)
return excepthook
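# Illustrative sketch (editorial addition): the factory above is meant to be
# installed as the interpreter's exception hook, e.g.
#
#     sys.excepthook = boto_except_hook(debugger_flag=True, debug_flag=False)
#
# so that uncaught exceptions drop into pdb/epdb when running in a terminal.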
class Line(object):
def __init__(self, fmt, data, label):
self.fmt = fmt
self.data = data
self.label = label
self.line = '%s\t' % label
self.printed = False
def append(self, datum):
self.line += '%s\t' % datum
def print_it(self):
if not self.printed:
print self.line
self.printed = True
class RequiredParamError(boto.exception.BotoClientError):
def __init__(self, required):
self.required = required
s = 'Required parameters are missing: %s' % self.required
boto.exception.BotoClientError.__init__(self, s)
class EncoderError(boto.exception.BotoClientError):
def __init__(self, error_msg):
s = 'Error encoding value (%s)' % error_msg
boto.exception.BotoClientError.__init__(self, s)
class FilterError(boto.exception.BotoClientError):
def __init__(self, filters):
self.filters = filters
s = 'Unknown filters: %s' % self.filters
boto.exception.BotoClientError.__init__(self, s)
class Encoder:
@classmethod
def encode(cls, p, rp, v, label=None):
if p.name.startswith('_'):
return
try:
mthd = getattr(cls, 'encode_'+p.ptype)
mthd(p, rp, v, label)
except AttributeError:
raise EncoderError('Unknown type: %s' % p.ptype)
@classmethod
def encode_string(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
rp[label] = v
encode_file = encode_string
encode_enum = encode_string
@classmethod
def encode_integer(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
rp[label] = '%d' % v
@classmethod
def encode_boolean(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
if v:
v = 'true'
else:
v = 'false'
rp[label] = v
@classmethod
def encode_datetime(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
rp[label] = v
@classmethod
def encode_array(cls, p, rp, v, l):
v = boto.utils.mklist(v)
if l:
label = l
else:
label = p.name
label = label + '.%d'
for i, value in enumerate(v):
rp[label%(i+1)] = value
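# Illustrative sketch (editorial addition): Encoder flattens Python values into
# the flat query-parameter dict sent with the request.  For a hypothetical
# parameter object ``p`` with name 'InstanceId' and ptype 'array',
#
#     rp = {}
#     Encoder.encode(p, rp, ['i-1', 'i-2'])
#     # rp == {'InstanceId.1': 'i-1', 'InstanceId.2': 'i-2'}
#
# and a 'boolean' parameter named 'DryRun' with value True would yield
# {'DryRun': 'true'}.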
class AWSQueryRequest(object):
ServiceClass = None
Description = ''
Params = []
Args = []
Filters = []
Response = {}
CLITypeMap = {'string' : 'string',
'integer' : 'int',
'int' : 'int',
'enum' : 'choice',
'datetime' : 'string',
'dateTime' : 'string',
'file' : 'string',
'boolean' : None}
@classmethod
def name(cls):
return cls.__name__
def __init__(self, **args):
self.args = args
self.parser = None
self.cli_options = None
self.cli_args = None
self.cli_output_format = None
self.connection = None
self.list_markers = []
self.item_markers = []
self.request_params = {}
self.connection_args = None
def __repr__(self):
return self.name()
def get_connection(self, **args):
if self.connection is None:
self.connection = self.ServiceClass(**args)
return self.connection
@property
def status(self):
retval = None
if self.http_response is not None:
retval = self.http_response.status
return retval
@property
def reason(self):
retval = None
if self.http_response is not None:
retval = self.http_response.reason
return retval
@property
def request_id(self):
retval = None
if self.aws_response is not None:
retval = getattr(self.aws_response, 'requestId')
return retval
def process_filters(self):
filters = self.args.get('filters', [])
filter_names = [f['name'] for f in self.Filters]
unknown_filters = [f for f in filters if f not in filter_names]
if unknown_filters:
raise FilterError('Unknown filters: %s' % unknown_filters)
for i, filter in enumerate(self.Filters):
name = filter['name']
if name in filters:
self.request_params['Filter.%d.Name' % (i+1)] = name
for j, value in enumerate(boto.utils.mklist(filters[name])):
Encoder.encode(filter, self.request_params, value,
'Filter.%d.Value.%d' % (i+1, j+1))
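    # Illustrative sketch (editorial addition): if the first filter declared in
    # self.Filters is named 'instance-state-name' and the caller passes
    # filters={'instance-state-name': ['running', 'stopped']}, the loop above
    # would add roughly:
    #     Filter.1.Name    = 'instance-state-name'
    #     Filter.1.Value.1 = 'running'
    #     Filter.1.Value.2 = 'stopped'
    # to self.request_params.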
def process_args(self, **args):
"""
Responsible for walking through Params defined for the request and:
* Matching them with keyword parameters passed to the request
constructor or via the command line.
* Checking to see if all required parameters have been specified
and raising an exception, if not.
* Encoding each value into the set of request parameters that will
be sent in the request to the AWS service.
"""
self.args.update(args)
self.connection_args = copy.copy(self.args)
if 'debug' in self.args and self.args['debug'] >= 2:
boto.set_stream_logger(self.name())
required = [p.name for p in self.Params+self.Args if not p.optional]
for param in self.Params+self.Args:
if param.long_name:
python_name = param.long_name.replace('-', '_')
else:
python_name = boto.utils.pythonize_name(param.name, '_')
value = None
if python_name in self.args:
value = self.args[python_name]
if value is None:
value = param.default
if value is not None:
if param.name in required:
required.remove(param.name)
if param.request_param:
if param.encoder:
param.encoder(param, self.request_params, value)
else:
Encoder.encode(param, self.request_params, value)
if python_name in self.args:
del self.connection_args[python_name]
if required:
l = []
for p in self.Params+self.Args:
if p.name in required:
if p.short_name and p.long_name:
l.append('(%s, %s)' % (p.optparse_short_name,
p.optparse_long_name))
elif p.short_name:
l.append('(%s)' % p.optparse_short_name)
else:
l.append('(%s)' % p.optparse_long_name)
raise RequiredParamError(','.join(l))
boto.log.debug('request_params: %s' % self.request_params)
self.process_markers(self.Response)
def process_markers(self, fmt, prev_name=None):
if fmt and fmt['type'] == 'object':
for prop in fmt['properties']:
self.process_markers(prop, fmt['name'])
elif fmt and fmt['type'] == 'array':
self.list_markers.append(prev_name)
self.item_markers.append(fmt['name'])
def send(self, verb='GET', **args):
self.process_args(**args)
self.process_filters()
conn = self.get_connection(**self.connection_args)
self.http_response = conn.make_request(self.name(),
self.request_params,
verb=verb)
self.body = self.http_response.read()
boto.log.debug(self.body)
if self.http_response.status == 200:
self.aws_response = boto.jsonresponse.Element(list_marker=self.list_markers,
item_marker=self.item_markers)
h = boto.jsonresponse.XmlHandler(self.aws_response, self)
h.parse(self.body)
return self.aws_response
else:
boto.log.error('%s %s' % (self.http_response.status,
self.http_response.reason))
boto.log.error('%s' % self.body)
raise conn.ResponseError(self.http_response.status,
self.http_response.reason,
self.body)
def add_standard_options(self):
group = optparse.OptionGroup(self.parser, 'Standard Options')
# add standard options that all commands get
group.add_option('-D', '--debug', action='store_true',
help='Turn on all debugging output')
group.add_option('--debugger', action='store_true',
default=False,
help='Enable interactive debugger on error')
group.add_option('-U', '--url', action='store',
help='Override service URL with value provided')
group.add_option('--region', action='store',
help='Name of the region to connect to')
group.add_option('-I', '--access-key-id', action='store',
help='Override access key value')
group.add_option('-S', '--secret-key', action='store',
help='Override secret key value')
group.add_option('--version', action='store_true',
help='Display version string')
if self.Filters:
            group.add_option('--help-filters', action='store_true',
                             help='Display list of available filters')
            group.add_option('--filter', action='append',
                             metavar=' name=value',
                             help='A filter for limiting the results')
self.parser.add_option_group(group)
def process_standard_options(self, options, args, d):
if hasattr(options, 'help_filters') and options.help_filters:
print 'Available filters:'
for filter in self.Filters:
print '%s\t%s' % (filter.name, filter.doc)
sys.exit(0)
if options.debug:
self.args['debug'] = 2
if options.url:
self.args['url'] = options.url
if options.region:
self.args['region'] = options.region
if options.access_key_id:
self.args['aws_access_key_id'] = options.access_key_id
if options.secret_key:
self.args['aws_secret_access_key'] = options.secret_key
if options.version:
# TODO - Where should the version # come from?
print 'version x.xx'
exit(0)
sys.excepthook = boto_except_hook(options.debugger,
options.debug)
def get_usage(self):
s = 'usage: %prog [options] '
l = [ a.long_name for a in self.Args ]
s += ' '.join(l)
for a in self.Args:
if a.doc:
s += '\n\n\t%s - %s' % (a.long_name, a.doc)
return s
def build_cli_parser(self):
self.parser = optparse.OptionParser(description=self.Description,
usage=self.get_usage())
self.add_standard_options()
for param in self.Params:
ptype = action = choices = None
if param.ptype in self.CLITypeMap:
ptype = self.CLITypeMap[param.ptype]
action = 'store'
if param.ptype == 'boolean':
action = 'store_true'
elif param.ptype == 'array':
if len(param.items) == 1:
ptype = param.items[0]['type']
action = 'append'
elif param.cardinality != 1:
action = 'append'
if ptype or action == 'store_true':
if param.short_name:
self.parser.add_option(param.optparse_short_name,
param.optparse_long_name,
action=action, type=ptype,
choices=param.choices,
help=param.doc)
elif param.long_name:
self.parser.add_option(param.optparse_long_name,
action=action, type=ptype,
choices=param.choices,
help=param.doc)
def do_cli(self):
if not self.parser:
self.build_cli_parser()
self.cli_options, self.cli_args = self.parser.parse_args()
d = {}
self.process_standard_options(self.cli_options, self.cli_args, d)
for param in self.Params:
if param.long_name:
p_name = param.long_name.replace('-', '_')
else:
p_name = boto.utils.pythonize_name(param.name)
value = getattr(self.cli_options, p_name)
if param.ptype == 'file' and value:
if value == '-':
value = sys.stdin.read()
else:
path = os.path.expanduser(value)
path = os.path.expandvars(path)
if os.path.isfile(path):
fp = open(path)
value = fp.read()
fp.close()
else:
self.parser.error('Unable to read file: %s' % path)
d[p_name] = value
for arg in self.Args:
if arg.long_name:
p_name = arg.long_name.replace('-', '_')
else:
p_name = boto.utils.pythonize_name(arg.name)
value = None
if arg.cardinality == 1:
if len(self.cli_args) >= 1:
value = self.cli_args[0]
else:
value = self.cli_args
d[p_name] = value
self.args.update(d)
if hasattr(self.cli_options, 'filter') and self.cli_options.filter:
d = {}
for filter in self.cli_options.filter:
name, value = filter.split('=')
d[name] = value
if 'filters' in self.args:
self.args['filters'].update(d)
else:
self.args['filters'] = d
try:
response = self.main()
self.cli_formatter(response)
except RequiredParamError, e:
print e
sys.exit(1)
except self.ServiceClass.ResponseError, err:
print 'Error(%s): %s' % (err.error_code, err.error_message)
sys.exit(1)
except boto.roboto.awsqueryservice.NoCredentialsError, err:
print 'Unable to find credentials.'
sys.exit(1)
except Exception, e:
print e
sys.exit(1)
def _generic_cli_formatter(self, fmt, data, label=''):
if fmt['type'] == 'object':
for prop in fmt['properties']:
if 'name' in fmt:
if fmt['name'] in data:
data = data[fmt['name']]
if fmt['name'] in self.list_markers:
label = fmt['name']
if label[-1] == 's':
label = label[0:-1]
label = label.upper()
self._generic_cli_formatter(prop, data, label)
elif fmt['type'] == 'array':
for item in data:
line = Line(fmt, item, label)
if isinstance(item, dict):
for field_name in item:
line.append(item[field_name])
elif isinstance(item, basestring):
line.append(item)
line.print_it()
def cli_formatter(self, data):
"""
This method is responsible for formatting the output for the
command line interface. The default behavior is to call the
generic CLI formatter which attempts to print something
reasonable. If you want specific formatting, you should
override this method and do your own thing.
:type data: dict
:param data: The data returned by AWS.
"""
if data:
self._generic_cli_formatter(self.Response, data)
| bsd-3-clause | -5,789,155,147,657,026,000 | 36.011905 | 88 | 0.518495 | false |
won0089/oppia | core/domain/recommendations_jobs_continuous.py | 1 | 4744 | # coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs for recommendations."""
__author__ = 'Xinyu Wu'
import ast
from core import jobs
from core.domain import exp_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.platform import models
(exp_models, recommendations_models,) = models.Registry.import_models([
models.NAMES.exploration, models.NAMES.recommendations])
class ExplorationRecommendationsRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
pass
class ExplorationRecommendationsAggregator(
jobs.BaseContinuousComputationManager):
"""A continuous-computation job that computes recommendations for each
exploration.
This job does not have a realtime component. There will be a delay in
propagating new updates to recommendations; the length of the delay
will be approximately the time it takes a batch job to run.
"""
@classmethod
def get_event_types_listened_to(cls):
return []
@classmethod
def _get_realtime_datastore_class(cls):
return ExplorationRecommendationsRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return ExplorationRecommendationsMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
pass
class ExplorationRecommendationsMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""Manager for a MapReduce job that computes a list of recommended
explorations to play after completing some exploration.
"""
@classmethod
def _get_continuous_computation_class(cls):
return ExplorationRecommendationsAggregator
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExpSummaryModel]
@staticmethod
def map(item):
# Only process the exploration if it is not private
if item.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
return
# Note: There is a threshold so that bad recommendations will be
# discarded even if an exploration has few similar explorations.
SIMILARITY_SCORE_THRESHOLD = 3.0
exp_summary_id = item.id
exp_summaries_dict = (
exp_services.get_non_private_exploration_summaries())
# Note: This is needed because the exp_summaries_dict is sometimes
# different from the summaries in the datastore, especially when
# new explorations are added.
if exp_summary_id not in exp_summaries_dict:
return
reference_exp_summary = exp_summaries_dict[exp_summary_id]
for compared_exp_id, compared_exp_summary in exp_summaries_dict.iteritems():
if compared_exp_id != exp_summary_id:
similarity_score = (
recommendations_services.get_item_similarity(
reference_exp_summary.category,
reference_exp_summary.language_code,
reference_exp_summary.owner_ids,
compared_exp_summary.category,
compared_exp_summary.language_code,
compared_exp_summary.exploration_model_last_updated,
compared_exp_summary.owner_ids,
compared_exp_summary.status))
if similarity_score >= SIMILARITY_SCORE_THRESHOLD:
yield (exp_summary_id, {
'similarity_score': similarity_score,
'exp_id': compared_exp_id
})
@staticmethod
def reduce(key, stringified_values):
MAX_RECOMMENDATIONS = 10
other_exploration_similarities = sorted(
[ast.literal_eval(v) for v in stringified_values],
reverse=True,
key=lambda x: x['similarity_score'])
recommended_exploration_ids = [
item['exp_id']
for item in other_exploration_similarities[:MAX_RECOMMENDATIONS]]
recommendations_services.set_recommendations(
key, recommended_exploration_ids)
| apache-2.0 | -3,311,242,314,840,729,000 | 35.775194 | 84 | 0.669477 | false |
tanjot/trimfilename | create_test_directory.py | 1 | 1235 | #!/usr/bin/python3
import os
def main():
test_folder = 'tmp'
for file_name in get_file_structure():
create(os.path.join(test_folder, file_name))
list_files(test_folder)
def get_file_structure():
simple_files = ['123abc.txt', '123xyz.txt', 'fooabc.barmp3', 'abc.abc', '00000000.txt']
level1_files = ['l1/01-artist-songname.mp3', 'l1/12foo.bar', 'l1/123abc.txt']
level2_files = ['l1/l2/12.34.56.txt', 'l1/l2/foo.bar']
files = []
files.extend(simple_files)
files.extend(level1_files)
files.extend(level2_files)
return files
def list_files(startpath):
INDENT = SUBINDENT = 2
for root, _, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * INDENT * (level)
print('{}|_ {}/'.format(indent, os.path.basename(root)))
subindent = ' ' * SUBINDENT * (level + 1)
for f in files:
print('{}|-- {}'.format(subindent, f))
def create(path):
basedir = os.path.dirname(path)
if not os.path.exists(basedir):
os.makedirs(basedir)
touch(path)
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,084,338,971,401,158,400 | 24.729167 | 91 | 0.592713 | false |
tlakshman26/cinder-new-branch | cinder/volume/drivers/dothill/dothill_client.py | 4 | 12185 | # Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from hashlib import md5
import math
import time
from lxml import etree
from oslo_log import log as logging
import requests
import six
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol):
self._login = login
self._password = password
self._base_url = "%s://%s/api" % (protocol, host)
self._session_key = None
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
tree = etree.XML(xml)
if tree.findtext(".//PROPERTY[@name='response-type']") == "success":
self._session_key = tree.findtext(".//PROPERTY[@name='response']")
def login(self):
"""Authenticates the service on the device."""
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url)
except requests.exceptions.RequestException:
raise exception.DotHillConnectionError
self._get_auth_token(xml.text.encode('utf8'))
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
        Raises a DotHillRequestError if the return code is not 0.
"""
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code and return_code != '0':
raise exception.DotHillRequestError(
message=tree.findtext(".//PROPERTY[@name='response']"))
elif not return_code:
raise exception.DotHillRequestError(message="No status found")
def _build_request_url(self, path, *args, **kargs):
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
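    # Illustrative sketch (editorial addition, hypothetical values): with
    # _base_url set to "https://host/api", a call such as
    #     self._build_request_url("/map/volume", "vol1", lun="5", access="rw")
    # would yield "https://host/api/map/volume/lun/5/access/rw/vol1"
    # (keyword order may vary; underscores in keyword names become dashes).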
def _request(self, path, *args, **kargs):
"""Performs an HTTP request on the device.
        Raises a DotHillRequestError if the device responds but the return
        code is not 0; the device's error message is used in the exception.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers)
tree = etree.XML(xml.text.encode('utf8'))
except Exception:
raise exception.DotHillConnectionError
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
url = self._base_url + '/exit'
try:
requests.get(url)
return True
except Exception:
return False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
self._request("/create/volume", name, **path_dict)
return None
def delete_volume(self, name):
self._request("/delete/volumes", name)
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
self._request("/create/snapshots", snap_name, volumes=volume_name)
def delete_snapshot(self, snap_name):
self._request("/delete/snapshot", "cleanup", snap_name)
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
return int(math.ceil(float(size) * 512 / (10 ** 9)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
self._request("/create/host", hostname, id=host)
lun = self._get_first_available_lun_for_host(host)
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def copy_volume(self, src_name, dest_name, same_bknd, dest_bknd_name):
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
if same_bknd == 0:
return
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
raise exception.DotHillRequestError
break
time.sleep(1)
count += 1
time.sleep(5)
def _check_host(self, host):
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""DotHill hostname restrictions.
A host name cannot include " , \ in linear and " , < > \ in realstor
and can have a max of 15 bytes in linear and 32 bytes in realstor.
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
index = len(hostname)
if index > 15:
index = 15
return hostname[:index]
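    # Illustrative examples (editorial addition):
    #     _safe_hostname('my"host,name')        -> 'myhostname'
    #     _safe_hostname('averylonghostname01') -> 'averylonghostna' (15 chars)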
def get_active_iscsi_target_portals(self, backend_type):
# This function returns {'ip': status,}
portals = {}
prop = ""
tree = self._request("/show/ports")
if backend_type == "linear":
prop = "primary-ip-address"
else:
prop = "ip-address"
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name):
tree = self._request("/show/vdisks", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
| apache-2.0 | -2,369,313,715,385,980,400 | 34.943953 | 79 | 0.547641 | false |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/addons/report_webkit/convert.py | 322 | 2581 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.tools import convert
original_xml_import = convert.xml_import
class WebkitXMLImport(original_xml_import):
# Override of xml import in order to add webkit_header tag in report tag.
# As discussed with the R&D Team, the current XML processing API does
# not offer enough flexibity to do it in a cleaner way.
# The solution is not meant to be long term solution, but at least
# allows chaining of several overrides of the _tag_report method,
# and does not require a copy/paste of the original code.
def _tag_report(self, cr, rec, data_node=None, mode=None):
report_id = super(WebkitXMLImport, self)._tag_report(cr, rec, data_node)
if rec.get('report_type') == 'webkit':
header = rec.get('webkit_header')
if header:
if header in ('False', '0', 'None'):
webkit_header_id = False
else:
webkit_header_id = self.id_get(cr, header)
self.pool.get('ir.actions.report.xml').write(cr, self.uid,
report_id, {'webkit_header': webkit_header_id})
return report_id
convert.xml_import = WebkitXMLImport
| agpl-3.0 | 3,012,153,537,692,216,000 | 44.280702 | 80 | 0.656335 | false |
araines/moto | tests/test_sns/test_application.py | 15 | 8932 | from __future__ import unicode_literals
import boto
from boto.exception import BotoServerError
from moto import mock_sns
import sure # noqa
@mock_sns
def test_create_platform_application():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
application_arn.should.equal('arn:aws:sns:us-east-1:123456789012:app/APNS/my-application')
@mock_sns
def test_get_platform_application_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes']
attributes.should.equal({
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
})
@mock_sns
def test_get_missing_platform_application_attributes():
conn = boto.connect_sns()
conn.get_platform_application_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError)
@mock_sns
def test_set_platform_application_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
attributes={
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "platform_principal",
},
)
arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
conn.set_platform_application_attributes(arn,
{"PlatformPrincipal": "other"}
)
attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse']['GetPlatformApplicationAttributesResult']['Attributes']
attributes.should.equal({
"PlatformCredential": "platform_credential",
"PlatformPrincipal": "other",
})
@mock_sns
def test_list_platform_applications():
conn = boto.connect_sns()
conn.create_platform_application(
name="application1",
platform="APNS",
)
conn.create_platform_application(
name="application2",
platform="APNS",
)
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
applications.should.have.length_of(2)
@mock_sns
def test_delete_platform_application():
conn = boto.connect_sns()
conn.create_platform_application(
name="application1",
platform="APNS",
)
conn.create_platform_application(
name="application2",
platform="APNS",
)
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
applications.should.have.length_of(2)
application_arn = applications[0]['PlatformApplicationArn']
conn.delete_platform_application(application_arn)
applications_repsonse = conn.list_platform_applications()
applications = applications_repsonse['ListPlatformApplicationsResponse']['ListPlatformApplicationsResult']['PlatformApplications']
applications.should.have.length_of(1)
@mock_sns
def test_create_platform_endpoint():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
endpoint_arn.should.contain("arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/")
@mock_sns
def test_get_list_endpoints_by_platform_application():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"CustomUserData": "some data",
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
endpoint_list = conn.list_endpoints_by_platform_application(
platform_application_arn=application_arn
)['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints']
endpoint_list.should.have.length_of(1)
endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data')
endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn)
@mock_sns
def test_get_endpoint_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
"CustomUserData": "some data",
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes']
attributes.should.equal({
"Enabled": 'False',
"CustomUserData": "some data",
})
@mock_sns
def test_get_missing_endpoint_attributes():
conn = boto.connect_sns()
conn.get_endpoint_attributes.when.called_with("a-fake-arn").should.throw(BotoServerError)
@mock_sns
def test_set_endpoint_attributes():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
"CustomUserData": "some data",
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
conn.set_endpoint_attributes(endpoint_arn,
{"CustomUserData": "other data"}
)
attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse']['GetEndpointAttributesResult']['Attributes']
attributes.should.equal({
"Enabled": 'False',
"CustomUserData": "other data",
})
@mock_sns
def test_publish_to_platform_endpoint():
conn = boto.connect_sns()
platform_application = conn.create_platform_application(
name="my-application",
platform="APNS",
)
application_arn = platform_application['CreatePlatformApplicationResponse']['CreatePlatformApplicationResult']['PlatformApplicationArn']
endpoint = conn.create_platform_endpoint(
platform_application_arn=application_arn,
token="some_unique_id",
custom_user_data="some user data",
attributes={
"Enabled": False,
},
)
endpoint_arn = endpoint['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
conn.publish(message="some message", message_structure="json", target_arn=endpoint_arn)
| apache-2.0 | -804,020,911,579,898,600 | 35.457143 | 162 | 0.701747 | false |
cathyyul/sumo-0.18 | tests/complex/traci/pythonApi/lane/runner.py | 1 | 2329 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, subprocess, sys
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), "..", "..", "..", "..", "..", "tools"))
import traci, sumolib
sumoBinary = sumolib.checkBinary('sumo')
sumoProcess = subprocess.Popen("%s -c sumo.sumocfg" % (sumoBinary), shell=True, stdout=sys.stdout)
traci.init(8813)
for step in range(3):
print "step", step
traci.simulationStep()
print "lanes", traci.lane.getIDList()
laneID = "2fi_0"
print "examining", laneID
print "length", traci.lane.getLength(laneID)
print "maxSpeed", traci.lane.getMaxSpeed(laneID)
print "width", traci.lane.getWidth(laneID)
print "allowed", traci.lane.getAllowed(laneID)
print "disallowed", traci.lane.getDisallowed(laneID)
print "linkNum", traci.lane.getLinkNumber(laneID)
print "links", traci.lane.getLinks(laneID)
print "shape", traci.lane.getShape(laneID)
print "edge", traci.lane.getEdgeID(laneID)
print "CO2", traci.lane.getCO2Emission(laneID)
print "CO", traci.lane.getCOEmission(laneID)
print "HC", traci.lane.getHCEmission(laneID)
print "PMx", traci.lane.getPMxEmission(laneID)
print "NOx", traci.lane.getNOxEmission(laneID)
print "Fuel", traci.lane.getFuelConsumption(laneID)
print "Noise", traci.lane.getNoiseEmission(laneID)
print "meanSpeed", traci.lane.getLastStepMeanSpeed(laneID)
print "occupancy", traci.lane.getLastStepOccupancy(laneID)
print "lastLength", traci.lane.getLastStepLength(laneID)
print "traveltime", traci.lane.getTraveltime(laneID)
print "numVeh", traci.lane.getLastStepVehicleNumber(laneID)
print "haltVeh", traci.lane.getLastStepHaltingNumber(laneID)
print "vehIds", traci.lane.getLastStepVehicleIDs(laneID)
traci.lane.setAllowed(laneID, ["taxi"])
print "after setAllowed", traci.lane.getAllowed(laneID), traci.lane.getDisallowed(laneID)
traci.lane.setDisallowed(laneID, ["bus"])
print "after setDisallowed", traci.lane.getAllowed(laneID), traci.lane.getDisallowed(laneID)
traci.lane.setMaxSpeed(laneID, 42.)
print "after setMaxSpeed", traci.lane.getMaxSpeed(laneID)
traci.lane.setLength(laneID, 123.)
print "after setLength", traci.lane.getLength(laneID)
traci.lane.subscribe(laneID)
print traci.lane.getSubscriptionResults(laneID)
for step in range(3,6):
print "step", step
traci.simulationStep()
print traci.lane.getSubscriptionResults(laneID)
traci.close()
| gpl-3.0 | -4,408,629,877,880,681,000 | 40.589286 | 98 | 0.765135 | false |
dipspb/ardupilot | mk/VRBRAIN/Tools/genmsg/src/genmsg/msg_loader.py | 51 | 20963 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
"""
Loader for messages and :class:`MsgContext` that assumes a
dictionary-based search path scheme (keys are the package/namespace,
values are the paths). Compatible with ROS package system and other
possible layouts.
"""
import os
import sys
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . base import InvalidMsgSpec, log, SEP, COMMENTCHAR, CONSTCHAR, IODELIM, EXT_MSG, EXT_SRV
from . msgs import MsgSpec, TIME, TIME_MSG, DURATION, DURATION_MSG, HEADER, HEADER_FULL_NAME, \
is_builtin, is_valid_msg_field_name, is_valid_msg_type, bare_msg_type, is_valid_constant_type, \
Field, Constant, resolve_type
from . names import normalize_package_context, package_resource_name
from . srvs import SrvSpec
class MsgNotFound(Exception):
pass
def get_msg_file(package, base_type, search_path, ext=EXT_MSG):
"""
Determine the file system path for the specified ``.msg`` on
*search_path*.
:param package: name of package file is in, ``str``
:param base_type: type name of message, e.g. 'Point2DFloat32', ``str``
:param search_path: dictionary mapping message namespaces to a directory locations
:param ext: msg file extension. Override with EXT_SRV to search for services instead.
:returns: filesystem path of requested file, ``str``
:raises: :exc:`MsgNotFound` If message cannot be located.
"""
log("msg_file(%s, %s, %s)" % (package, base_type, str(search_path)))
if not isinstance(search_path, dict):
raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
if not package in search_path:
raise MsgNotFound("Cannot locate message [%s]: unknown package [%s] on search path [%s]" \
% (base_type, package, search_path))
else:
for path_tmp in search_path[package]:
path = os.path.join(path_tmp, "%s%s"%(base_type, ext))
if os.path.isfile(path):
return path
raise MsgNotFound("Cannot locate message [%s] in package [%s] with paths [%s]"%
(base_type, package, str(search_path[package])))
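# Illustrative sketch (editorial addition, hypothetical paths): search_path maps
# each package name to the directories that may contain its .msg files, e.g.
#
#     search_path = {'std_msgs': ['/opt/ros/share/std_msgs/msg']}
#     get_msg_file('std_msgs', 'String', search_path)
#     # -> '/opt/ros/share/std_msgs/msg/String.msg', if that file exists;
#     #    otherwise MsgNotFound is raised.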
def get_srv_file(package, base_type, search_path):
"""
Determine the file system path for the specified .srv on path.
:param package: name of package ``.srv`` file is in, ``str``
:param base_type: type name of service, e.g. 'Empty', ``str``
:param search_path: dictionary mapping message namespaces to a directory locations
:returns: file path of ``.srv`` file in specified package, ``str``
:raises: :exc:`MsgNotFound` If service file cannot be located.
"""
return get_msg_file(package, base_type, search_path, ext=EXT_SRV)
def load_msg_by_type(msg_context, msg_type, search_path):
"""
Load message specification for specified type.
NOTE: this will register the message in the *msg_context*.
:param msg_context: :class:`MsgContext` for finding loaded dependencies
:param msg_type: relative or full message type.
:param search_path: dictionary mapping message namespaces to a directory locations
:returns: :class:`MsgSpec` instance, ``(str, MsgSpec)``
:raises: :exc:`MsgNotFound` If message cannot be located.
"""
log("load_msg_by_type(%s, %s)" % (msg_type, str(search_path)))
if not isinstance(search_path, dict):
raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
if msg_type == HEADER:
msg_type = HEADER_FULL_NAME
package_name, base_type = package_resource_name(msg_type)
file_path = get_msg_file(package_name, base_type, search_path)
log("file_path", file_path)
spec = load_msg_from_file(msg_context, file_path, msg_type)
msg_context.set_file(msg_type, file_path)
return spec
def load_srv_by_type(msg_context, srv_type, search_path):
"""
Load service specification for specified type.
NOTE: services are *never* registered in a :class:`MsgContext`.
:param msg_context: :class:`MsgContext` for finding loaded dependencies
:param srv_type: relative or full message type.
:param search_path: dictionary mapping message namespaces to a directory locations
:returns: :class:`MsgSpec` instance, ``(str, MsgSpec)``
:raises: :exc:`MsgNotFound` If message cannot be located.
"""
log("load_srv_by_type(%s, %s)" % (srv_type, str(search_path)))
if not isinstance(search_path, dict):
raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
package_name, base_type = package_resource_name(srv_type)
file_path = get_srv_file(package_name, base_type, search_path)
log("file_path", file_path)
return load_srv_from_file(msg_context, file_path, srv_type)
def convert_constant_value(field_type, val):
"""
Convert constant value declaration to python value. Does not do
type-checking, so ValueError or other exceptions may be raised.
:param field_type: ROS field type, ``str``
:param val: string representation of constant, ``str``
:raises: :exc:`ValueError` If unable to convert to python representation
:raises: :exc:`InvalidMsgSpec` If value exceeds specified integer width
"""
if field_type in ['float32','float64']:
return float(val)
elif field_type in ['string']:
return val.strip() #string constants are always stripped
elif field_type in ['int8', 'uint8', 'int16','uint16','int32','uint32','int64','uint64', 'char', 'byte']:
# bounds checking
bits = [('int8', 8), ('uint8', 8), ('int16', 16),('uint16', 16),\
('int32', 32),('uint32', 32), ('int64', 64),('uint64', 64),\
('byte', 8), ('char', 8)]
b = [b for t, b in bits if t == field_type][0]
import math
if field_type[0] == 'u' or field_type == 'char':
lower = 0
upper = int(math.pow(2, b)-1)
else:
upper = int(math.pow(2, b-1)-1)
lower = -upper - 1 #two's complement min
val = int(val) #python will autocast to long if necessary
if val > upper or val < lower:
raise InvalidMsgSpec("cannot coerce [%s] to %s (out of bounds)"%(val, field_type))
return val
elif field_type == 'bool':
# TODO: need to nail down constant spec for bool
return True if eval(val) else False
raise InvalidMsgSpec("invalid constant type: [%s]"%field_type)
def _load_constant_line(orig_line):
"""
:raises: :exc:`InvalidMsgSpec`
"""
clean_line = _strip_comments(orig_line)
line_splits = [s for s in [x.strip() for x in clean_line.split(" ")] if s] #split type/name, filter out empties
field_type = line_splits[0]
if not is_valid_constant_type(field_type):
raise InvalidMsgSpec("%s is not a legal constant type"%field_type)
if field_type == 'string':
# strings contain anything to the right of the equals sign, there are no comments allowed
idx = orig_line.find(CONSTCHAR)
name = orig_line[orig_line.find(' ')+1:idx]
val = orig_line[idx+1:]
else:
line_splits = [x.strip() for x in ' '.join(line_splits[1:]).split(CONSTCHAR)] #resplit on '='
if len(line_splits) != 2:
raise InvalidMsgSpec("Invalid constant declaration: %s"%l)
name = line_splits[0]
val = line_splits[1]
try:
val_converted = convert_constant_value(field_type, val)
except Exception as e:
raise InvalidMsgSpec("Invalid constant value: %s"%e)
return Constant(field_type, name, val_converted, val.strip())
def _load_field_line(orig_line, package_context):
"""
:returns: (field_type, name) tuple, ``(str, str)``
:raises: :exc:`InvalidMsgSpec`
"""
#log("_load_field_line", orig_line, package_context)
clean_line = _strip_comments(orig_line)
line_splits = [s for s in [x.strip() for x in clean_line.split(" ")] if s] #split type/name, filter out empties
if len(line_splits) != 2:
raise InvalidMsgSpec("Invalid declaration: %s"%(orig_line))
field_type, name = line_splits
if not is_valid_msg_field_name(name):
raise InvalidMsgSpec("%s is not a legal message field name"%name)
if not is_valid_msg_type(field_type):
raise InvalidMsgSpec("%s is not a legal message field type"%field_type)
if package_context and not SEP in field_type:
if field_type == HEADER:
field_type = HEADER_FULL_NAME
elif not is_builtin(bare_msg_type(field_type)):
field_type = "%s/%s"%(package_context, field_type)
elif field_type == HEADER:
field_type = HEADER_FULL_NAME
return field_type, name
def _strip_comments(line):
return line.split(COMMENTCHAR)[0].strip() #strip comments
def load_msg_from_string(msg_context, text, full_name):
"""
Load message specification from a string.
NOTE: this will register the message in the *msg_context*.
:param msg_context: :class:`MsgContext` for finding loaded dependencies
:param text: .msg text , ``str``
:returns: :class:`MsgSpec` specification
:raises: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
"""
log("load_msg_from_string", full_name)
package_name, short_name = package_resource_name(full_name)
types = []
names = []
constants = []
for orig_line in text.split('\n'):
clean_line = _strip_comments(orig_line)
if not clean_line:
continue #ignore empty lines
if CONSTCHAR in clean_line:
constants.append(_load_constant_line(orig_line))
else:
field_type, name = _load_field_line(orig_line, package_name)
types.append(field_type)
names.append(name)
spec = MsgSpec(types, names, constants, text, full_name, package_name)
msg_context.register(full_name, spec)
return spec
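# Illustrative sketch (editorial addition): parsing a minimal definition
# registers the resulting spec in the context, e.g.
#
#     ctx = MsgContext.create_default()
#     spec = load_msg_from_string(ctx, "int32 x\nint32 y", 'my_pkg/Point2D')
#     # spec.types == ['int32', 'int32'], spec.names == ['x', 'y']
#     # ctx.is_registered('my_pkg/Point2D') is True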
def load_msg_from_file(msg_context, file_path, full_name):
"""
Convert the .msg representation in the file to a :class:`MsgSpec` instance.
NOTE: this will register the message in the *msg_context*.
:param file_path: path of file to load from, ``str``
:returns: :class:`MsgSpec` instance
:raises: :exc:`InvalidMsgSpec`: if syntax errors or other problems are detected in file
"""
log("Load spec from", file_path)
with open(file_path, 'r') as f:
text = f.read()
try:
return load_msg_from_string(msg_context, text, full_name)
except InvalidMsgSpec as e:
raise InvalidMsgSpec('%s: %s'%(file_path, e))
def load_msg_depends(msg_context, spec, search_path):
"""
Add the list of message types that spec depends on to depends.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: message to compute dependencies for, :class:`MsgSpec`/:class:`SrvSpec`
:param search_path: dictionary mapping message namespaces to a directory locations
:param deps: for recursion use only, do not set
:returns: list of dependency names, ``[str]``
:raises: :exc:`MsgNotFound` If dependency cannot be located.
"""
package_context = spec.package
log("load_msg_depends <spec>", spec.full_name, package_context)
depends = []
# Iterate over each field, loading as necessary
for unresolved_type in spec.types:
bare_type = bare_msg_type(unresolved_type)
resolved_type = resolve_type(bare_type, package_context)
if is_builtin(resolved_type):
continue
# Retrieve the MsgSpec instance of the field
if msg_context.is_registered(resolved_type):
depspec = msg_context.get_registered(resolved_type)
else:
# load and register on demand
depspec = load_msg_by_type(msg_context, resolved_type, search_path)
msg_context.register(resolved_type, depspec)
# Update dependencies
depends.append(resolved_type)
# - check to see if we have compute dependencies of field
dep_dependencies = msg_context.get_depends(resolved_type)
if dep_dependencies is None:
load_msg_depends(msg_context, depspec, search_path)
assert spec.full_name, "MsgSpec must have a properly set full name"
msg_context.set_depends(spec.full_name, depends)
# have to copy array in order to prevent inadvertent mutation (we've stored this list in set_dependencies)
return depends[:]
def load_depends(msg_context, spec, msg_search_path):
"""
Compute dependencies of *spec* and load their MsgSpec dependencies
into *msg_context*.
NOTE: *msg_search_path* is only for finding .msg files. ``.srv``
files have a separate and distinct search path. As services
cannot depend on other services, it is not necessary to provide
the srv search path here.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` or :class:`SrvSpec` instance to load dependencies for.
:param msg_search_path: dictionary mapping message namespaces to a directory locations.
:raises: :exc:`MsgNotFound` If dependency cannot be located.
"""
if isinstance(spec, MsgSpec):
return load_msg_depends(msg_context, spec, msg_search_path)
elif isinstance(spec, SrvSpec):
depends = load_msg_depends(msg_context, spec.request, msg_search_path)
depends.extend(load_msg_depends(msg_context, spec.response, msg_search_path))
return depends
else:
raise ValueError("spec does not appear to be a message or service")
class MsgContext(object):
"""
Context object for storing :class:`MsgSpec` instances and related
metadata.
NOTE: All APIs work on :class:`MsgSpec` instance information.
Thus, for services, there is information for the request and
response messages, but there is no direct information about the
:class:`SrvSpec` instance.
"""
def __init__(self):
self._registered_packages = {}
self._files = {}
self._dependencies = {}
def set_file(self, full_msg_type, file_path):
self._files[full_msg_type] = file_path
def get_file(self, full_msg_type):
return self._files.get(full_msg_type, None)
def set_depends(self, full_msg_type, dependencies):
"""
:param dependencies: direct first order
dependencies for *full_msg_type*
"""
log("set_depends", full_msg_type, dependencies)
self._dependencies[full_msg_type] = dependencies
def get_depends(self, full_msg_type):
"""
:returns: List of dependencies for *full_msg_type*,
only first order dependencies
"""
return self._dependencies.get(full_msg_type, None)
def get_all_depends(self, full_msg_type):
all_deps = []
depends = self.get_depends(full_msg_type)
if depends is None:
raise KeyError(full_msg_type)
for d in depends:
all_deps.extend([d])
all_deps.extend(self.get_all_depends(d))
return all_deps
@staticmethod
def create_default():
msg_context = MsgContext()
# register builtins (needed for serialization). builtins have no package.
load_msg_from_string(msg_context, TIME_MSG, TIME)
load_msg_from_string(msg_context, DURATION_MSG, DURATION)
return msg_context
def register(self, full_msg_type, msgspec):
full_msg_type = bare_msg_type(full_msg_type)
package, base_type = package_resource_name(full_msg_type)
if package not in self._registered_packages:
self._registered_packages[package] = {}
self._registered_packages[package][base_type] = msgspec
def is_registered(self, full_msg_type):
"""
        :param full_msg_type: Fully resolved message type
:returns: ``True`` if :class:`MsgSpec` instance has been loaded for the requested type.
"""
full_msg_type = bare_msg_type(full_msg_type)
package, base_type = package_resource_name(full_msg_type)
if package in self._registered_packages:
return base_type in self._registered_packages[package]
else:
return False
def get_registered(self, full_msg_type):
"""
:raises: :exc:`KeyError` If not registered
"""
full_msg_type = bare_msg_type(full_msg_type)
if self.is_registered(full_msg_type):
package, base_type = package_resource_name(full_msg_type)
return self._registered_packages[package][base_type]
else:
raise KeyError(full_msg_type)
def __str__(self):
return str(self._registered_packages)
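# A minimal usage sketch for MsgContext (illustrative only; the message type
# name and the contents of `search_path` below are placeholders, not values
# required by this module):
#
#   msg_context = MsgContext.create_default()
#   spec = load_msg_by_type(msg_context, 'std_msgs/Header', search_path)
#   load_msg_depends(msg_context, spec, search_path)
#   print(msg_context.get_depends(spec.full_name))
#
# where `search_path` is a dictionary mapping package names to lists of
# directories containing .msg files, as described for load_depends() above.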
def load_srv_from_string(msg_context, text, full_name):
"""
    Load :class:`SrvSpec` from .srv file text.
    :param msg_context: :class:`MsgContext` instance to load request/response messages into.
    :param text: .srv text, ``str``
    :param full_name: full name of the service type; used to name the
      generated request/response message types, ``str``
:returns: :class:`SrvSpec` instance
    :raises: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
"""
text_in = StringIO()
text_out = StringIO()
accum = text_in
for l in text.split('\n'):
l = l.split(COMMENTCHAR)[0].strip() #strip comments
if l.startswith(IODELIM): #lenient, by request
accum = text_out
else:
accum.write(l+'\n')
# create separate MsgSpec objects for each half of file
msg_in = load_msg_from_string(msg_context, text_in.getvalue(), '%sRequest'%(full_name))
msg_out = load_msg_from_string(msg_context, text_out.getvalue(), '%sResponse'%(full_name))
return SrvSpec(msg_in, msg_out, text, full_name)
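# Illustrative sketch of the request/response split performed above (the
# service text is a made-up example, not something shipped with this package):
#
#   srv_text = "int64 a\nint64 b\n---\nint64 sum\n"
#   srv_spec = load_srv_from_string(msg_context, srv_text, 'my_pkg/AddTwoInts')
#   # srv_spec.request  -> the MsgSpec named 'my_pkg/AddTwoIntsRequest'
#   # srv_spec.response -> the MsgSpec named 'my_pkg/AddTwoIntsResponse'
#
# Everything before the IODELIM line ('---') becomes the request message and
# everything after it becomes the response message.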
def load_srv_from_file(msg_context, file_path, full_name):
"""
Convert the .srv representation in the file to a :class:`SrvSpec` instance.
:param msg_context: :class:`MsgContext` instance to load request/response messages into.
    :param file_path: path of the file to load from, ``str``
    :param full_name: full name of the service type, ``str``
:returns: :class:`SrvSpec` instance
:raise: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
"""
log("Load spec from %s %s\n"%(file_path, full_name))
with open(file_path, 'r') as f:
text = f.read()
spec = load_srv_from_string(msg_context, text, full_name)
msg_context.set_file('%sRequest'%(full_name), file_path)
msg_context.set_file('%sResponse'%(full_name), file_path)
return spec
| gpl-3.0 | 2,734,824,519,041,150,000 | 41.311983 | 115 | 0.641416 | false |
jkandasa/integration_tests | artifactor/plugins/filedump.py | 6 | 4156 | """ FileDump plugin for Artifactor
Add a stanza to the artifactor config like this,
artifactor:
log_dir: /home/username/outdir
per_run: test #test, run, None
overwrite: True
plugins:
filedump:
enabled: True
plugin: filedump
"""
from artifactor import ArtifactorBasePlugin
import base64
import os
import re
from cfme.utils import normalize_text, safe_string
class Filedump(ArtifactorBasePlugin):
def plugin_initialize(self):
self.register_plugin_hook('filedump', self.filedump)
self.register_plugin_hook('sanitize', self.sanitize)
self.register_plugin_hook('pre_start_test', self.start_test)
self.register_plugin_hook('finish_test', self.finish_test)
def configure(self):
self.configured = True
def start_test(self, artifact_path, test_name, test_location, slaveid):
if not slaveid:
slaveid = "Master"
self.store[slaveid] = {
"artifact_path": artifact_path,
"test_name": test_name,
"test_location": test_location
}
def finish_test(self, artifact_path, test_name, test_location, slaveid):
if not slaveid:
slaveid = "Master"
@ArtifactorBasePlugin.check_configured
def filedump(self, description, contents, slaveid=None, mode="w", contents_base64=False,
display_type="primary", display_glyph=None, file_type=None,
dont_write=False, os_filename=None, group_id=None, test_name=None,
test_location=None):
if not slaveid:
slaveid = "Master"
test_ident = "{}/{}".format(self.store[slaveid]['test_location'],
self.store[slaveid]['test_name'])
artifacts = []
if os_filename is None:
safe_name = re.sub(r"\s+", "_", normalize_text(safe_string(description)))
os_filename = self.ident + "-" + safe_name
os_filename = os.path.join(self.store[slaveid]['artifact_path'], os_filename)
if file_type is not None and "screenshot" in file_type:
os_filename = os_filename + ".png"
elif file_type is not None and (
"_tb" in file_type or "traceback" in file_type or file_type == "log"):
os_filename = os_filename + ".log"
elif file_type is not None and file_type == "html":
os_filename = os_filename + ".html"
elif file_type is not None and file_type == "video":
os_filename = os_filename + ".ogv"
else:
os_filename = os_filename + ".txt"
artifacts.append({
"file_type": file_type,
"display_type": display_type,
"display_glyph": display_glyph,
"description": description,
"os_filename": os_filename,
"group_id": group_id,
})
if not dont_write:
if os.path.isfile(os_filename):
os.remove(os_filename)
with open(os_filename, mode) as f:
if contents_base64:
contents = base64.b64decode(contents)
f.write(contents)
return None, {'artifacts': {test_ident: {'files': artifacts}}}
@ArtifactorBasePlugin.check_configured
def sanitize(self, test_location, test_name, artifacts, words):
test_ident = "{}/{}".format(test_location, test_name)
filename = None
try:
for f in artifacts[test_ident]['files']:
if f["file_type"] not in {
"traceback", "short_tb", "rbac", "soft_traceback",
"soft_short_tb"}:
continue
filename = f["os_filename"]
with open(filename) as f:
data = f.read()
for word in words:
if not isinstance(word, basestring):
word = str(word)
data = data.replace(word, "*" * len(word))
with open(filename, "w") as f:
f.write(data)
except KeyError:
pass
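# Rough usage sketch: in practice the methods above are dispatched through the
# hooks registered in plugin_initialize() ('filedump', 'sanitize', ...) rather
# than called directly.  A direct call would look roughly like this (the
# plugin instance and argument values are placeholders):
#
#   plugin.filedump(description='Traceback', contents=tb_text,
#                   file_type='traceback', group_id='pytest-funcall')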
| gpl-2.0 | -8,823,760,158,629,757,000 | 37.12844 | 92 | 0.551011 | false |
ehealthafrica-ci/onadata | onadata/apps/viewer/management/commands/sync_mongo.py | 8 | 1981 | #!/usr/bin/env python
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext_lazy
from onadata.apps.logger.models import XForm
from onadata.libs.utils.logger_tools import mongo_sync_status
class Command(BaseCommand):
args = '[username] [id_string]'
help = ugettext_lazy("Check the count of submissions in sqlite vs the "
"mongo db per form and optionally run remongo.")
option_list = BaseCommand.option_list + (make_option(
'-r', '--remongo',
action='store_true',
dest='remongo',
default=False,
help=ugettext_lazy("Whether to run remongo on the found set.")),
make_option(
'-a', '--all', action='store_true', dest='update_all',
default=False, help=ugettext_lazy(
"Update all instances for the selected "
"form(s), including existing ones. "
"Will delete and re-create mongo records. "
"Only makes sense when used with the -r option")))
def handle(self, *args, **kwargs):
user = xform = None
if len(args) > 0:
username = args[0]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User %s does not exist" % username)
if len(args) > 1:
id_string = args[1]
try:
xform = XForm.objects.get(user=user, id_string=id_string)
except XForm.DoesNotExist:
raise CommandError("Xform %s does not exist for user %s" %
(id_string, user.username))
remongo = kwargs["remongo"]
update_all = kwargs["update_all"]
report_string = mongo_sync_status(remongo, update_all, user, xform)
self.stdout.write(report_string)
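# Rough usage sketch (command name inferred from this file's name; the
# username and id_string values are placeholders):
#
#   python manage.py sync_mongo bob my_form_id            # report only
#   python manage.py sync_mongo bob my_form_id -r         # re-sync missing records
#   python manage.py sync_mongo bob my_form_id -r --all   # delete and re-create all records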
| bsd-2-clause | -3,070,710,025,928,482,300 | 38.62 | 75 | 0.594144 | false |
BeDjango/intef-openedx | cms/djangoapps/contentstore/utils.py | 22 | 15660 | """
Common utility functions useful throughout the contentstore
"""
import logging
import re
from datetime import datetime
from pytz import UTC
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from opaque_keys.edx.keys import UsageKey, CourseKey
from student.roles import CourseInstructorRole, CourseStaffRole
from student.models import CourseEnrollment
from student import auth
log = logging.getLogger(__name__)
def add_instructor(course_key, requesting_user, new_instructor):
"""
Adds given user as instructor and staff to the given course,
after verifying that the requesting_user has permission to do so.
"""
# can't use auth.add_users here b/c it requires user to already have Instructor perms in this course
CourseInstructorRole(course_key).add_users(new_instructor)
auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)
def initialize_permissions(course_key, user_who_created_course):
"""
Initializes a new course by enrolling the course creator as a student,
and initializing Forum by seeding its permissions and assigning default roles.
"""
# seed the forums
seed_permissions_roles(course_key)
# auto-enroll the course creator in the course so that "View Live" will work.
CourseEnrollment.enroll(user_who_created_course, course_key)
# set default forum roles (assign 'Student' role)
assign_default_role(course_key, user_who_created_course)
def remove_all_instructors(course_key):
"""
Removes all instructor and staff users from the given course.
"""
staff_role = CourseStaffRole(course_key)
staff_role.remove_users(*staff_role.users_with_role())
instructor_role = CourseInstructorRole(course_key)
instructor_role.remove_users(*instructor_role.users_with_role())
def delete_course_and_groups(course_key, user_id):
"""
    This deletes the courseware associated with a course_key as well as cleaning up
    the various user table stuff (groups, permissions, etc.)
"""
module_store = modulestore()
with module_store.bulk_operations(course_key):
module_store.delete_course(course_key, user_id)
print 'removing User permissions from course....'
# in the django layer, we need to remove all the user permissions groups associated with this course
try:
remove_all_instructors(course_key)
except Exception as err:
log.error("Error in deleting course groups for {0}: {1}".format(course_key, err))
def get_lms_link_for_item(location, preview=False):
"""
Returns an LMS link to the course with a jump_to to the provided location.
:param location: the location to jump to
:param preview: True if the preview version of LMS should be returned. Default value is false.
"""
assert isinstance(location, UsageKey)
if settings.LMS_BASE is None:
return None
if preview:
lms_base = settings.FEATURES.get('PREVIEW_LMS_BASE')
else:
lms_base = settings.LMS_BASE
return u"//{lms_base}/courses/{course_key}/jump_to/{location}".format(
lms_base=lms_base,
course_key=location.course_key.to_deprecated_string(),
location=location.to_deprecated_string(),
)
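# Illustrative example of the protocol-relative URL built above (host, course
# and usage ids are made up):
#
#   //lms.example.com/courses/Org/Course/Run/jump_to/i4x://Org/Course/html/abc123
#
# When preview=True the host is taken from FEATURES['PREVIEW_LMS_BASE']
# instead of LMS_BASE.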
def get_lms_link_for_about_page(course_key):
"""
Returns the url to the course about page from the location tuple.
"""
assert isinstance(course_key, CourseKey)
if settings.FEATURES.get('ENABLE_MKTG_SITE', False):
if not hasattr(settings, 'MKTG_URLS'):
log.exception("ENABLE_MKTG_SITE is True, but MKTG_URLS is not defined.")
return None
marketing_urls = settings.MKTG_URLS
# Root will be "https://www.edx.org". The complete URL will still not be exactly correct,
# but redirects exist from www.edx.org to get to the Drupal course about page URL.
about_base = marketing_urls.get('ROOT', None)
if about_base is None:
log.exception('There is no ROOT defined in MKTG_URLS')
return None
# Strip off https:// (or http://) to be consistent with the formatting of LMS_BASE.
about_base = re.sub(r"^https?://", "", about_base)
elif settings.LMS_BASE is not None:
about_base = settings.LMS_BASE
else:
return None
return u"//{about_base_url}/courses/{course_key}/about".format(
about_base_url=about_base,
course_key=course_key.to_deprecated_string()
)
# pylint: disable=invalid-name
def get_lms_link_for_certificate_web_view(user_id, course_key, mode):
"""
Returns the url to the certificate web view.
"""
assert isinstance(course_key, CourseKey)
if settings.LMS_BASE is None:
return None
return u"//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}".format(
certificate_web_base=settings.LMS_BASE,
user_id=user_id,
course_id=unicode(course_key),
mode=mode
)
# pylint: disable=invalid-name
def is_currently_visible_to_students(xblock):
"""
Returns true if there is a published version of the xblock that is currently visible to students.
This means that it has a release date in the past, and the xblock has not been set to staff only.
"""
try:
published = modulestore().get_item(xblock.location, revision=ModuleStoreEnum.RevisionOption.published_only)
# If there's no published version then the xblock is clearly not visible
except ItemNotFoundError:
return False
# If visible_to_staff_only is True, this xblock is not visible to students regardless of start date.
if published.visible_to_staff_only:
return False
# Check start date
if 'detached' not in published._class_tags and published.start is not None:
return datetime.now(UTC) > published.start
# No start date, so it's always visible
return True
def has_children_visible_to_specific_content_groups(xblock):
"""
Returns True if this xblock has children that are limited to specific content groups.
Note that this method is not recursive (it does not check grandchildren).
"""
if not xblock.has_children:
return False
for child in xblock.get_children():
if is_visible_to_specific_content_groups(child):
return True
return False
def is_visible_to_specific_content_groups(xblock):
"""
Returns True if this xblock has visibility limited to specific content groups.
"""
if not xblock.group_access:
return False
for partition in get_user_partition_info(xblock):
if any(g["selected"] for g in partition["groups"]):
return True
return False
def find_release_date_source(xblock):
"""
Finds the ancestor of xblock that set its release date.
"""
# Stop searching at the section level
if xblock.category == 'chapter':
return xblock
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Orphaned xblocks set their own release date
if not parent_location:
return xblock
parent = modulestore().get_item(parent_location)
if parent.start != xblock.start:
return xblock
else:
return find_release_date_source(parent)
def find_staff_lock_source(xblock):
"""
Returns the xblock responsible for setting this xblock's staff lock, or None if the xblock is not staff locked.
If this xblock is explicitly locked, return it, otherwise find the ancestor which sets this xblock's staff lock.
"""
# Stop searching if this xblock has explicitly set its own staff lock
if xblock.fields['visible_to_staff_only'].is_set_on(xblock):
return xblock
# Stop searching at the section level
if xblock.category == 'chapter':
return None
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Orphaned xblocks set their own staff lock
if not parent_location:
return None
parent = modulestore().get_item(parent_location)
return find_staff_lock_source(parent)
def ancestor_has_staff_lock(xblock, parent_xblock=None):
"""
Returns True iff one of xblock's ancestors has staff lock.
Can avoid mongo query by passing in parent_xblock.
"""
if parent_xblock is None:
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
if not parent_location:
return False
parent_xblock = modulestore().get_item(parent_location)
return parent_xblock.visible_to_staff_only
def reverse_url(handler_name, key_name=None, key_value=None, kwargs=None):
"""
Creates the URL for the given handler.
The optional key_name and key_value are passed in as kwargs to the handler.
"""
kwargs_for_reverse = {key_name: unicode(key_value)} if key_name else None
if kwargs:
kwargs_for_reverse.update(kwargs)
return reverse('contentstore.views.' + handler_name, kwargs=kwargs_for_reverse)
def reverse_course_url(handler_name, course_key, kwargs=None):
"""
Creates the URL for handlers that use course_keys as URL parameters.
"""
return reverse_url(handler_name, 'course_key_string', course_key, kwargs)
def reverse_library_url(handler_name, library_key, kwargs=None):
"""
Creates the URL for handlers that use library_keys as URL parameters.
"""
return reverse_url(handler_name, 'library_key_string', library_key, kwargs)
def reverse_usage_url(handler_name, usage_key, kwargs=None):
"""
Creates the URL for handlers that use usage_keys as URL parameters.
"""
return reverse_url(handler_name, 'usage_key_string', usage_key, kwargs)
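# Minimal sketch of how these reverse_* helpers are typically used (the
# handler names below are illustrative):
#
#   settings_url = reverse_course_url('settings_handler', course_key)
#   xblock_url = reverse_usage_url('xblock_handler', usage_key)
#
# Both resolve views in contentstore.views with the key serialized into the
# URL kwargs.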
def get_user_partition_info(xblock, schemes=None, course=None):
"""
Retrieve user partition information for an XBlock for display in editors.
* If a partition has been disabled, it will be excluded from the results.
* If a group within a partition is referenced by the XBlock, but the group has been deleted,
the group will be marked as deleted in the results.
Arguments:
xblock (XBlock): The courseware component being edited.
Keyword Arguments:
schemes (iterable of str): If provided, filter partitions to include only
schemes with the provided names.
course (XBlock): The course descriptor. If provided, uses this to look up the user partitions
instead of loading the course. This is useful if we're calling this function multiple
times for the same course want to minimize queries to the modulestore.
Returns: list
Example Usage:
>>> get_user_partition_info(block, schemes=["cohort", "verification"])
[
{
"id": 12345,
"name": "Cohorts"
"scheme": "cohort",
"groups": [
{
"id": 7890,
"name": "Foo",
"selected": True,
"deleted": False,
}
]
},
{
"id": 7292,
"name": "Midterm A",
"scheme": "verification",
"groups": [
{
"id": 1,
"name": "Completed verification at Midterm A",
"selected": False,
"deleted": False
},
{
"id": 0,
"name": "Did not complete verification at Midterm A",
"selected": False,
"deleted": False,
}
]
}
]
"""
course = course or modulestore().get_course(xblock.location.course_key)
if course is None:
log.warning(
"Could not find course %s to retrieve user partition information",
xblock.location.course_key
)
return []
if schemes is not None:
schemes = set(schemes)
partitions = []
for p in sorted(course.user_partitions, key=lambda p: p.name):
# Exclude disabled partitions, partitions with no groups defined
# Also filter by scheme name if there's a filter defined.
if p.active and p.groups and (schemes is None or p.scheme.name in schemes):
# First, add groups defined by the partition
groups = []
for g in p.groups:
# Falsey group access for a partition mean that all groups
# are selected. In the UI, though, we don't show the particular
# groups selected, since there's a separate option for "all users".
selected_groups = set(xblock.group_access.get(p.id, []) or [])
groups.append({
"id": g.id,
"name": g.name,
"selected": g.id in selected_groups,
"deleted": False,
})
# Next, add any groups set on the XBlock that have been deleted
all_groups = set(g.id for g in p.groups)
missing_group_ids = selected_groups - all_groups
for gid in missing_group_ids:
groups.append({
"id": gid,
"name": _("Deleted group"),
"selected": True,
"deleted": True,
})
# Put together the entire partition dictionary
partitions.append({
"id": p.id,
"name": p.name,
"scheme": p.scheme.name,
"groups": groups,
})
return partitions
def get_visibility_partition_info(xblock):
"""
Retrieve user partition information for the component visibility editor.
This pre-processes partition information to simplify the template.
Arguments:
xblock (XBlock): The component being edited.
Returns: dict
"""
user_partitions = get_user_partition_info(xblock, schemes=["verification", "cohort"])
cohort_partitions = []
verification_partitions = []
has_selected_groups = False
selected_verified_partition_id = None
# Pre-process the partitions to make it easier to display the UI
for p in user_partitions:
has_selected = any(g["selected"] for g in p["groups"])
has_selected_groups = has_selected_groups or has_selected
if p["scheme"] == "cohort":
cohort_partitions.append(p)
elif p["scheme"] == "verification":
verification_partitions.append(p)
if has_selected:
selected_verified_partition_id = p["id"]
return {
"user_partitions": user_partitions,
"cohort_partitions": cohort_partitions,
"verification_partitions": verification_partitions,
"has_selected_groups": has_selected_groups,
"selected_verified_partition_id": selected_verified_partition_id,
}
| agpl-3.0 | -7,753,078,859,180,698,000 | 33.266958 | 116 | 0.638378 | false |
zxtstarry/src | user/karl/display1.py | 5 | 8972 | from traits.api import HasTraits, Instance, Button, Enum, Int, Float, Range
from traitsui.api import View, Item, Group
from chaco.api import HPlotContainer, Plot, ArrayPlotData, DataRange1D
from chaco.tools.api import PanTool, ZoomTool
from enable.api import ColorTrait
from enable.component_editor import ComponentEditor
from numpy import linspace, nanmin, nanmax, transpose, uint8, min, max, ones
from enable.api import BaseTool
import sys
import m8r
from chaco.default_colormaps import *
" derived from /home/karl/learn/python/chaco/button.py "
# add the unzoom button function karl kls
class SeisData(HasTraits):
def __init__(self, filename):
super(SeisData, self).__init__()
self.model=m8r.Input(filename)
self.vals = self.model[:,:,:]
self.dim=len(self.vals.shape)
self.axis_start=self.read_axis_float_info("o")
print "self.axis_start=",self.axis_start
self.axis_delta=self.read_axis_float_info("d")
print "self.axis_delta=",self.axis_delta
self.axis_end=[]
for i in range(0,self.dim):
self.axis_end.append(self.axis_start[i]+
(self.vals.shape[i]-1)*self.axis_delta[i])
print "self.axis_end=",self.axis_end
print "compute min/max"
max_n_samples=100
inc=ones(self.dim,dtype=int)
last=ones(self.dim,dtype=int)
for i in range(0,self.dim):
            inc[i]=self.vals.shape[i]/max_n_samples
print "self.vals.shape=", self.vals.shape ,"inc=",inc
if(inc[i]<1):
inc[i]=1
last[i]=(self.vals.shape[i]/inc[i]-1)*inc[i]
print "self.vals.shape=", self.vals.shape
print "inc=",inc,"last=",last
subsetvals=self.vals[:last[0]:inc[0],:last[1]:inc[1],:last[2]:inc[2]]
print "subsetvals.shape=",subsetvals.shape
self.minval = min(subsetvals)
print "compute max"
self.maxval = max(subsetvals)
print "min=",self.minval
print "max=",self.maxval
print "leaving m8rInput"
def read_axis_float_info(self,letter):
list_floats=[]
for i in range(1,self.dim+1):
print "get parameter",letter+"%d"%i
list_floats.append(self.model.float(letter+"%d"%i,1))
list_floats.reverse()
return tuple(list_floats)
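# Rough usage sketch (any 3-D RSF volume readable by m8r should work; the
# file name is the same default used further below):
#
#   data = SeisData('foldplot1.rsf')
#   print data.vals.shape, data.minval, data.maxval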
class ContainerExample(HasTraits):
plot = Instance(HPlotContainer)
display_button = Button()
display_button1 = Button()
prev = Button()
next = Button()
unzoom = Button()
traits_view = View(
Group(Item('display_button', show_label=False),
Item('display_button1', show_label=False),
Item('prev', show_label=False),
Item('next', show_label=False),
Item('unzoom', show_label=False),
orientation="horizontal"),
Item('plot',editor=ComponentEditor(), show_label=False),
# put the info box that lists the mouse location tuple kls karl
width=1000, height=600, resizable=True, title="Chaco Plot",
# How do I activate these? buttons=["do_nothing","do_nothing_1"]
)
def __init__(self):
super(ContainerExample, self).__init__()
filenames=[]
for parameter in sys.argv[1:]:
print "processing parameter",parameter
if parameter.find("=")==-1 :
print "no = in parameter",parameter,"must be a file name"
filenames.append(parameter)
if len(filenames)<1:
print "just to help me test, if there are no files in the list, "
print "I will append the file foldplot1.rsf"
filenames.append('foldplot1.rsf')
self.seis_data_0=SeisData(filenames[0])
self.cmap = jet
self.displayParameters=DisplayParameters()
self.slice_y=self.displayParameters.slice_y
print "self.slice_y=",self.slice_y
self.arrayPlotData=ArrayPlotData()
self._update_images()
x_extents=(self.seis_data_0.axis_start[1],
self.seis_data_0.axis_end[1])
y_extents=(self.seis_data_0.axis_start[2],
self.seis_data_0.axis_end[2])
bottomplot = Plot(self.arrayPlotData, origin="top left")
self.bottomplot=bottomplot
imgplot = bottomplot.img_plot("xz",
xbounds=x_extents,
ybounds=y_extents,
colormap=self.cmap)[0]
self.bottom = imgplot
plotright = Plot(self.arrayPlotData, origin="top left",
range2d=bottomplot.range2d)
imgplotright = plotright.img_plot("xz",
xbounds=x_extents,
ybounds=y_extents,
colormap=self.cmap)[0]
self.right = imgplotright
container = HPlotContainer(fill_padding=True,
bgcolor = "white", use_backbuffer=True)
container.add(bottomplot)
container.add(plotright)
self.plot = container
self.displayParameters=DisplayParameters()
self.slice_y=self.displayParameters.slice_y
imgplot.tools.append(CustomTool(imgplot))
imgplot.tools.append(PanTool(imgplot, constrain_key="shift"))
imgplot.overlays.append(ZoomTool(component=imgplot,
tool_mode="box", always_on=False))
imgplotright.tools.append(PanTool(imgplotright, constrain_key="shift"))
imgplotright.overlays.append(ZoomTool(component=self.right,
tool_mode="box", always_on=False))
def _update_images(self):
if self.displayParameters.gain != 0:
rgain=1./self.displayParameters.gain
else:
rgain=1
print "rgain=",rgain
range = DataRange1D(low=self.seis_data_0.minval*rgain,
high=self.seis_data_0.maxval*rgain)
self.colormap = self.cmap(range)
slice=transpose(self.seis_data_0.vals[self.slice_y, :, :])
self.slice=slice
colorslice=(self.colormap.map_screen(slice) * 255).astype(uint8)
        # Transpose required because img_plot() expects data in row-major order
# self.arrayPlotData.set_data("xz", colorslicexz)
self.arrayPlotData.set_data("xz", colorslice)
def _marker_size_changed(self):
self.scatter.marker_size = self.marker_size
def _color_changed(self):
self.scatter.marker_size = self.marker_size
def _display_button_fired(self):
print "Display button pushed"
self.displayParameters.edit_traits()
self._update_images()
def _prev_fired(self):
print "prev button pushed"
slice_y = self.slice_y - self.displayParameters.slice_inc
if(slice_y < 0):
slice_y = self.seis_data_0.vals.shape[0]-1
print "after decrement slice_y=",slice_y
self.slice_y=slice_y
self._update_images()
def _next_fired(self):
print "next button pushed"
slice_y = self.slice_y + self.displayParameters.slice_inc
print "shape=",self.seis_data_0.vals.shape
if(slice_y >= self.seis_data_0.vals.shape[0]):
slice_y = 0
print "after increment slice_y=",slice_y
self.slice_y=slice_y
self._update_images()
def _unzoom_fired(self):
print "unzoom button pushed"
print "self.bottomplot.range2d=",self.bottomplot.range2d
print "xmin/xmax=", \
self.bottomplot.range2d.x_range.low, \
self.bottomplot.range2d.x_range.high
print "ymin/ymax=", \
self.bottomplot.range2d.y_range.low, \
self.bottomplot.range2d.y_range.high
self.bottomplot.range2d.x_range.low=self.seis_data_0.axis_start[1]
self.bottomplot.range2d.x_range.high=self.seis_data_0.axis_end[1]
self.bottomplot.range2d.y_range.low=self.seis_data_0.axis_start[2]
self.bottomplot.range2d.y_range.high=self.seis_data_0.axis_end[2]
class DisplayParameters(HasTraits):
gain = Range(low=.0001,high=10000, value=1.0)
vaxis = Int(0)
haxis = Int(1)
slice_y = Int(63)
slice_inc = Int(10)
class CustomTool(BaseTool):
def normal_mouse_move(self, event):
# print "type event=",type(event),"Screen point:", event.x, event.y
#print "Data:", self.component.map_data((event.x, event.y))
x=int(round(self.component.x_mapper.map_data(event.x)))
y=int(round(self.component.y_mapper.map_data(event.y)))
# print "Data:", x, y #, self.slice(x,y)
#print "current_pointer_position", event.current_pointer_position
#print "scale_xy", event.scale_xy
#print "event", event
if __name__ == "__main__":
myContainerExample=ContainerExample()
myContainerExample.configure_traits()
| gpl-2.0 | -4,594,332,212,451,156,000 | 37.016949 | 80 | 0.597414 | false |
kool79/intellij-community | python/lib/Lib/site-packages/django/contrib/contenttypes/views.py | 295 | 3025 | from django import http
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import ObjectDoesNotExist
def shortcut(request, content_type_id, object_id):
"Redirect to an object's page based on a content-type ID and an object ID."
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise http.Http404("Content type %s object has no associated model" % content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise http.Http404("Content type %s object %s doesn't exist" % (content_type_id, object_id))
try:
absurl = obj.get_absolute_url()
except AttributeError:
raise http.Http404("%s objects don't have get_absolute_url() methods" % content_type.name)
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if Site._meta.installed:
opts = obj._meta
# First, look for an many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = get_current_site(request).domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.is_secure() and 'https' or 'http'
return http.HttpResponseRedirect('%s://%s%s' % (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
| apache-2.0 | 1,950,682,146,996,222,200 | 41.605634 | 100 | 0.617521 | false |
secdev/scapy | scapy/contrib/spbm.py | 5 | 2442 | # This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# IEEE 802.1aq - Shortest Path Bridging Mac-in-mac (SPBM):
# Ethernet based link state protocol that enables Layer 2 Unicast, Layer 2 Multicast, Layer 3 Unicast, and Layer 3 Multicast virtualized services # noqa: E501
# https://en.wikipedia.org/wiki/IEEE_802.1aq
# Modeled after the scapy VXLAN contribution
# scapy.contrib.description = Shortest Path Bridging Mac-in-mac (SPBM)
# scapy.contrib.status = loads
"""
Example SPB Frame Creation
Note the outer Dot1Q Ethertype marking (0x88e7)
backboneEther = Ether(dst='00:bb:00:00:90:00', src='00:bb:00:00:40:00', type=0x8100) # noqa: E501
backboneDot1Q = Dot1Q(vlan=4051,type=0x88e7)
backboneServiceID = SPBM(prio=1,isid=20011)
customerEther = Ether(dst='00:1b:4f:5e:ca:00',src='00:00:00:00:00:01',type=0x8100) # noqa: E501
customerDot1Q = Dot1Q(prio=1,vlan=11,type=0x0800)
customerIP = IP(src='10.100.11.10',dst='10.100.12.10',id=0x0629,len=106) # noqa: E501
customerUDP = UDP(sport=1024,dport=1025,chksum=0,len=86)
spb_example = backboneEther/backboneDot1Q/backboneServiceID/customerEther/customerDot1Q/customerIP/customerUDP/"Payload" # noqa: E501
"""
from scapy.packet import Packet, bind_layers
from scapy.fields import BitField, ThreeBytesField
from scapy.layers.l2 import Ether, Dot1Q, Dot1AD
class SPBM(Packet):
name = "SPBM"
fields_desc = [BitField("prio", 0, 3),
BitField("dei", 0, 1),
BitField("nca", 0, 1),
BitField("res1", 0, 1),
BitField("res2", 0, 2),
ThreeBytesField("isid", 0)]
def mysummary(self):
return self.sprintf("SPBM (isid=%SPBM.isid%")
bind_layers(Ether, SPBM, type=0x88e7)
bind_layers(Dot1Q, SPBM, type=0x88e7)
bind_layers(Dot1AD, SPBM, type=0x88e7)
bind_layers(SPBM, Ether)
| gpl-2.0 | -4,815,096,797,782,140,000 | 39.7 | 159 | 0.700246 | false |
chienlieu2017/it_management | odoo/addons/payment_paypal/controllers/main.py | 1 | 4633 | # -*- coding: utf-8 -*-
import json
import logging
import pprint
import urllib2
import werkzeug
from odoo import http
from odoo.http import request
_logger = logging.getLogger(__name__)
class PaypalController(http.Controller):
_notify_url = '/payment/paypal/ipn/'
_return_url = '/payment/paypal/dpn/'
_cancel_url = '/payment/paypal/cancel/'
def _get_return_url(self, **post):
""" Extract the return URL from the data coming from paypal. """
return_url = post.pop('return_url', '')
if not return_url:
custom = json.loads(post.pop('custom', False) or post.pop('cm', False) or '{}')
return_url = custom.get('return_url', '/')
return return_url
def _parse_pdt_response(self, response):
""" Parse a text reponse for a PDT verification .
:param response str: text response, structured in the following way:
STATUS\nkey1=value1\nkey2=value2...\n
:rtype tuple(str, dict)
:return: tuple containing the STATUS str and the key/value pairs
parsed as a dict
"""
lines = filter(None, response.split('\n'))
status = lines.pop(0)
pdt_post = dict(line.split('=', 1) for line in lines)
return status, pdt_post
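    # Illustrative example of the parsing above (the response body is a
    # fabricated PDT-style payload, not one captured from PayPal):
    #
    #   status, data = self._parse_pdt_response('SUCCESS\nmc_gross=19.95\ntxn_id=abc123\n')
    #   # status == 'SUCCESS', data == {'mc_gross': '19.95', 'txn_id': 'abc123'}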
def paypal_validate_data(self, **post):
""" Paypal IPN: three steps validation to ensure data correctness
- step 1: return an empty HTTP 200 response -> will be done at the end
by returning ''
- step 2: POST the complete, unaltered message back to Paypal (preceded
by cmd=_notify-validate or _notify-synch for PDT), with same encoding
         - step 3: paypal sends either VERIFIED or INVALID (single word) for IPN
or SUCCESS or FAIL (+ data) for PDT
Once data is validated, process it. """
res = False
new_post = dict(post, cmd='_notify-validate')
reference = post.get('item_number')
tx = None
if reference:
tx = request.env['payment.transaction'].search([('reference', '=', reference)])
paypal_urls = request.env['payment.acquirer']._get_paypal_urls(tx and tx.acquirer_id.environment or 'prod')
        pdt_request = bool(new_post.get('amt'))  # check for specific pdt param
if pdt_request:
# this means we are in PDT instead of DPN like before
# fetch the PDT token
new_post['at'] = request.env['ir.config_parameter'].sudo().get_param('payment_paypal.pdt_token')
new_post['cmd'] = '_notify-synch' # command is different in PDT than IPN/DPN
validate_url = paypal_urls['paypal_form_url']
urequest = urllib2.Request(validate_url, werkzeug.url_encode(new_post))
uopen = urllib2.urlopen(urequest)
resp = uopen.read()
if pdt_request:
resp, post = self._parse_pdt_response(resp)
if resp == 'VERIFIED' or pdt_request and resp == 'SUCCESS':
_logger.info('Paypal: validated data')
res = request.env['payment.transaction'].sudo().form_feedback(post, 'paypal')
elif resp == 'INVALID' or pdt_request and resp == 'FAIL':
_logger.warning('Paypal: answered INVALID/FAIL on data verification')
else:
_logger.warning('Paypal: unrecognized paypal answer, received %s instead of VERIFIED/SUCCESS or INVALID/FAIL (validation: %s)' % (resp, 'PDT' if pdt_request else 'IPN/DPN'))
return res
@http.route('/payment/paypal/ipn/', type='http', auth='none', methods=['POST'], csrf=False)
def paypal_ipn(self, **post):
""" Paypal IPN. """
_logger.info('Beginning Paypal IPN form_feedback with post data %s', pprint.pformat(post)) # debug
self.paypal_validate_data(**post)
return ''
@http.route('/payment/paypal/dpn', type='http', auth="none", methods=['POST', 'GET'], csrf=False)
def paypal_dpn(self, **post):
""" Paypal DPN """
_logger.info('Beginning Paypal DPN form_feedback with post data %s', pprint.pformat(post)) # debug
return_url = self._get_return_url(**post)
self.paypal_validate_data(**post)
return werkzeug.utils.redirect(return_url)
@http.route('/payment/paypal/cancel', type='http', auth="none", csrf=False)
def paypal_cancel(self, **post):
""" When the user cancels its Paypal payment: GET on this route """
_logger.info('Beginning Paypal cancel with post data %s', pprint.pformat(post)) # debug
return_url = self._get_return_url(**post)
return werkzeug.utils.redirect(return_url)
| gpl-3.0 | -745,103,968,935,849,100 | 44.871287 | 185 | 0.616447 | false |
pixelgremlins/ztruck | dj/lib/python2.7/site-packages/django/contrib/sitemaps/__init__.py | 84 | 6499 | import warnings
from django.apps import apps as django_apps
from django.conf import settings
from django.core import urlresolvers, paginator
from django.core.exceptions import ImproperlyConfigured
from django.utils import translation
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves.urllib.parse import urlencode
from django.utils.six.moves.urllib.request import urlopen
PING_URL = "http://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
pass
def ping_google(sitemap_url=None, ping_url=PING_URL):
"""
Alerts Google that the sitemap for the current site has been updated.
If sitemap_url is provided, it should be an absolute path to the sitemap
for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
function will attempt to deduce it by using urlresolvers.reverse().
"""
if sitemap_url is None:
try:
# First, try to get the "index" sitemap URL.
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index')
except urlresolvers.NoReverseMatch:
try:
# Next, try for the "global" sitemap URL.
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap')
except urlresolvers.NoReverseMatch:
pass
if sitemap_url is None:
raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")
if not django_apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured("ping_google requires django.contrib.sites, which isn't installed.")
Site = django_apps.get_model('sites.Site')
current_site = Site.objects.get_current()
url = "http://%s%s" % (current_site.domain, sitemap_url)
params = urlencode({'sitemap': url})
urlopen("%s?%s" % (ping_url, params))
class Sitemap(object):
# This limit is defined by Google. See the index documentation at
# http://sitemaps.org/protocol.php#index.
limit = 50000
# If protocol is None, the URLs in the sitemap will use the protocol
# with which the sitemap was requested.
protocol = None
def __get(self, name, obj, default=None):
try:
attr = getattr(self, name)
except AttributeError:
return default
if callable(attr):
return attr(obj)
return attr
def items(self):
return []
def location(self, obj):
return obj.get_absolute_url()
def _get_paginator(self):
return paginator.Paginator(self.items(), self.limit)
paginator = property(_get_paginator)
def get_urls(self, page=1, site=None, protocol=None):
# Determine protocol
if self.protocol is not None:
protocol = self.protocol
if protocol is None:
protocol = 'http'
# Determine domain
if site is None:
if django_apps.is_installed('django.contrib.sites'):
Site = django_apps.get_model('sites.Site')
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
pass
if site is None:
raise ImproperlyConfigured(
"To use sitemaps, either enable the sites framework or pass "
"a Site/RequestSite object in your view."
)
domain = site.domain
if getattr(self, 'i18n', False):
urls = []
current_lang_code = translation.get_language()
for lang_code, lang_name in settings.LANGUAGES:
translation.activate(lang_code)
urls += self._urls(page, protocol, domain)
translation.activate(current_lang_code)
else:
urls = self._urls(page, protocol, domain)
return urls
def _urls(self, page, protocol, domain):
urls = []
latest_lastmod = None
all_items_lastmod = True # track if all items have a lastmod
for item in self.paginator.page(page).object_list:
loc = "%s://%s%s" % (protocol, domain, self.__get('location', item))
priority = self.__get('priority', item, None)
lastmod = self.__get('lastmod', item, None)
if all_items_lastmod:
all_items_lastmod = lastmod is not None
if (all_items_lastmod and
(latest_lastmod is None or lastmod > latest_lastmod)):
latest_lastmod = lastmod
url_info = {
'item': item,
'location': loc,
'lastmod': lastmod,
'changefreq': self.__get('changefreq', item, None),
'priority': str(priority if priority is not None else ''),
}
urls.append(url_info)
if all_items_lastmod and latest_lastmod:
self.latest_lastmod = latest_lastmod
return urls
class FlatPageSitemap(Sitemap):
# This class is not a subclass of
# django.contrib.flatpages.sitemaps.FlatPageSitemap to avoid a
# circular import problem.
def __init__(self):
warnings.warn(
"'django.contrib.sitemaps.FlatPageSitemap' is deprecated. "
"Use 'django.contrib.flatpages.sitemaps.FlatPageSitemap' instead.",
RemovedInDjango19Warning,
stacklevel=2
)
def items(self):
if not django_apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured("FlatPageSitemap requires django.contrib.sites, which isn't installed.")
Site = django_apps.get_model('sites.Site')
current_site = Site.objects.get_current()
return current_site.flatpage_set.filter(registration_required=False)
class GenericSitemap(Sitemap):
priority = None
changefreq = None
def __init__(self, info_dict, priority=None, changefreq=None):
self.queryset = info_dict['queryset']
self.date_field = info_dict.get('date_field', None)
self.priority = priority
self.changefreq = changefreq
def items(self):
# Make sure to return a clone; we don't want premature evaluation.
return self.queryset.filter()
def lastmod(self, item):
if self.date_field is not None:
return getattr(item, self.date_field)
return None
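# Hedged usage sketch for GenericSitemap (the model and field names are
# placeholders): in a URLconf one typically writes
#
#   info_dict = {'queryset': Entry.objects.all(), 'date_field': 'pub_date'}
#   sitemaps = {'blog': GenericSitemap(info_dict, priority=0.6)}
#
# and passes the `sitemaps` dict to django.contrib.sitemaps.views.sitemap.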
default_app_config = 'django.contrib.sitemaps.apps.SiteMapsConfig'
| apache-2.0 | -1,929,745,932,302,384,000 | 35.307263 | 113 | 0.617326 | false |
Dancore/SubTrigger | SubTrigger.py | 1 | 2599 | import sublime, sublime_plugin
class MytestCommand(sublime_plugin.TextCommand):
def run(self, edit):
# self.view.insert(edit, 0, "Hello, World! ")
self.view.run_command("show_panel", {"panel": "console"}) # "toggle": 0})
# print self.view.file_name(), "is now the active view"
class SublimeOnSave(sublime_plugin.EventListener):
def on_pre_save(self, view):
print "on_pre_save"
# view.run_command('mytest')
view.run_command("run_multiple_commands", {"commands": [{"command": "show_panel", "args": {"panel": "console"}, "context": "window"}]})
# print "filename is: "+str(view.file_name())
def on_post_save(self, view):
print "on_post_save"
# print "filename is: "+str(view.file_name())
def on_activated(self, view):
print "view activated"
# view.run_command("run_multiple_commands")
# view.run_command("mytest")
# Takes an array of commands (same as those you'd provide to a key binding) with
# an optional context (defaults to view commands) & runs each command in order.
# Valid contexts are 'text', 'window', and 'app' for running a TextCommand,
# WindowCommands, or ApplicationCommand respectively.
#
# The run_multiple_commands.py has been developed by Nilium - see
# http://www.sublimetext.com/forum/viewtopic.php?f=5&t=8677 for a discussion.
class RunMultipleCommandsCommand(sublime_plugin.TextCommand):
def exec_command(self, command):
if not 'command' in command:
raise Exception('No command name provided.')
args = None
if 'args' in command:
args = command['args']
# default context is the view since it's easiest to get the other contexts
# from the view
context = self.view
if 'context' in command:
context_name = command['context']
if context_name == 'window':
context = context.window()
elif context_name == 'app':
context = sublime
elif context_name == 'text':
pass
else:
raise Exception('Invalid command context "'+context_name+'".')
# skip args if not needed
if args is None:
context.run_command(command['command'])
# uncomment the next line, if you want to add a delay to the execution
# sublime.set_timeout( lambda: context.run_command(command['command']), 2000 )
else:
context.run_command(command['command'], args)
# uncomment the next line, if you want to add a delay to the execution
# sublime.set_timeout( lambda: context.run_command(command['command'], args), 2000 )
def run(self, edit, commands = None):
print "running multiple commands"
if commands is None:
return # not an error
for command in commands:
self.exec_command(command)
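# Illustrative key binding for the command above (keys and the second command
# are placeholders; the format mirrors exec_command()'s expectations):
#
#   { "keys": ["ctrl+shift+m"],
#     "command": "run_multiple_commands",
#     "args": { "commands": [
#       { "command": "show_panel", "args": {"panel": "console"}, "context": "window" },
#       { "command": "move_to", "args": {"to": "eof"} }
#     ] } }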
| gpl-3.0 | 2,388,659,439,743,564,300 | 35.605634 | 137 | 0.694113 | false |
prds21/barrial-movie | lib/atom/http.py | 27 | 10236 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module makes HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
class ProxyError(atom.http_interface.Error):
pass
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
def __init__(self, headers=None):
self.debug = False
self.headers = headers or {}
def request(self, operation, url, data=None, headers=None):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and
DELETE.
    Usage example, perform an HTTP GET on http://www.google.com/:
import atom.http
client = atom.http.HttpClient()
http_response = client.request('GET', 'http://www.google.com/')
Args:
operation: str The HTTP operation to be performed. This is usually one
of 'GET', 'POST', 'PUT', or 'DELETE'
data: filestream, list of parts, or other object which can be converted
to a string. Should be set to None when performing a GET or DELETE.
If data is a file-like object which can be read, this method will
read a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be
evaluated and sent.
url: The full URL to which the request should be sent. Can be a string
or atom.url.Url.
headers: dict of strings. HTTP headers which should be sent
in the request.
"""
if not isinstance(url, atom.url.Url):
if isinstance(url, types.StringTypes):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
all_headers = self.headers.copy()
if headers:
all_headers.update(headers)
connection = self._prepare_connection(url, all_headers)
if self.debug:
connection.debuglevel = 1
connection.putrequest(operation, self._get_access_url(url),
skip_host=True)
connection.putheader('Host', url.host)
# Overcome a bug in Python 2.4 and 2.5
# httplib.HTTPConnection.putrequest adding
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', and thus resulting the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (url.protocol == 'https' and int(url.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % url.host
replacement_header_line = 'Host: %s' % url.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
if isinstance(data, types.StringTypes):
all_headers['Content-Length'] = len(data)
else:
raise atom.http_interface.ContentLengthRequired('Unable to calculate '
'the length of the data parameter. Specify a value for '
'Content-Length')
# Set the content type to the default value if none was set.
if 'Content-Type' not in all_headers:
all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
# Send the HTTP headers.
for header_name in all_headers:
connection.putheader(header_name, all_headers[header_name])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
_send_data_part(data_part, connection)
else:
_send_data_part(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _prepare_connection(self, url, headers):
if not isinstance(url, atom.url.Url):
if isinstance(url, types.StringTypes):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
if url.protocol == 'https':
if not url.port:
return httplib.HTTPSConnection(url.host)
return httplib.HTTPSConnection(url.host, int(url.port))
else:
if not url.port:
return httplib.HTTPConnection(url.host)
return httplib.HTTPConnection(url.host, int(url.port))
def _get_access_url(self, url):
return url.to_string()
class ProxiedHttpClient(HttpClient):
"""Performs an HTTP request through a proxy.
  The proxy settings are obtained from environment variables. The URL of the
proxy server is assumed to be stored in the environment variables
'https_proxy' and 'http_proxy' respectively. If the proxy server requires
a Basic Auth authorization header, the username and password are expected to
be in the 'proxy-username' or 'proxy_username' variable and the
'proxy-password' or 'proxy_password' variable.
After connecting to the proxy server, the request is completed as in
HttpClient.request.
"""
def _prepare_connection(self, url, headers):
proxy_auth = _get_proxy_auth()
if url.protocol == 'https':
# destination is https
proxy = os.environ.get('https_proxy')
if proxy:
# Set any proxy auth headers
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = url.port
if not port:
port = '443'
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
# Set the user agent to send to the proxy
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
else:
user_agent = ''
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy)
if not proxy_url.port:
proxy_url.port = '80'
# Connect to the proxy server, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_url.host, int(proxy_url.port)))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != str(200):
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
ssl = socket.ssl(p_sock, None, None)
fake_sock = httplib.FakeSocket(p_sock, ssl)
        # Initialize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(proxy_url.host)
connection.sock=fake_sock
return connection
else:
# The request was HTTPS, but there was no https_proxy set.
return HttpClient._prepare_connection(self, url, headers)
else:
proxy = os.environ.get('http_proxy')
if proxy:
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy)
if not proxy_url.port:
proxy_url.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
else:
# The request was HTTP, but there was no http_proxy set.
return HttpClient._prepare_connection(self, url, headers)
def _get_access_url(self, url):
return url.to_string()
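# Rough sketch of the environment ProxiedHttpClient reads (values are
# placeholders):
#
#   export http_proxy="http://proxy.example.com:8080"
#   export https_proxy="http://proxy.example.com:8080"
#   export proxy_username="someuser"       # or proxy-username
#   export proxy_password="somepassword"   # or proxy-password
#
# With those set, ProxiedHttpClient().request('GET', 'http://www.google.com/')
# tunnels the call through the configured proxy.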
def _get_proxy_auth():
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
def _send_data_part(data, connection):
if isinstance(data, types.StringTypes):
connection.send(data)
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
| gpl-3.0 | -414,355,509,971,354,750 | 34.79021 | 79 | 0.655725 | false |
odooindia/odoo | addons/project_issue_sheet/__openerp__.py | 120 | 1851 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Timesheet on Issues',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds Timesheet support for the Issues/Bugs Management in Project.
=================================================================================
Worklogs can be maintained to record the number of hours spent by users handling an issue.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/project_issue_sheet_worklog.jpeg'],
'depends': [
'project_issue',
'hr_timesheet_sheet',
],
'data': [
'project_issue_sheet_view.xml',
'security/ir.model.access.csv',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,107,620,662,748,895,000 | 36.77551 | 88 | 0.576445 | false |
yuvipanda/zulip | puppet/zulip_internal/files/postgresql/pg_backup_and_purge.py | 114 | 1575 | #!/usr/bin/python
import subprocess
import sys
import logging
import dateutil.parser
import pytz
from datetime import datetime, timedelta
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
def run(args, dry_run=False):
if dry_run:
print "Would have run: " + " ".join(args)
return ""
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode:
logger.error("Could not invoke %s\nstdout: %s\nstderror: %s"
% (args[0], stdout, stderr))
sys.exit(1)
return stdout
# Only run if we're the master
if run(['psql', '-t', '-c', 'select pg_is_in_recovery()']).strip() != 'f':
sys.exit(0)
run(['env-wal-e', 'backup-push', '/var/lib/postgresql/9.1/main'])
now = datetime.now(tz=pytz.utc)
with open('/var/lib/nagios_state/last_postgres_backup', 'w') as f:
f.write(now.isoformat())
f.write("\n")
backups = {}
lines = run(['env-wal-e', 'backup-list']).split("\n")
for line in lines[1:]:
if line:
backup_name, date, _, _ = line.split()
backups[dateutil.parser.parse(date)] = backup_name
one_month_ago = now - timedelta(days=30)
for date in sorted(backups.keys(), reverse=True):
if date < one_month_ago:
run(['env-wal-e', 'delete', '--confirm', 'before', backups[date]])
# Because we're going from most recent to least recent, we
# only have to do one delete operation
break
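# Illustrative sketch (not part of the original script): the retention rule
# above applied to made-up data. Walking backups from newest to oldest, the
# first one older than the cutoff anchors a single 'delete ... before' call,
# which removes it and everything older in one wal-e operation.
def _example_retention_choice():
    cutoff = datetime(2014, 1, 10)
    toy_backups = {
        datetime(2014, 1, 30): 'base_000000030',
        datetime(2014, 1, 5): 'base_000000020',
        datetime(2013, 12, 20): 'base_000000010',
    }
    for backup_date in sorted(toy_backups.keys(), reverse=True):
        if backup_date < cutoff:
            # The anchor here is 'base_000000020'; the single wal-e call
            # would remove it together with the older 'base_000000010',
            # while 'base_000000030' is kept.
            return toy_backups[backup_date]
    return None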
| apache-2.0 | -4,915,651,480,030,616,000 | 29.882353 | 77 | 0.620317 | false |
adalke/rdkit | rdkit/ML/DecTree/UnitTestQuantTree.py | 3 | 7680 | ## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
#
# Copyright (C) 2001,2003 greg Landrum and Rational Discovery LLC
#
""" unit tests for the QuantTree implementation """
from __future__ import print_function
import unittest
import io
from rdkit import RDConfig
from rdkit.ML.DecTree import BuildQuantTree
from rdkit.ML.DecTree.QuantTree import QuantTreeNode
from rdkit.ML.Data import MLData
from rdkit.six.moves import cPickle, xrange
from rdkit.six import cmp
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: '%self.shortDescription(),end='')
self.qTree1Name=RDConfig.RDCodeDir+'/ML/DecTree/test_data/QuantTree1.pkl'
self.qTree2Name=RDConfig.RDCodeDir+'/ML/DecTree/test_data/QuantTree2.pkl'
def _setupTree1(self):
examples1 = [['p1',0,1,0.1,0],
['p2',0,0,0.1,1],
['p3',0,0,1.1,2],
['p4',0,1,1.1,2],
['p5',1,0,0.1,2],
['p6',1,0,1.1,2],
['p7',1,1,0.1,2],
['p8',1,1,1.1,0]
]
attrs = range(1,len(examples1[0])-1)
nPossibleVals = [0,2,2,0,3]
boundsPerVar=[0,0,0,1,0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1,attrs,nPossibleVals,boundsPerVar)
self.examples1 = examples1
def _setupTree2(self):
examples1 = [['p1',0.1,1,0.1,0],
['p2',0.1,0,0.1,1],
['p3',0.1,0,1.1,2],
['p4',0.1,1,1.1,2],
['p5',1.1,0,0.1,2],
['p6',1.1,0,1.1,2],
['p7',1.1,1,0.1,2],
['p8',1.1,1,1.1,0]
]
attrs = range(1,len(examples1[0])-1)
nPossibleVals = [0,0,2,0,3]
boundsPerVar=[0,1,0,1,0]
self.t2 = BuildQuantTree.QuantTreeBoot(examples1,attrs,nPossibleVals,boundsPerVar)
self.examples2 = examples1
def _setupTree1a(self):
examples1 = [['p1',0,1,0.1,4.0,0],
['p2',0,0,0.1,4.1,1],
['p3',0,0,1.1,4.2,2],
['p4',0,1,1.1,4.2,2],
['p5',1,0,0.1,4.2,2],
['p6',1,0,1.1,4.2,2],
['p7',1,1,0.1,4.2,2],
['p8',1,1,1.1,4.0,0]
]
attrs = range(1,len(examples1[0])-1)
nPossibleVals = [0,2,2,0,0,3]
boundsPerVar=[0,0,0,1,-1,0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1,attrs,nPossibleVals,boundsPerVar)
self.examples1 = examples1
def test0Cmp(self):
" testing tree comparisons "
self._setupTree1()
self._setupTree2()
assert self.t1 == self.t1, 'self equals failed'
assert self.t2 == self.t2, 'self equals failed'
assert self.t1 != self.t2, 'not equals failed'
def test1Tree(self):
" testing tree1 "
self._setupTree1()
with open(self.qTree1Name,'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
def test2Tree(self):
" testing tree2 "
self._setupTree2()
with open(self.qTree2Name,'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = cPickle.load(inFile)
assert self.t2 == t2, 'Incorrect tree generated.'
def test3Classify(self):
" testing classification "
self._setupTree1()
self._setupTree2()
for i in xrange(len(self.examples1)):
assert self.t1.ClassifyExample(self.examples1[i])==self.examples1[i][-1],\
'examples1[%d] misclassified'%i
for i in xrange(len(self.examples2)):
assert self.t2.ClassifyExample(self.examples2[i])==self.examples2[i][-1],\
'examples2[%d] misclassified'%i
def test4UnusedVars(self):
" testing unused variables "
self._setupTree1a()
with open(self.qTree1Name,'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inTFile.close()
with io.BytesIO(buf) as inFile:
t2 = cPickle.load(inFile)
assert self.t1 == t2, 'Incorrect tree generated.'
for i in xrange(len(self.examples1)):
assert self.t1.ClassifyExample(self.examples1[i])==self.examples1[i][-1],\
'examples1[%d] misclassified'%i
def test5Bug29(self):
""" a more extensive test of the cmp stuff using hand-built trees """
import copy
t1 = QuantTreeNode(None,'t1')
t1.SetQuantBounds([1.])
c1 = QuantTreeNode(t1,'c1')
c1.SetQuantBounds([2.])
t1.AddChildNode(c1)
c2 = QuantTreeNode(t1,'c2')
c2.SetQuantBounds([2.])
t1.AddChildNode(c2)
c11 = QuantTreeNode(c1,'c11')
c11.SetQuantBounds([3.])
c1.AddChildNode(c11)
c12 = QuantTreeNode(c1,'c12')
c12.SetQuantBounds([3.])
c1.AddChildNode(c12)
assert not cmp(t1,copy.deepcopy(t1)),'self equality failed'
t2 = QuantTreeNode(None,'t1')
t2.SetQuantBounds([1.])
c1 = QuantTreeNode(t2,'c1')
c1.SetQuantBounds([2.])
t2.AddChildNode(c1)
c2 = QuantTreeNode(t2,'c2')
c2.SetQuantBounds([2.])
t2.AddChildNode(c2)
c11 = QuantTreeNode(c1,'c11')
c11.SetQuantBounds([3.])
c1.AddChildNode(c11)
c12 = QuantTreeNode(c1,'c12')
c12.SetQuantBounds([3.00003])
c1.AddChildNode(c12)
assert cmp(t1,t2),'inequality failed'
def test6Bug29_2(self):
""" a more extensive test of the cmp stuff using pickled trees"""
import os
with open(os.path.join(RDConfig.RDCodeDir,'ML','DecTree','test_data','CmpTree1.pkl'),'r') as t1TFile:
buf = t1TFile.read().replace('\r\n', '\n').encode('utf-8')
t1TFile.close()
with io.BytesIO(buf) as t1File:
t1 = cPickle.load(t1File)
with open(os.path.join(RDConfig.RDCodeDir,'ML','DecTree','test_data','CmpTree2.pkl'),'r') as t2TFile:
buf = t2TFile.read().replace('\r\n', '\n').encode('utf-8')
t2TFile.close()
with io.BytesIO(buf) as t2File:
t2 = cPickle.load(t2File)
assert cmp(t1,t2),'equality failed'
def test7Recycle(self):
""" try recycling descriptors """
examples1 = [[3,0,0],
[3,1,1],
[1,0,0],
[0,0,1],
[1,1,0],
]
attrs = range(2)
nPossibleVals = [2,2,2]
boundsPerVar=[1,0,0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples1,attrs,nPossibleVals,boundsPerVar,
recycleVars=1)
assert self.t1.GetLabel()==0,self.t1.GetLabel()
assert self.t1.GetChildren()[0].GetLabel()==1
assert self.t1.GetChildren()[1].GetLabel()==1
assert self.t1.GetChildren()[1].GetChildren()[0].GetLabel()==0
assert self.t1.GetChildren()[1].GetChildren()[1].GetLabel()==0
def test8RandomForest(self):
""" try random forests descriptors """
import random
random.seed(23)
nAttrs = 100
nPts = 10
examples = []
for i in range(nPts):
descrs = [int(random.random()>0.5) for x in range(nAttrs)]
act = sum(descrs) > nAttrs/2
examples.append(descrs+[act])
attrs = list(range(nAttrs))
nPossibleVals = [2]*(nAttrs+1)
boundsPerVar=[0]*nAttrs+[0]
self.t1 = BuildQuantTree.QuantTreeBoot(examples,attrs,
nPossibleVals,boundsPerVar,
maxDepth=1,
recycleVars=1,
randomDescriptors=3)
self.assertEqual(self.t1.GetLabel(),49)
self.assertEqual(self.t1.GetChildren()[0].GetLabel(),3)
self.assertEqual(self.t1.GetChildren()[1].GetLabel(),54)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,270,667,071,428,813,800 | 32.103448 | 105 | 0.580729 | false |
mraduldubey/webpage2kindle | webpage2kindle.py | 1 | 3273 | #!/usr/bin/python
import os,os.path,sys,pdfkit,requests,re
import getpass
import pynotify
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import encoders
#IMPORTANT--Make sure that access for 'less secure apps' is allowed in your Gmail settings.
#IMPORTANT--Make sure that the email you use here is added as a trusted address in your Amazon Kindle settings.
def URL_to_PDF( URL, filename, pdfobj ):
'''Convert to pdf if URL given is not the direct link to pdf. Save on disk.'''
res = requests.get(URL)
#Validating URL
res.raise_for_status()
#If URL is a direct Link to a pdf file, then save directly on DISK.
if pdfobj.search(URL):
with open(filename,'wb') as f:
f.write(res.content)
f.close()
#Convert to pdf and save on DISK using pdfkit.
else:
pdfkit.from_url( URL, filename )
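# Illustrative sketch (not part of the original script): the two paths taken
# by URL_to_PDF. The URLs and filenames below are made-up examples.
def _example_url_to_pdf_usage():
	pdf_pattern = re.compile(r'\.pdf$')
	# A regular web page is rendered to a PDF through pdfkit.
	URL_to_PDF('http://example.com/article', 'article.pdf', pdf_pattern)
	# A direct link to a PDF is downloaded and written to disk unchanged.
	URL_to_PDF('http://example.com/paper.pdf', 'paper.pdf', pdf_pattern)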
def email_to_kindle( URL, filename, from_address, to_address ):
'''Sending the saved PDF on disk, to the configured kindle email address.'''
	#Do not want to configure the password in an unencrypted file.
print "Enter your password for account:", from_address, " (Will not be saved)"
password = getpass.getpass()
print 'Adding webpage to kindle...'
msg = MIMEMultipart()
msg['From'] = from_address
msg['To'] = to_address
msg['Subject'] = "Kindle Document by Webpage2Kindle."
	body = "The document named " + filename + " from " + URL + " has been added to this Kindle account: " + to_address + "."
#Text added to email body.
msg.attach(MIMEText(body, 'plain'))
#PDF Object.
attachment = open(filename, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
#Add PDF to email header.
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
#Attach the header to email.
msg.attach(part)
#Starting Gmail server.
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
#LogIn Attempt to the from_address email.
server.login(from_address, password)
text = msg.as_string()
#Push the email.
server.sendmail(from_address, to_address, text)
#Quit the server after sending email.
server.quit()
#Deleting the saved PDF file from DISK.
os.remove(filename)
return True
if __name__ == '__main__':
filename = sys.argv[2]
URL = sys.argv[1]
#The email added to trusted email in Kindle settings.
from_address = "YOUR_TRUSTED_GMAIL"
#The Kindle email.
to_address = "YOUR_KINDLE_EMAIL"
	pdfobj = re.compile(r'\.pdf$')
httpobj = re.compile(r'^http')
#Add '.pdf' to file at the end, if not supplied as cmd line arguments e.g. 'thefirstbook' to 'thefirstbook.pdf'
if not pdfobj.search(filename):
filename += '.pdf'
	#Prepend the 'http://' scheme to the URL if it is missing from the cmd line argument e.g. 'www.google.com' to 'http://www.google.com'.
if not httpobj.search( URL ):
URL = 'http://' + URL
URL_to_PDF( URL, filename, pdfobj )
if email_to_kindle( URL, filename, from_address, to_address ):
print "The document has been added to the kindle."
pynotify.init('mraduldubey')
notify = pynotify.Notification('webpage2kindle','The document has been added to the kindle.',os.path.join(os.getcwd(),'kindle.png'))
notify.show()
| gpl-3.0 | -1,687,208,103,624,151,600 | 25.609756 | 134 | 0.707608 | false |
tigerbunny/bitmessage | src/class_objectHashHolder.py | 15 | 2004 | # objectHashHolder is a timer-driven thread. One objectHashHolder thread is used
# by each sendDataThread. The sendDataThread uses it whenever it needs to
# advertise an object to peers in an inv message, or advertise a peer to other
# peers in an addr message. Instead of sending them out immediately, it must
# wait a random number of seconds for each connection so that different peers
# get different objects at different times. Thus an attacker who connects to
# many network nodes and receives a message first from Alice cannot be sure
# whether Alice is the node that originated the message.
import random
import time
import threading
class objectHashHolder(threading.Thread):
def __init__(self, sendDataThreadMailbox):
threading.Thread.__init__(self)
self.shutdown = False
self.sendDataThreadMailbox = sendDataThreadMailbox # This queue is used to submit data back to our associated sendDataThread.
self.collectionOfHashLists = {}
self.collectionOfPeerLists = {}
for i in range(10):
self.collectionOfHashLists[i] = []
self.collectionOfPeerLists[i] = []
def run(self):
iterator = 0
while not self.shutdown:
if len(self.collectionOfHashLists[iterator]) > 0:
self.sendDataThreadMailbox.put((0, 'sendinv', self.collectionOfHashLists[iterator]))
self.collectionOfHashLists[iterator] = []
if len(self.collectionOfPeerLists[iterator]) > 0:
self.sendDataThreadMailbox.put((0, 'sendaddr', self.collectionOfPeerLists[iterator]))
self.collectionOfPeerLists[iterator] = []
iterator += 1
iterator %= 10
time.sleep(1)
def holdHash(self,hash):
self.collectionOfHashLists[random.randrange(0, 10)].append(hash)
def holdPeer(self,peerDetails):
self.collectionOfPeerLists[random.randrange(0, 10)].append(peerDetails)
def close(self):
self.shutdown = True | mit | 9,104,127,570,577,058,000 | 43.555556 | 133 | 0.688623 | false |
asimshankar/tensorflow | tensorflow/python/kernel_tests/padding_fifo_queue_test.py | 1 | 59357 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("b/120545219")
class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32),
((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
(None, None),))
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.float32, dtypes_lib.int32),
((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
(4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.cached_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.cached_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
self.evaluate(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], self.evaluate(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeued_t)
self.assertEqual(elems[3], self.evaluate(cleanup_dequeue_t))
def close():
self.evaluate(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
dtypes_lib.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_2 = data_flow_ops.PaddingFIFOQueue(
15, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_b")
q_b_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_c")
q_c_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_f")
q_f_2 = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
@test_util.run_deprecated_v1
def testResetOfBlockingOperation(self):
with self.cached_session() as sess:
q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
[tensor_shape.TensorShape(None)])
class QueueFromListTest(test.TestCase):
def testQueueFromListShapes(self):
which = constant_op.constant(1)
def _cmp(expected, *shapes):
qs = [
data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
[tensor_shape.TensorShape(s)])
for s in shapes
]
s_expected = tensor_shape.TensorShape(expected)
s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
q_u_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
q_f_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
which = constant_op.constant(1)
s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
test.main()
| apache-2.0 | -6,433,804,936,346,545,000 | 36.35494 | 80 | 0.603939 | false |
hangzhang925/WhereHows | wherehows-etl/src/main/resources/jython/LdapTransform.py | 3 | 9171 | #
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from org.slf4j import LoggerFactory
from wherehows.common import Constant
from com.ziclix.python.sql import zxJDBC
import sys
class LdapTransform:
_tables = {"ldap_user": {
"columns": "app_id, is_active, user_id, urn, full_name, display_name, title, employee_number, manager_urn, email, department_id, department_name, start_date, mobile_phone, wh_etl_exec_id",
"file": "ldap_user_record.csv",
"table": "stg_dir_external_user_info",
"nullif_columns":
{"department_id": "''",
"employee_number": 0,
"start_date": "'0000-00-00'",
"manager_urn": "''",
"department_name": "''",
"mobile_phone": "''",
"email": "''",
"title": "''"}
},
"ldap_group": {"columns": "app_id, group_id, sort_id, user_app_id, user_id, wh_etl_exec_id",
"file": "ldap_group_record.csv",
"table": "stg_dir_external_group_user_map",
"nullif_columns": {"user_id": "''"}
},
"ldap_group_flatten": {"columns": "app_id, group_id, sort_id, user_app_id, user_id, wh_etl_exec_id",
"file": "ldap_group_flatten_record.csv",
"table": "stg_dir_external_group_user_map_flatten"
}
}
_read_file_template = """
LOAD DATA LOCAL INFILE '{folder}/{file}'
INTO TABLE {table}
FIELDS TERMINATED BY '\x1a' ESCAPED BY '\0'
LINES TERMINATED BY '\n'
({columns});
"""
_update_column_to_null_template = """
UPDATE {table} stg
SET {column} = NULL
WHERE {column} = {column_value} and app_id = {app_id}
"""
_update_manager_info = """
update {table} stg
join (select t1.app_id, t1.user_id, t1.employee_number, t2.user_id as manager_user_id, t2.employee_number as manager_employee_number from
{table} t1 join {table} t2 on t1.manager_urn = t2.urn and t1.app_id = t2.app_id
where t1.app_id = {app_id}
) s on stg.app_id = s.app_id and stg.user_id = s.user_id
set stg.manager_user_id = s.manager_user_id
, stg.manager_employee_number = s.manager_employee_number
WHERE stg.app_id = {app_id}
"""
_get_manager_edge = """
select user_id, manager_user_id from {table} stg
where app_id = {app_id} and manager_user_id is not null and user_id <> manager_user_id
"""
_update_hierarchy_info = """
update {table} stg
set org_hierarchy = CASE {org_hierarchy_long_string} END,
org_hierarchy_depth = CASE {org_hierarchy_depth_long_string} END
where app_id = {app_id} and user_id in ({user_ids})
"""
_update_hierarchy_info_per_row = """
update {table} stg
set org_hierarchy = '{org_hierarchy}',
org_hierarchy_depth = {org_hierarchy_depth}
where app_id = {app_id} and user_id = '{user_id}'
"""
_clear_staging_tempalte = """
DELETE FROM {table} where app_id = {app_id}
"""
def __init__(self, args):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
args[Constant.WH_DB_USERNAME_KEY],
args[Constant.WH_DB_PASSWORD_KEY],
args[Constant.WH_DB_DRIVER_KEY])
self.wh_cursor = self.wh_con.cursor()
self.app_id = int(args[Constant.JOB_REF_ID_KEY])
self.group_app_id = int(args[Constant.LDAP_GROUP_APP_ID_KEY])
self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
self.metadata_folder = self.app_folder + "/" + str(self.app_id)
self.ceo_user_id = args[Constant.LDAP_CEO_USER_ID_KEY]
def run(self):
try:
self.read_file_to_stg()
self.update_null_value()
self.update_manager_info()
self.update_hierarchy_info()
finally:
self.wh_cursor.close()
self.wh_con.close()
def read_file_to_stg(self):
for table in self._tables:
t = self._tables[table]
# Clear stagging table
query = self._clear_staging_tempalte.format(table=t.get("table"), app_id=self.app_id)
print query
self.wh_cursor.execute(query)
self.wh_con.commit()
# Load file into stagging table
query = self._read_file_template.format(folder=self.metadata_folder, file=t.get("file"), table=t.get("table"), columns=t.get("columns"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_null_value(self):
for table in self._tables:
t = self._tables[table]
if 'nullif_columns' in t:
for column in t['nullif_columns']:
query = self._update_column_to_null_template.format(table=t.get("table"), column=column, column_value=t['nullif_columns'][column], app_id=self.app_id)
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_manager_info(self):
t = self._tables["ldap_user"]
query = self._update_manager_info.format(table=t.get("table"), app_id=self.app_id)
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_hierarchy_info(self):
t = self._tables["ldap_user"]
query = self._get_manager_edge.format(table=t.get("table"), app_id=self.app_id)
self.logger.debug(query)
self.wh_cursor.execute(query)
user_mgr_map = dict()
hierarchy = dict()
for row in self.wh_cursor:
user_mgr_map[row[0]] = row[1]
for user in user_mgr_map:
self.find_path_for_user(user, user_mgr_map, hierarchy)
case_org_hierarchy_template = " WHEN user_id = '{user_id}' THEN '{org_hierarchy}' "
case_org_hierarchy_depth_template = " WHEN user_id = '{user_id}' THEN {org_hierarchy_depth} "
user_ids = []
org_hierarchy_long_string = ""
org_hierarchy_depth_long_string = ""
count = 0
for user in hierarchy:
if hierarchy[user] is not None:
user_ids.append("'" + user + "'")
org_hierarchy_long_string += case_org_hierarchy_template.format(user_id=user, org_hierarchy=hierarchy[user][0])
org_hierarchy_depth_long_string += case_org_hierarchy_depth_template.format(user_id=user, org_hierarchy_depth=hierarchy[user][1])
count += 1
if count % 1000 == 0:
query = self._update_hierarchy_info.format(table=t.get("table"), app_id=self.app_id, user_ids=",".join(user_ids), org_hierarchy_long_string=org_hierarchy_long_string,
org_hierarchy_depth_long_string=org_hierarchy_depth_long_string)
# self.logger.debug(query)
self.wh_cursor.executemany(query)
user_ids = []
org_hierarchy_long_string = ""
org_hierarchy_depth_long_string = ""
query = self._update_hierarchy_info.format(table=t.get("table"), app_id=self.app_id, user_ids=",".join(user_ids), org_hierarchy_long_string=org_hierarchy_long_string,
org_hierarchy_depth_long_string=org_hierarchy_depth_long_string)
# self.logger.debug(query)
self.wh_cursor.executemany(query)
self.wh_con.commit()
def find_path_for_user(self, start, user_mgr_map, hierarchy):
if start in hierarchy:
return hierarchy[start]
if start is None or start == '':
return None
path = "/" + start
depth = 0
user = start
while user in user_mgr_map:
if user == self.ceo_user_id or user == user_mgr_map[user]:
break
user = user_mgr_map[user]
path = "/" + user + path
depth += 1
if user == self.ceo_user_id:
break
if path:
hierarchy[start] = (path, depth)
if len(hierarchy) % 1000 == 0:
self.logger.info("%d hierarchy path created in cache so far. [%s]" % (len(hierarchy), start))
return (path, depth)
if __name__ == "__main__":
props = sys.argv[1]
lt = LdapTransform(props)
lt.run()
| apache-2.0 | -3,446,747,300,276,571,000 | 40.310811 | 192 | 0.557518 | false |
ibuler/jumpserver | connect.py | 1 | 33558 | #!/usr/bin/env python
# coding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import re
import time
import datetime
import textwrap
import getpass
import readline
import django
import paramiko
import errno
import struct, fcntl, signal, socket, select
from io import open as copen
import uuid
os.environ['DJANGO_SETTINGS_MODULE'] = 'jumpserver.settings'
if not django.get_version().startswith('1.6'):
setup = django.setup()
from django.contrib.sessions.models import Session
from jumpserver.api import ServerError, User, Asset, PermRole, AssetGroup, get_object, mkdir, get_asset_info
from jumpserver.api import logger, Log, TtyLog, get_role_key, CRYPTOR, bash, get_tmp_dir
from jperm.perm_api import gen_resource, get_group_asset_perm, get_group_user_perm, user_have_perm, PermRole
from jumpserver.settings import LOG_DIR
from jperm.ansible_api import MyRunner
# from jlog.log_api import escapeString
from jlog.models import ExecLog, FileLog
login_user = get_object(User, username=getpass.getuser())
try:
remote_ip = os.environ.get('SSH_CLIENT').split()[0]
except (IndexError, AttributeError):
remote_ip = os.popen("who -m | awk '{ print $NF }'").read().strip('()\n')
try:
import termios
import tty
except ImportError:
print '\033[1;31m仅支持类Unix系统 Only unix like supported.\033[0m'
time.sleep(3)
sys.exit()
def color_print(msg, color='red', exits=False):
"""
Print colorful string.
颜色打印字符或者退出
"""
color_msg = {'blue': '\033[1;36m%s\033[0m',
'green': '\033[1;32m%s\033[0m',
'yellow': '\033[1;33m%s\033[0m',
'red': '\033[1;31m%s\033[0m',
'title': '\033[30;42m%s\033[0m',
'info': '\033[32m%s\033[0m'}
msg = color_msg.get(color, 'red') % msg
print msg
if exits:
time.sleep(2)
sys.exit()
return msg
def write_log(f, msg):
msg = re.sub(r'[\r\n]', '\r\n', msg)
f.write(msg)
f.flush()
class Tty(object):
"""
A virtual tty class
一个虚拟终端类,实现连接ssh和记录日志,基类
"""
def __init__(self, user, asset, role, login_type='ssh'):
self.username = user.username
self.asset_name = asset.hostname
self.ip = None
self.port = 22
self.ssh = None
self.channel = None
self.asset = asset
self.user = user
self.role = role
self.remote_ip = ''
self.login_type = login_type
self.vim_flag = False
self.ps1_pattern = re.compile('\[.*@.*\][\$#]')
self.vim_data = ''
@staticmethod
def is_output(strings):
newline_char = ['\n', '\r', '\r\n']
for char in newline_char:
if char in strings:
return True
return False
@staticmethod
def remove_obstruct_char(cmd_str):
'''删除一些干扰的特殊符号'''
control_char = re.compile(r'\x07 | \x1b\[1P | \r ', re.X)
cmd_str = control_char.sub('',cmd_str.strip())
patch_char = re.compile('\x08\x1b\[C') #删除方向左右一起的按键
while patch_char.search(cmd_str):
cmd_str = patch_char.sub('', cmd_str.rstrip())
return cmd_str
@staticmethod
def deal_backspace(match_str, result_command, pattern_str, backspace_num):
'''
处理删除确认键
'''
if backspace_num > 0:
if backspace_num > len(result_command):
result_command += pattern_str
result_command = result_command[0:-backspace_num]
else:
result_command = result_command[0:-backspace_num]
result_command += pattern_str
del_len = len(match_str)-3
if del_len > 0:
result_command = result_command[0:-del_len]
return result_command, len(match_str)
@staticmethod
def deal_replace_char(match_str,result_command,backspace_num):
'''
处理替换命令
'''
str_lists = re.findall(r'(?<=\x1b\[1@)\w',match_str)
tmp_str =''.join(str_lists)
result_command_list = list(result_command)
if len(tmp_str) > 1:
result_command_list[-backspace_num:-(backspace_num-len(tmp_str))] = tmp_str
elif len(tmp_str) > 0:
if result_command_list[-backspace_num] == ' ':
result_command_list.insert(-backspace_num, tmp_str)
else:
result_command_list[-backspace_num] = tmp_str
result_command = ''.join(result_command_list)
return result_command, len(match_str)
def remove_control_char(self, result_command):
"""
处理日志特殊字符
"""
control_char = re.compile(r"""
\x1b[ #%()*+\-.\/]. |
\r | #匹配 回车符(CR)
(?:\x1b\[|\x9b) [ -?]* [@-~] | #匹配 控制顺序描述符(CSI)... Cmd
(?:\x1b\]|\x9d) .*? (?:\x1b\\|[\a\x9c]) | \x07 | #匹配 操作系统指令(OSC)...终止符或振铃符(ST|BEL)
(?:\x1b[P^_]|[\x90\x9e\x9f]) .*? (?:\x1b\\|\x9c) | #匹配 设备控制串或私讯或应用程序命令(DCS|PM|APC)...终止符(ST)
\x1b. #匹配 转义过后的字符
[\x80-\x9f] | (?:\x1b\]0.*) | \[.*@.*\][\$#] | (.*mysql>.*) #匹配 所有控制字符
""", re.X)
result_command = control_char.sub('', result_command.strip())
if not self.vim_flag:
if result_command.startswith('vi') or result_command.startswith('fg'):
self.vim_flag = True
return result_command.decode('utf8',"ignore")
else:
return ''
def deal_command(self, str_r):
"""
处理命令中特殊字符
"""
str_r = self.remove_obstruct_char(str_r)
result_command = '' # 最后的结果
backspace_num = 0 # 光标移动的个数
reach_backspace_flag = False # 没有检测到光标键则为true
pattern_str = ''
while str_r:
tmp = re.match(r'\s*\w+\s*', str_r)
if tmp:
str_r = str_r[len(str(tmp.group(0))):]
if reach_backspace_flag:
pattern_str += str(tmp.group(0))
continue
else:
result_command += str(tmp.group(0))
continue
tmp = re.match(r'\x1b\[K[\x08]*', str_r)
if tmp:
result_command, del_len = self.deal_backspace(str(tmp.group(0)), result_command, pattern_str, backspace_num)
reach_backspace_flag = False
backspace_num = 0
pattern_str = ''
str_r = str_r[del_len:]
continue
tmp = re.match(r'\x08+', str_r)
if tmp:
str_r = str_r[len(str(tmp.group(0))):]
if len(str_r) != 0:
if reach_backspace_flag:
result_command = result_command[0:-backspace_num] + pattern_str
pattern_str = ''
else:
reach_backspace_flag = True
backspace_num = len(str(tmp.group(0)))
continue
else:
break
tmp = re.match(r'(\x1b\[1@\w)+', str_r) #处理替换的命令
if tmp:
result_command,del_len = self.deal_replace_char(str(tmp.group(0)), result_command, backspace_num)
str_r = str_r[del_len:]
backspace_num = 0
continue
if reach_backspace_flag:
pattern_str += str_r[0]
else:
result_command += str_r[0]
str_r = str_r[1:]
if backspace_num > 0:
result_command = result_command[0:-backspace_num] + pattern_str
result_command = self.remove_control_char(result_command)
return result_command
def get_log(self):
"""
Logging user command and output.
记录用户的日志
"""
tty_log_dir = os.path.join(LOG_DIR, 'tty')
date_today = datetime.datetime.now()
date_start = date_today.strftime('%Y%m%d')
time_start = date_today.strftime('%H%M%S')
today_connect_log_dir = os.path.join(tty_log_dir, date_start)
log_file_path = os.path.join(today_connect_log_dir, '%s_%s_%s' % (self.username, self.asset_name, time_start))
try:
mkdir(os.path.dirname(today_connect_log_dir), mode=0777)
mkdir(today_connect_log_dir, mode=0777)
except OSError:
logger.debug('创建目录 %s 失败,请修改%s目录权限' % (today_connect_log_dir, tty_log_dir))
raise ServerError('创建目录 %s 失败,请修改%s目录权限' % (today_connect_log_dir, tty_log_dir))
try:
log_file_f = open(log_file_path + '.log', 'a')
log_time_f = open(log_file_path + '.time', 'a')
except IOError:
logger.debug('创建tty日志文件失败, 请修改目录%s权限' % today_connect_log_dir)
raise ServerError('创建tty日志文件失败, 请修改目录%s权限' % today_connect_log_dir)
if self.login_type == 'ssh': # 如果是ssh连接过来,记录connect.py的pid,web terminal记录为日志的id
pid = os.getpid()
self.remote_ip = remote_ip # 获取远端IP
else:
pid = 0
log = Log(user=self.username, host=self.asset_name, remote_ip=self.remote_ip, login_type=self.login_type,
log_path=log_file_path, start_time=date_today, pid=pid)
log.save()
if self.login_type == 'web':
log.pid = log.id # 设置log id为websocket的id, 然后kill时干掉websocket
log.save()
log_file_f.write('Start at %s\r\n' % datetime.datetime.now())
return log_file_f, log_time_f, log
def get_connect_info(self):
"""
获取需要登陆的主机的信息和映射用户的账号密码
"""
asset_info = get_asset_info(self.asset)
role_key = get_role_key(self.user, self.role) # 获取角色的key,因为ansible需要权限是600,所以统一生成用户_角色key
role_pass = CRYPTOR.decrypt(self.role.password)
connect_info = {'user': self.user, 'asset': self.asset, 'ip': asset_info.get('ip'),
'port': int(asset_info.get('port')), 'role_name': self.role.name,
'role_pass': role_pass, 'role_key': role_key}
logger.debug(connect_info)
return connect_info
def get_connection(self):
"""
获取连接成功后的ssh
"""
connect_info = self.get_connect_info()
# 发起ssh连接请求 Make a ssh connection
ssh = paramiko.SSHClient()
#ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
role_key = connect_info.get('role_key')
if role_key and os.path.isfile(role_key):
try:
ssh.connect(connect_info.get('ip'),
port=connect_info.get('port'),
username=connect_info.get('role_name'),
password=connect_info.get('role_pass'),
key_filename=role_key,
look_for_keys=False)
return ssh
except (paramiko.ssh_exception.AuthenticationException, paramiko.ssh_exception.SSHException):
logger.warning(u'使用ssh key %s 失败, 尝试只使用密码' % role_key)
pass
ssh.connect(connect_info.get('ip'),
port=connect_info.get('port'),
username=connect_info.get('role_name'),
password=connect_info.get('role_pass'),
allow_agent=False,
look_for_keys=False)
except paramiko.ssh_exception.AuthenticationException, paramiko.ssh_exception.SSHException:
raise ServerError('认证失败 Authentication Error.')
except socket.error:
raise ServerError('端口可能不对 Connect SSH Socket Port Error, Please Correct it.')
else:
self.ssh = ssh
return ssh
class SshTty(Tty):
"""
A virtual tty class
一个虚拟终端类,实现连接ssh和记录日志
"""
@staticmethod
def get_win_size():
"""
This function use to get the size of the windows!
获得terminal窗口大小
"""
if 'TIOCGWINSZ' in dir(termios):
TIOCGWINSZ = termios.TIOCGWINSZ
else:
TIOCGWINSZ = 1074295912L
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(sys.stdout.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def set_win_size(self, sig, data):
"""
This function use to set the window size of the terminal!
设置terminal窗口大小
"""
try:
win_size = self.get_win_size()
self.channel.resize_pty(height=win_size[0], width=win_size[1])
except Exception:
pass
def posix_shell(self):
"""
Use paramiko channel connect server interactive.
使用paramiko模块的channel,连接后端,进入交互式
"""
log_file_f, log_time_f, log = self.get_log()
old_tty = termios.tcgetattr(sys.stdin)
pre_timestamp = time.time()
data = ''
input_mode = False
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
self.channel.settimeout(0.0)
while True:
try:
r, w, e = select.select([self.channel, sys.stdin], [], [])
flag = fcntl.fcntl(sys.stdin, fcntl.F_GETFL, 0)
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, flag|os.O_NONBLOCK)
except Exception:
pass
if self.channel in r:
try:
x = self.channel.recv(10240)
if len(x) == 0:
break
if self.vim_flag:
self.vim_data += x
index = 0
len_x = len(x)
while index < len_x:
try:
n = os.write(sys.stdout.fileno(), x[index:])
sys.stdout.flush()
index += n
except OSError as msg:
if msg.errno == errno.EAGAIN:
continue
#sys.stdout.write(x)
#sys.stdout.flush()
now_timestamp = time.time()
log_time_f.write('%s %s\n' % (round(now_timestamp-pre_timestamp, 4), len(x)))
log_time_f.flush()
log_file_f.write(x)
log_file_f.flush()
pre_timestamp = now_timestamp
log_file_f.flush()
if input_mode and not self.is_output(x):
data += x
except socket.timeout:
pass
if sys.stdin in r:
try:
x = os.read(sys.stdin.fileno(), 4096)
except OSError:
pass
input_mode = True
if str(x) in ['\r', '\n', '\r\n']:
if self.vim_flag:
match = self.ps1_pattern.search(self.vim_data)
if match:
self.vim_flag = False
data = self.deal_command(data)[0:200]
if len(data) > 0:
TtyLog(log=log, datetime=datetime.datetime.now(), cmd=data).save()
else:
data = self.deal_command(data)[0:200]
if len(data) > 0:
TtyLog(log=log, datetime=datetime.datetime.now(), cmd=data).save()
data = ''
self.vim_data = ''
input_mode = False
if len(x) == 0:
break
self.channel.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
log_file_f.write('End time is %s' % datetime.datetime.now())
log_file_f.close()
log_time_f.close()
log.is_finished = True
log.end_time = datetime.datetime.now()
log.save()
def connect(self):
"""
Connect server.
连接服务器
"""
# 发起ssh连接请求 Make a ssh connection
ssh = self.get_connection()
transport = ssh.get_transport()
transport.set_keepalive(30)
transport.use_compression(True)
# 获取连接的隧道并设置窗口大小 Make a channel and set windows size
global channel
win_size = self.get_win_size()
#self.channel = channel = ssh.invoke_shell(height=win_size[0], width=win_size[1], term='xterm')
self.channel = channel = transport.open_session()
channel.get_pty(term='xterm', height=win_size[0], width=win_size[1])
channel.invoke_shell()
try:
signal.signal(signal.SIGWINCH, self.set_win_size)
except:
pass
self.posix_shell()
# Shutdown channel socket
channel.close()
ssh.close()
class Nav(object):
"""
导航提示类
"""
def __init__(self, user):
self.user = user
self.search_result = {}
self.user_perm = {}
@staticmethod
def print_nav():
"""
Print prompt
打印提示导航
"""
msg = """\n\033[1;32m### 欢迎使用Jumpserver开源跳板机系统 ### \033[0m
1) 输入 \033[32mID\033[0m 直接登录.
2) 输入 \033[32m/\033[0m + \033[32mIP, 主机名 or 备注 \033[0m搜索.
3) 输入 \033[32mP/p\033[0m 显示您有权限的主机.
4) 输入 \033[32mG/g\033[0m 显示您有权限的主机组.
5) 输入 \033[32mG/g\033[0m\033[0m + \033[32m组ID\033[0m 显示该组下主机.
6) 输入 \033[32mE/e\033[0m 批量执行命令.
7) 输入 \033[32mU/u\033[0m 批量上传文件.
8) 输入 \033[32mD/d\033[0m 批量下载文件.
9) 输入 \033[32mH/h\033[0m 帮助.
0) 输入 \033[32mQ/q\033[0m 退出.
"""
print textwrap.dedent(msg)
def search(self, str_r=''):
gid_pattern = re.compile(r'^g\d+$')
# 获取用户授权的所有主机信息
if not self.user_perm:
self.user_perm = get_group_user_perm(self.user)
user_asset_all = self.user_perm.get('asset').keys()
# 搜索结果保存
user_asset_search = []
if str_r:
# 资产组组id匹配
if gid_pattern.match(str_r):
gid = int(str_r.lstrip('g'))
# 获取资产组包含的资产
user_asset_search = get_object(AssetGroup, id=gid).asset_set.all()
else:
# 匹配 ip, hostname, 备注
for asset in user_asset_all:
if str_r in asset.ip or str_r in str(asset.hostname) or str_r in str(asset.comment):
user_asset_search.append(asset)
else:
# 如果没有输入就展现所有
user_asset_search = user_asset_all
self.search_result = dict(zip(range(len(user_asset_search)), user_asset_search))
color_print('[%-3s] %-12s %-15s %-5s %-10s %s' % ('ID', '主机名', 'IP', '端口', '系统用户', '备注'), 'title')
for index, asset in self.search_result.items():
# 获取该资产信息
asset_info = get_asset_info(asset)
# 获取该资产包含的角色
role = [str(role.name) for role in self.user_perm.get('asset').get(asset).get('role')]
print '[%-3s] %-15s %-15s %-5s %-10s %s' % (index, asset.hostname, asset.ip, asset_info.get('port'),
role, asset.comment)
print
def print_asset_group(self):
"""
打印用户授权的资产组
"""
user_asset_group_all = get_group_user_perm(self.user).get('asset_group', [])
color_print('[%-3s] %-20s %s' % ('ID', '组名', '备注'), 'title')
for asset_group in user_asset_group_all:
print '[%-3s] %-15s %s' % (asset_group.id, asset_group.name, asset_group.comment)
print
def exec_cmd(self):
"""
批量执行命令
"""
while True:
if not self.user_perm:
self.user_perm = get_group_user_perm(self.user)
roles = self.user_perm.get('role').keys()
if len(roles) > 1: # 授权角色数大于1
color_print('[%-2s] %-15s' % ('ID', '系统用户'), 'info')
role_check = dict(zip(range(len(roles)), roles))
for i, r in role_check.items():
print '[%-2s] %-15s' % (i, r.name)
print
print "请输入运行命令所关联系统用户的ID, q退出"
try:
role_id = raw_input("\033[1;32mRole>:\033[0m ").strip()
if role_id == 'q':
break
except (IndexError, ValueError):
color_print('错误输入')
else:
role = role_check[int(role_id)]
elif len(roles) == 1: # 授权角色数为1
role = roles[0]
else:
color_print('当前用户未被授予角色,无法执行任何操作,如有疑问请联系管理员。')
return
assets = list(self.user_perm.get('role', {}).get(role).get('asset')) # 获取该用户,角色授权主机
print "授权包含该系统用户的所有主机"
for asset in assets:
print ' %s' % asset.hostname
print
print "请输入主机名或ansible支持的pattern, 多个主机:分隔, q退出"
pattern = raw_input("\033[1;32mPattern>:\033[0m ").strip()
if pattern == 'q':
break
else:
res = gen_resource({'user': self.user, 'asset': assets, 'role': role}, perm=self.user_perm)
runner = MyRunner(res)
asset_name_str = ''
print "匹配主机:"
for inv in runner.inventory.get_hosts(pattern=pattern):
print ' %s' % inv.name
asset_name_str += '%s ' % inv.name
print
while True:
print "请输入执行的命令, 按q退出"
command = raw_input("\033[1;32mCmds>:\033[0m ").strip()
if command == 'q':
break
runner.run('shell', command, pattern=pattern)
ExecLog(host=asset_name_str, user=self.user.username, cmd=command, remote_ip=remote_ip,
result=runner.results).save()
for k, v in runner.results.items():
if k == 'ok':
for host, output in v.items():
color_print("%s => %s" % (host, 'Ok'), 'green')
print output
print
else:
for host, output in v.items():
color_print("%s => %s" % (host, k), 'red')
color_print(output, 'red')
print
print "~o~ Task finished ~o~"
print
def upload(self):
while True:
if not self.user_perm:
self.user_perm = get_group_user_perm(self.user)
try:
print "进入批量上传模式"
print "请输入主机名或ansible支持的pattern, 多个主机:分隔 q退出"
pattern = raw_input("\033[1;32mPattern>:\033[0m ").strip()
if pattern == 'q':
break
else:
assets = self.user_perm.get('asset').keys()
res = gen_resource({'user': self.user, 'asset': assets}, perm=self.user_perm)
runner = MyRunner(res)
asset_name_str = ''
print "匹配主机:"
for inv in runner.inventory.get_hosts(pattern=pattern):
print inv.name
asset_name_str += '%s ' % inv.name
if not asset_name_str:
color_print('没有匹配主机')
continue
tmp_dir = get_tmp_dir()
logger.debug('Upload tmp dir: %s' % tmp_dir)
os.chdir(tmp_dir)
bash('rz')
filename_str = ' '.join(os.listdir(tmp_dir))
if not filename_str:
color_print("上传文件为空")
continue
logger.debug('上传文件: %s' % filename_str)
runner = MyRunner(res)
runner.run('copy', module_args='src=%s dest=%s directory_mode'
% (tmp_dir, tmp_dir), pattern=pattern)
ret = runner.results
FileLog(user=self.user.name, host=asset_name_str, filename=filename_str,
remote_ip=remote_ip, type='upload', result=ret).save()
logger.debug('Upload file: %s' % ret)
if ret.get('failed'):
error = '上传目录: %s \n上传失败: [ %s ] \n上传成功 [ %s ]' % (tmp_dir,
', '.join(ret.get('failed').keys()),
', '.join(ret.get('ok').keys()))
color_print(error)
else:
msg = '上传目录: %s \n传送成功 [ %s ]' % (tmp_dir, ', '.join(ret.get('ok').keys()))
color_print(msg, 'green')
print
except IndexError:
pass
def download(self):
while True:
if not self.user_perm:
self.user_perm = get_group_user_perm(self.user)
try:
print "进入批量下载模式"
print "请输入主机名或ansible支持的pattern, 多个主机:分隔,q退出"
pattern = raw_input("\033[1;32mPattern>:\033[0m ").strip()
if pattern == 'q':
break
else:
assets = self.user_perm.get('asset').keys()
res = gen_resource({'user': self.user, 'asset': assets}, perm=self.user_perm)
runner = MyRunner(res)
asset_name_str = ''
print "匹配主机:\n"
for inv in runner.inventory.get_hosts(pattern=pattern):
asset_name_str += '%s ' % inv.name
print ' %s' % inv.name
if not asset_name_str:
color_print('没有匹配主机')
continue
print
while True:
tmp_dir = get_tmp_dir()
logger.debug('Download tmp dir: %s' % tmp_dir)
print "请输入文件路径(不支持目录)"
file_path = raw_input("\033[1;32mPath>:\033[0m ").strip()
if file_path == 'q':
break
if not file_path:
color_print("文件路径为空")
continue
runner.run('fetch', module_args='src=%s dest=%s' % (file_path, tmp_dir), pattern=pattern)
ret = runner.results
FileLog(user=self.user.name, host=asset_name_str, filename=file_path, type='download',
remote_ip=remote_ip, result=ret).save()
logger.debug('Download file result: %s' % ret)
os.chdir('/tmp')
tmp_dir_name = os.path.basename(tmp_dir)
if not os.listdir(tmp_dir):
color_print('下载全部失败')
continue
bash('tar czf %s.tar.gz %s && sz %s.tar.gz' % (tmp_dir, tmp_dir_name, tmp_dir))
if ret.get('failed'):
error = '文件名称: %s \n下载失败: [ %s ] \n下载成功 [ %s ]' % \
('%s.tar.gz' % tmp_dir_name, ', '.join(ret.get('failed').keys()), ', '.join(ret.get('ok').keys()))
color_print(error)
else:
msg = '文件名称: %s \n下载成功 [ %s ]' % ('%s.tar.gz' % tmp_dir_name, ', '.join(ret.get('ok').keys()))
color_print(msg, 'green')
print
except IndexError:
pass
def main():
"""
he he
主程序
"""
if not login_user: # 判断用户是否存在
color_print('没有该用户,或许你是以root运行的 No that user.', exits=True)
gid_pattern = re.compile(r'^g\d+$')
nav = Nav(login_user)
nav.print_nav()
try:
while True:
try:
option = raw_input("\033[1;32mOpt or ID>:\033[0m ").strip()
except EOFError:
nav.print_nav()
continue
except KeyboardInterrupt:
sys.exit(0)
if option in ['P', 'p', '\n', '']:
nav.search()
continue
if option.startswith('/') or gid_pattern.match(option):
nav.search(option.lstrip('/'))
elif option in ['G', 'g']:
nav.print_asset_group()
continue
elif option in ['E', 'e']:
nav.exec_cmd()
continue
elif option in ['U', 'u']:
nav.upload()
elif option in ['D', 'd']:
nav.download()
elif option in ['H', 'h']:
nav.print_nav()
elif option in ['Q', 'q', 'exit']:
sys.exit()
else:
try:
asset = nav.search_result[int(option)]
roles = nav.user_perm.get('asset').get(asset).get('role')
if len(roles) > 1:
role_check = dict(zip(range(len(roles)), roles))
print "\033[32m[ID] 系统用户\033[0m"
for index, role in role_check.items():
print "[%-2s] %s" % (index, role.name)
print
print "授权系统用户超过1个,请输入ID, q退出"
try:
role_index = raw_input("\033[1;32mID>:\033[0m ").strip()
if role_index == 'q':
continue
else:
role = role_check[int(role_index)]
except IndexError:
color_print('请输入正确ID', 'red')
continue
elif len(roles) == 1:
role = list(roles)[0]
else:
color_print('没有映射用户', 'red')
continue
ssh_tty = SshTty(login_user, asset, role)
ssh_tty.connect()
except (KeyError, ValueError):
color_print('请输入正确ID', 'red')
except ServerError, e:
color_print(e, 'red')
except IndexError, e:
color_print(e)
time.sleep(5)
pass
if __name__ == '__main__':
main()
| gpl-2.0 | 2,503,802,177,334,187,500 | 37.665037 | 134 | 0.460763 | false |
jmacmahon/invenio | modules/websession/lib/webuser_config.py | 16 | 1790 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
webuser_config.py - magic constants for webuser module.
"""
# Used by merge_usera_into_userb, contains the list of which SQL tables refer
# to the external key id_user, and what column contains this information.
CFG_WEBUSER_USER_TABLES = (
## The next tables are disabled because they are often too big
## and not so critical to deserve merging
#("rnkPAGEVIEWS", "id_user"),
#("rnkDOWNLOADS", "id_user"),
#("session", "uid"),
("user_usergroup", "id_user"),
("user_accROLE", "id_user"),
("user_query", "id_user"),
("user_query_basket", "id_user"),
("bskREC", "id_user_who_added_item"),
("user_bskBASKET", "id_user"),
("bskRECORDCOMMENT", "id_user"),
("msgMESSAGE", "id_user_from"),
("user_msgMESSAGE", "id_user_to"),
("cmtRECORDCOMMENT", "id_user"),
("cmtACTIONHISTORY", "id_user"),
("cmtSUBSCRIPTION", "id_user"),
("user_expJOB", "id_user"),
("swrCLIENTDATA", "id_user"),
("sbmCOOKIES", "uid"),
("aidUSERINPUTLOG", "userid"),
)
| gpl-2.0 | 1,593,291,101,930,926,800 | 36.291667 | 77 | 0.67095 | false |
tswast/google-cloud-python | trace/google/cloud/trace_v1/gapic/trace_service_client_config.py | 1 | 1368 | config = {
"interfaces": {
"google.devtools.cloudtrace.v1.TraceService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
}
},
"methods": {
"ListTraces": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetTrace": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"PatchTraces": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
},
}
}
}
| apache-2.0 | -7,595,018,230,649,111,000 | 35 | 67 | 0.38962 | false |
Achuth17/scikit-bio | skbio/io/_base.py | 1 | 7700 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range
import re
import warnings
import numpy as np
from skbio.util import cardinal_to_ordinal
_whitespace_regex = re.compile(r'\s')
_newline_regex = re.compile(r'\n')
def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to decode "
"quality scores.",
"Decoding Solexa quality scores is not currently supported, "
"as quality scores are always stored as Phred scores in "
"scikit-bio. Please see the following scikit-bio issue to "
"track progress on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual = np.fromstring(qual_str, dtype=np.uint8) - phred_offset
if np.any((qual > phred_range[1]) | (qual < phred_range[0])):
raise ValueError("Decoded Phred score is out of range [%d, %d]."
% (phred_range[0], phred_range[1]))
return qual
def _encode_phred_to_qual(phred, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to encode "
"Phred scores.",
"Encoding Solexa quality scores is not currently supported. "
"Please see the following scikit-bio issue to track progress "
"on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual_chars = []
for score in phred:
if score < phred_range[0]:
raise ValueError("Phred score %d is out of range [%d, %d]."
% (score, phred_range[0], phred_range[1]))
if score > phred_range[1]:
warnings.warn(
"Phred score %d is out of targeted range [%d, %d]. Converting "
"to %d." % (score, phred_range[0], phred_range[1],
phred_range[1]), UserWarning)
score = phred_range[1]
qual_chars.append(chr(score + phred_offset))
return ''.join(qual_chars)
def _get_phred_offset_and_range(variant, phred_offset, errors):
if variant is None and phred_offset is None:
raise ValueError(errors[0])
if variant is not None and phred_offset is not None:
raise ValueError(
"Cannot provide both `variant` and `phred_offset`.")
if variant is not None:
if variant == 'sanger':
phred_offset = 33
phred_range = (0, 93)
elif variant == 'illumina1.3':
phred_offset = 64
phred_range = (0, 62)
elif variant == 'illumina1.8':
phred_offset = 33
phred_range = (0, 62)
elif variant == 'solexa':
phred_offset = 64
phred_range = (-5, 62)
raise NotImplementedError(errors[1])
else:
raise ValueError("Unrecognized variant %r." % variant)
else:
if not (33 <= phred_offset <= 126):
raise ValueError(
"`phred_offset` %d is out of printable ASCII character range."
% phred_offset)
phred_range = (0, 126 - phred_offset)
return phred_offset, phred_range
def _get_nth_sequence(generator, seq_num):
# i is set to None so that an empty generator will not result in an
# undefined variable when compared to seq_num.
i = None
if seq_num is None or seq_num < 1:
raise ValueError('Invalid sequence number (`seq_num`=%s). `seq_num`'
' must be between 1 and the number of sequences in'
' the file.' % str(seq_num))
try:
for i, seq in zip(range(1, seq_num + 1), generator):
pass
finally:
generator.close()
if i == seq_num:
return seq
raise ValueError('Reached end of file before finding the %s sequence.'
% cardinal_to_ordinal(seq_num))
def _parse_fasta_like_header(line):
id_ = ''
desc = ''
header = line[1:].rstrip()
if header:
if header[0].isspace():
# no id
desc = header.lstrip()
else:
header_tokens = header.split(None, 1)
if len(header_tokens) == 1:
# no description
id_ = header_tokens[0]
else:
id_, desc = header_tokens
return id_, desc
def _format_fasta_like_records(generator, id_whitespace_replacement,
description_newline_replacement, require_qual,
lowercase=None):
if ((id_whitespace_replacement is not None and
'\n' in id_whitespace_replacement) or
(description_newline_replacement is not None and
'\n' in description_newline_replacement)):
raise ValueError(
"Newline character (\\n) cannot be used to replace whitespace in "
"sequence IDs, nor to replace newlines in sequence descriptions.")
for idx, seq in enumerate(generator):
if len(seq) < 1:
raise ValueError(
"%s sequence does not contain any characters (i.e., it is an "
"empty/blank sequence). Writing empty sequences is not "
"supported." % cardinal_to_ordinal(idx + 1))
if 'id' in seq.metadata:
id_ = seq.metadata['id']
else:
id_ = ''
if id_whitespace_replacement is not None:
id_ = _whitespace_regex.sub(id_whitespace_replacement, id_)
if 'description' in seq.metadata:
desc = seq.metadata['description']
else:
desc = ''
if description_newline_replacement is not None:
desc = _newline_regex.sub(description_newline_replacement, desc)
if desc:
header = '%s %s' % (id_, desc)
else:
header = id_
if require_qual and 'quality' not in seq.positional_metadata:
raise ValueError(
"Cannot write %s sequence because it does not have quality "
"scores associated with it." % cardinal_to_ordinal(idx + 1))
qual = None
if 'quality' in seq.positional_metadata:
qual = seq.positional_metadata['quality'].values
if lowercase is not None:
if hasattr(seq, 'lowercase'):
seq_str = seq.lowercase(lowercase)
else:
raise AttributeError("lowercase specified but class %s does "
"not support lowercase functionality" %
seq.__class__.__name__)
else:
seq_str = str(seq)
yield header, seq_str, qual
def _line_generator(fh, skip_blanks=False):
for line in fh:
line = line.strip()
if line or not skip_blanks:
yield line
def _too_many_blanks(fh, max_blanks):
count = 0
too_many = False
for line in _line_generator(fh, skip_blanks=False):
if line:
break
else:
count += 1
if count > max_blanks:
too_many = True
break
fh.seek(0)
return too_many
| bsd-3-clause | -8,607,395,293,773,900,000 | 34.321101 | 79 | 0.554805 | false |
home-assistant/home-assistant | tests/components/mfi/test_switch.py | 5 | 3581 | """The tests for the mFi switch platform."""
import unittest.mock as mock
import pytest
import homeassistant.components.mfi.switch as mfi
import homeassistant.components.switch as switch_component
from homeassistant.setup import async_setup_component
PLATFORM = mfi
COMPONENT = switch_component
THING = "switch"
GOOD_CONFIG = {
"switch": {
"platform": "mfi",
"host": "foo",
"port": 6123,
"username": "user",
"password": "pass",
"ssl": True,
"verify_ssl": True,
}
}
async def test_setup_adds_proper_devices(hass):
"""Test if setup adds devices."""
with mock.patch(
"homeassistant.components.mfi.switch.MFiClient"
) as mock_client, mock.patch(
"homeassistant.components.mfi.switch.MfiSwitch", side_effect=mfi.MfiSwitch
) as mock_switch:
ports = {
i: mock.MagicMock(
model=model, label=f"Port {i}", output=False, data={}, ident=f"abcd-{i}"
)
for i, model in enumerate(mfi.SWITCH_MODELS)
}
ports["bad"] = mock.MagicMock(model="notaswitch")
print(ports["bad"].model)
mock_client.return_value.get_devices.return_value = [
mock.MagicMock(ports=ports)
]
assert await async_setup_component(hass, COMPONENT.DOMAIN, GOOD_CONFIG)
await hass.async_block_till_done()
for ident, port in ports.items():
if ident != "bad":
mock_switch.assert_any_call(port)
assert mock.call(ports["bad"], hass) not in mock_switch.mock_calls
@pytest.fixture(name="port")
def port_fixture():
"""Port fixture."""
return mock.MagicMock()
@pytest.fixture(name="switch")
def switch_fixture(port):
"""Switch fixture."""
return mfi.MfiSwitch(port)
async def test_name(port, switch):
"""Test the name."""
assert port.label == switch.name
async def test_update(port, switch):
"""Test update."""
switch.update()
assert port.refresh.call_count == 1
assert port.refresh.call_args == mock.call()
async def test_update_with_target_state(port, switch):
"""Test update with target state."""
# pylint: disable=protected-access
switch._target_state = True
port.data = {}
port.data["output"] = "stale"
switch.update()
assert port.data["output"] == 1.0
# pylint: disable=protected-access
assert switch._target_state is None
port.data["output"] = "untouched"
switch.update()
assert port.data["output"] == "untouched"
async def test_turn_on(port, switch):
"""Test turn_on."""
switch.turn_on()
assert port.control.call_count == 1
assert port.control.call_args == mock.call(True)
# pylint: disable=protected-access
assert switch._target_state
async def test_turn_off(port, switch):
"""Test turn_off."""
switch.turn_off()
assert port.control.call_count == 1
assert port.control.call_args == mock.call(False)
# pylint: disable=protected-access
assert not switch._target_state
async def test_current_power_w(port, switch):
"""Test current power."""
port.data = {"active_pwr": 10}
assert switch.current_power_w == 10
async def test_current_power_w_no_data(port, switch):
"""Test current power if there is no data."""
port.data = {"notpower": 123}
assert switch.current_power_w == 0
async def test_extra_state_attributes(port, switch):
"""Test the state attributes."""
port.data = {"v_rms": 1.25, "i_rms": 2.75}
assert switch.extra_state_attributes == {"volts": 1.2, "amps": 2.8}
| apache-2.0 | -1,817,772,666,641,239,000 | 27.879032 | 88 | 0.632226 | false |
COCS4950G7/COSC4950 | Source/Network/Obsolete/NetworkServer_r13B.py | 1 | 56657 | __author__ = 'chris hamm'
#NetworkServer_r13B
#Created: 2/19/2015
#Designed to run with NetworkClient_r13B
#Changes from previous revision:
#(Implemented) Replace the stackOfChunksThatNeedToBeReassigned with the queue of chunks
#changed receive chunk from controller setup to have chunk stored in the queue
#changed the receive nextchunk request from client setup to send the chunk from the queue, and if queue is empty, throw a warning
#Still checks whether clients are in the stack of waiting clients
#[MIGHT NOT BE NEEDED](Not implemented yet) Add a lock for the queue of stored chunks so there is no threading conflict
#(Implemented) When server starts up, immediately request 5 chunks from the controller
#(Implemented) Add a queue to hold excess chunks at all times, the queue is a buffer to improve speed slightly
#(Implemented) Removed old unused functions and useless comments
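#
#Rough message flow for this revision (a summary of the notes above; the exact attribute
#names used for the chunk buffer live in the server class and are not shown here):
#   server --pipe--> controller : "nextChunk" (refill the chunk buffer), "done", or the found solution
#   controller --pipe--> server : a chunk object, which the server stores in its queue of buffered chunks
#   client --socket--> server   : "NEXT" (asks for a buffered chunk), "FOUNDSOLUTION [x]" (reports a hit),
#                                 or "CRASHED" (reports a failed client)
#   server --socket--> client   : "NEXT PSIZE(..) DSIZE(..)" announcing a chunk, or "done"
#Every send/receive below is also recorded on self.stackOfIOCommands for debugging.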
def compareString(inboundStringA, inboundStringB, startA, startB, endA, endB):
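    #Compares inboundStringA[startA:endA] against inboundStringB[startB:endB] character by
    #character; returns True only when both ranges have the same length and identical content.
    #It is used throughout this file for prefix matching, e.g.
    #   compareString(data, "nextChunk", 0, 0, len("nextChunk"), len("nextChunk"))
    #is True when data starts with "nextChunk".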
try:
posA = startA
posB = startB
if((endA-startA) != (endB-startB)):
return False
for x in range(startA,endA):
tempCharA= inboundStringA[posA]
tempCharB= inboundStringB[posB]
if(tempCharA != tempCharB):
return False
posA+= 1
posB+= 1
return True
except Exception as inst:
print "========================================================================\n"
print "Exception thrown in compareString Function: " +str(inst)+"\n"
print "========================================================================\n"
return False
def extractSolutionFromFoundSolutionTuple(self, inboundTuple):
try:
theSolution = str(inboundTuple[1]) #second element in the tuple
return theSolution
except Exception as inst:
print "===========================================================\n"
print "Exception thrown in extractSolutionFromFoundSolutionTuple: "+str(inst)+"\n"
print "===========================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in extractSolutionFromFoundSolutionTuple", "Self", "Self")
return "" #return empty string
#Stack of IO Commands=======================================================
def pushCommandOntoTheStackOfIOCommands(self, commandName, commandOrigin_Destination, commandDirection):
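    #Appends one entry to self.stackOfIOCommands while holding stackOfIOCommandsLock.
    #Each entry is a tuple of (commandName, origin/destination, direction, time.time()),
    #e.g. ("nextChunk", "Controller", "Outbound", 1424385600.0).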
try:
import time
current_time= time.time()
#print "Acquiring the stackOfIOCommands Lock"
self.stackOfIOCommandsLock.acquire()
#print "Acquired the stackOfIOCommands Lock"
self.stackOfIOCommands.append((commandName, commandOrigin_Destination, commandDirection, current_time ))#tuple contains name, origin/destination, direction, time
except Exception as inst:
print "======================================================\n"
print "Exception was thrown in pushCommandOntoTheStackOfIOCommands: "+str(inst)+"\n"
print "=======================================================\n"
        #NOTE: this failure is not logged through pushCommandOntoTheStackOfIOCommands itself;
        #the stackOfIOCommands lock may still be held at this point, so a recursive call here
        #could deadlock.
finally:
#print "Releasing the stackOfIOCommands Lock"
self.stackOfIOCommandsLock.release()
#print "Released the stackOfIOCommands Lock"
#Inbound commands from controller==========================================
def checkForDoneCommandFromController(self, inboundString):
try:
#print "Checking for done Command from the Controller\n"
if(compareString(str(inboundString),"done",0,0,len("done"),len("done"))==True):
#print "done Command was received from the Controller\n"
pushCommandOntoTheStackOfIOCommands(self, "done", "Controller","Inbound" )
return True
else:
return False
except Exception as inst:
print "========================================================================\n"
print "Exception thrown in checkForDoneCommandFromController: "+str(inst)+"\n"
print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForDoneCommandFromController", "Controller", "Inbound")
return False
def checkForNextChunkCommandFromController(self, inboundString):
try:
#print "Checking for nextChunk Command from the Controller\n"
if(compareString(str(inboundString),"nextChunk",0,0,len("nextChunk"),len("nextChunk"))==True):
#print "nextChunk Command was received from the Controller\n"
pushCommandOntoTheStackOfIOCommands(self, "nextChunk", "Controller", "Inbound")
return True
else:
return False
except Exception as inst:
print "========================================================================\n"
print "Exception thrown in checkForNextChunkCommandFromController: " +str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForNextChunkCommandFromController", "Controller", "Inbound")
return False
def receiveNextChunkFromController(self):
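    #Blocks on self.pipe.recv() until the controller delivers the next chunk object and
    #returns it; per the revision notes at the top of the file, the caller is expected to
    #buffer the chunk in the queue of stored chunks. Returns an empty string on failure.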
try:
#print "Receiving Chunk From the Pipe\n"
inboundChunk= self.pipe.recv()
# print "Received the Chunk from the pipe\n"
pushCommandOntoTheStackOfIOCommands(self, "nextChunk", "Controller", "Inbound")
return inboundChunk
except Exception as inst:
print "========================================================================\n"
print "ERROR in receiveNextChunkFromController: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in receiveNextChunkFromController", "Controller", "Inbound")
return ""
#Outbound commands to controller======================================
def sendNextChunkCommandToController(self):
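    #Asks the controller (over the pipe) for another chunk. Per the revision notes at the
    #top of the file, this request is issued several times at startup so the chunk buffer
    #stays ahead of client requests.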
try:
# print "Sending nextChunk Command to the Controller\n"
self.pipe.send("nextChunk")
#print "Sent the nextChunk Command to the Controller\n"
pushCommandOntoTheStackOfIOCommands(self, "nextChunk", "Controller", "Outbound")
except Exception as inst:
print "========================================================================\n"
print "Exception was thrown in sendNextChunkCommandToController: " +str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendNextChunkCommandToController", "Controller", "Outbound")
def sendDoneCommandToController(self):
try:
# print "Sending done Command to the Controller\n"
self.pipe.send("done")
# print "Sent the done Command to the Controller\n"
pushCommandOntoTheStackOfIOCommands(self, "done", "Controller", "Outbound")
except Exception as inst:
print "========================================================================\n"
print "Exception thrown in sendDoneCommandToController: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendDoneCommandToController", "Controller", "Outbound")
def sendSolutionToController(self):
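    #Forwards self.theSolution (set earlier by checkForFoundSolutionCommandFromClient) to
    #the controller over the pipe.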
try:
#get the solution from the class variable that stores it
#print "Sending Solution To Controller\n"
self.pipe.send(str(self.theSolution))
# print "Sent solution to the controller\n"
pushCommandOntoTheStackOfIOCommands(self, "sendSolution", "Controller", "Outbound")
except Exception as inst:
print "==================================================================\n"
print "Exception thrown in sendSolutionToController: "+str(inst)+"\n"
print "==================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendSolutionToController", "Controller", "Outbound")
#Inbound commands from the client=========================================
def checkForCrashedCommandFromClient(self,inboundData): #NOTE: This is NOT modelled after the check for crash command in the previous revisions
try:
# print "Checking for the Crashed Command from the Client\n"
if(compareString(str(inboundData),"CRASHED",0,0,len("CRASHED"),len("CRASHED"))==True):
#print "Crash Command was received from the Client\n"
pushCommandOntoTheStackOfIOCommands(self, "CRASHED", "Client", "Inbound")
return True
else:
return False
except Exception as inst:
print "========================================================================\n"
print "Exception thrown in checkForCrashedCommandFromClient: " +str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForCrashedCommandFromClient", "Client", "Inbound")
return False
def checkForFoundSolutionCommandFromClient(self,inboundData):
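    #Expected wire format from the client: "FOUNDSOLUTION [<solution text>]".
    #Example: "FOUNDSOLUTION [abc123]" stores "abc123" in self.theSolution and returns True;
    #any other message returns False.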
try:
#print "Checking for the Found Solution Command from the client\n"
        if(compareString(str(inboundData),"FOUNDSOLUTION",0,0,len("FOUNDSOLUTION"),len("FOUNDSOLUTION"))): #check that the message starts with the FOUNDSOLUTION keyword
#print "FOUNDSOLUTION Command was received from the client\n"
#Extracting solution from the string
#inboundData= FOUNDSOLUTION [solution]
#inboundData[0:14]= FOUNDSOLUTION (including the space)
            #the opening bracket is at index 14, so the solution text starts at index 15
            openingBracketPos = 15 #index of the first character of the solution, just past the "["
closingBracketPos = 15
theInboundSolution = ""
for index in range(openingBracketPos, len(inboundData)):
if(inboundData[index] == "]"):
# print "Extraction of solution is complete\n"
closingBracketPos= index
break
else:
theInboundSolution+= str(inboundData[index])
if(closingBracketPos == 15): #where it started
raise Exception("closing bracket not found")
pushCommandOntoTheStackOfIOCommands(self, "FOUNDSOLUTION", "Client", "Inbound")
            #set the class variable that holds the solution
self.theSolution= theInboundSolution
return True
else:
return False
except Exception as inst:
print "========================================================================\n"
print "Exception thrown in checkForFoundSolutionCommandFromClient: "+str(inst)+"\n"
print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForFoundSolutionCommandFromClient", "Client", "Inbound")
return False
def checkForNextCommandFromClient(self,inboundData):
try:
# print "Checking for the Next command from the client\n"
if(compareString(str(inboundData),"NEXT",0,0,len("NEXT"),len("NEXT"))):
#print "NEXT command was received from the client\n"
pushCommandOntoTheStackOfIOCommands(self, "NEXT", "Client", "Inbound")
return True
else:
return False
except Exception as inst:
print "========================================================================\n"
print "Exception was thrown in checkForNextCommandFromClient: " +str(inst)+"\n"
print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForNextCommandFromClient", "Client", "Inbound")
return False
def receiveCommandFromClient(self, clientSocket): #NOTE new function, used to receive normal commands
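    #Polls the client socket (0.25 second timeout) while holding socketLock and returns the
    #raw command string, or "" when nothing arrived before the timeout or the receive failed.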
while True:
try:
receivedCommandFromClient = ""
#print "Acquiring socketLock"
self.socketLock.acquire()
#print "Acquired socketLock"
#print "Checking for inbound client Commands"
clientSocket.settimeout(0.25)
clientInput= clientSocket.recv(4096)
if(len(clientInput) > 0):
receivedCommandFromClient= clientInput
break
#return command in finally block for this function
except Exception as inst:
if(compareString(str(inst),"[Errno 35] Resource temporarily unavailable",0,0,len("[Errno 35] Resource temporarily unavailable"),len("[Errno 35] Resource temporarily unavailable"))==True):
print "[Errno 35] Resource is not available in receiveCommandFromClient, trying again.\n"
elif(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
                                #ignore, do not print out the error
break
else:
print "===================================================================\n"
print "ERROR in receiveCommandFromClient: " +str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in receiveCommandFromClient", "Client", "Inbound")
receivedCommandFromClient= ""#set to empty string
break
finally:
#print "Releasing socketLock\n"
self.socketLock.release()
#print "Released socketLock\n"
return receivedCommandFromClient
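#NOTE (explanatory comment, added): receiveCommandFromClient polls the client socket with a
#0.25 second timeout while holding socketLock; a "timed out" exception simply means no data was
#waiting, so the function falls through and returns the empty string to signal "no command".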
#Outbound commands to client==================================================
def sendDoneCommandToClient(self,networkSocket, clientIP):
#print "Issuing Done Command to Client: " + str(clientIP) +"\n"
#print "Acquiring socket lock\n"
self.socketLock.acquire()
#print "Acquired socketLock\n"
networkSocket.settimeout(0.25)
#print "socket lock acquired\n"
try: #send try block
# print "preparing to send done command to client\n"
networkSocket.send("done")
#print "sent Done command to client: " +str(clientIP) +"\n"
pushCommandOntoTheStackOfIOCommands(self, "done", "Client", "Outbound")
except Exception as inst:
if(compareString(str(inst),"[Errno 32] Broken pipe",0,0,len("[Errno 32] Broken pipe"),len("[Errno 32] Broken pipe"))):
print "========================================================================\n"
print "Exception thrown in sendDoneCommandToClient: Broken pipe error detected in send try block\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendDoneCommandToClient: Broken Pipe", "Client", "Outbound")
else:
print "========================================================================\n"
print "Exception in send Done command: " +str(inst) +"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendDoneCommandToClient", "Client", "Outbound")
finally:
#print "Releasing the socketLock\n"
self.socketLock.release()
#print "Released socketLock\n"
def sendNextCommandToClientByLength(self, clientSocket, chunkObject): #This sends the measurements to the client in length instead of file size
try:
#print "Acquiring the socketLock\n"
self.socketLock.acquire()
#print "Acquired the socketLock\n"
chunkParamLength = len(str(chunkObject.params))
chunkDataLength = len(str(chunkObject.data))
#Create the command string
commandString= ""
try:
commandString = "NEXT PSIZE("+str(chunkParamLength)+") DSIZE("+str(chunkDataLength)+")\n" #keeping same names, even though it is length
except Exception as inst:
print "========================================================================\n"
print "Error in create command string step of sendNextCommandToCLientByLength: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in createCommandStringStep of sendNextCommandToClientByLength", "Client", "Outbound")
#Send command string to the client
try:
# print "Sending command string to the client\n"
clientSocket.send(commandString)
import time
time.sleep(0.25)
# print "Sent the command string to the client\n"
except Exception as inst:
print "========================================================================\n"
print "Error in send command string to client in sendNextCOmmandToCLientByLength: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendCommandStringToClient of sendNextCommandToClientByLength", "Client", "Outbound")
#Send the chunk params to the client
try:
# print "Sending chunk params to the client\n"
while True:
try:
clientSocket.send(str(chunkObject.params))
# print "Sent chunk params to the client\n"
pushCommandOntoTheStackOfIOCommands(self, "next: chunk.params", "Client", "Outbound")
break
except Exception as inst:
if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
#dont throw an error, just try again
fakeVar=True
else:
raise Exception ("Error in sending chunk params to the client in infinite while loop")
break
except Exception as inst:
print "========================================================================\n"
print "Error in send chunk params to the client in sendNextCOmmandToClientByLength: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendChunkParamsToClient of sendNextCommandToClientByLength", "Client", "Outbound")
#send the chunk data to the client
try:
# print "Sending chunk data to the client\n"
while True:
try:
clientSocket.send(str(chunkObject.data))
#print "Sent chunk data to the client\n"
pushCommandOntoTheStackOfIOCommands(self, "next: chunk.data", "Client", "Outbound")
break
except Exception as inst:
if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
#dont throw error, just try again
fakeVar=True
else:
raise Exception ("Error in sending chunk data to the client in infinite loop")
break
except Exception as inst:
print "========================================================================\n"
print "Error in send chunk data to the client in sendNextCOmmandToClientByLength: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendChunkDataToClient of sendNextCommandToClientByLength", "Client", "Outbound")
except Exception as inst:
print "========================================================================\n"
print "ERROR in sendNextCommandToClientByLength: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendNextCommandToClientByLength", "Client", "Outbound")
finally:
#print "Releasing the socketLock\n"
self.socketLock.release()
#print "Released the socketLock\n"
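#--------------------------------------------------------------------------
#Illustrative sketch (added, not part of the original protocol code): the header sent above has
#the form "NEXT PSIZE(12) DSIZE(345)\n", telling the client how many bytes of params and data
#follow. A minimal client-side parser could look like the function below; the function name and
#the regular expression are assumptions made purely for illustration.
def exampleParseNextHeaderSketch(headerString):
        import re
        match = re.match(r"NEXT PSIZE\((\d+)\) DSIZE\((\d+)\)", headerString)
        if match is None:
                return None #the header did not match the expected NEXT format
        #return (paramLength, dataLength) so the caller knows how many bytes to recv()
        return (int(match.group(1)), int(match.group(2)))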
#dictionaryOfCurrentClientTasks functions================================================================
def addClientToDictionaryOfCurrentClientTasks(self, clientAddress, clientChunk): #client Address has both the ip address and port
try:
self.dictionaryOfCurrentClientTasks[clientAddress] = clientChunk
except Exception as inst:
print "========================================================================\n"
print "ERROR in addClientToDictionaryOfCurrentClientTasks: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in addClientToDictionaryOfCurrentClientTasks", "Self", "Self")
def delClientFromDictionaryOfCurrentClientTasks(self, clientAddress): #clientAddress contains IP and port
try:
del self.dictionaryOfCurrentClientTasks[clientAddress]
except KeyError as inst:
print "========================================================================\n"
print "ERROR: " +str(clientAddress)+" does not exist in the dictionaryOfCurrentClientTasks\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR (Key Error) in delClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
except Exception as inst:
print "========================================================================\n"
print "ERROR in delClientFromDictionaryOfCurrentClientTasks: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in delClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
def getChunkFromDictionaryOfCurrentClientTasks(self, clientAddress): #clientAddress contains IP and port
try:
retrievedChunk = self.dictionaryOfCurrentClientTasks[clientAddress]
return retrievedChunk
except KeyError as inst:
print "========================================================================\n"
print "ERROR: " +str(clientAddress)+" does not exist in the dictionaryOfCurrentClientTasks\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR (Key Error) in getClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
return "" #changed from none
except Exception as inst:
print "========================================================================\n"
print "ERROR in getChunkFromDictionaryOfCurrentClientTasks: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in getClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
return "" #changed from none
def setChunkToDictionaryOfCurrentClientTasks(self, clientAddr, chunkObject):
try:
self.dictionaryOfCurrentClientTasks[clientAddr] = chunkObject
except Exception as inst:
print "=======================================================================\n"
print "ERROR in setChunkToDIctionaryOfCurrentCLientTasks: " +str(inst)+"\n"
print "=======================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in setClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
#list of Crashed clients functions====================================================================
def addClientToListOfCrashedClients(self, clientAddress): #clientAddress has the ip and the port
try:
self.listOfCrashedClients.append(clientAddress)
except Exception as inst:
print "========================================================================\n"
print "ERROR in addClientToListOfCrashedClients: " + str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in addClientToListOfCrashedClients", "Self", "Self")
#stackOfChunksThatNeedToBeReassigned functions==========================================================
def pushChunkOnToStackOfChunksThatNeedToBeReassigned(self, inboundChunk):
try:
#print "Pushing chunk onto the stackOfChunksThatNeedToBeReassigned\n"
self.stackOfChunksThatNeedToBeReassigned.append(inboundChunk)
# print "Pushed chunk onto the stackOfChunksThatNeedToBeReassigned\n"
except Exception as inst:
print "========================================================================\n"
print "ERROR in pushChunkOnToStackOfChunksThatNeedToBeReassigned: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in pushChunksOnToStackOfChunksThatNeedToBeReassigned", "Self", "Self")
def popChunkFromStackOfChunksThatNeedToBeReassigned(self):
try:
poppedChunk = ""
# print "Popping chunk from stackOfChunksThatNeedToBeReassigned\n"
poppedChunk = self.stackOfChunksThatNeedToBeReassigned.pop()
# print "Popped chunk off the stackOfChunksThatNeedToBeReassigned\n"
return poppedChunk
except Exception as inst:
print "========================================================================\n"
print "ERROR in popChunkFromStackOfChunksThatNeedToBeReassigned: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in popChunkFromStackOfChunksThatNeedToBeReassigned", "Self", "Self")
return "" #changed from none
#stackOfClientsWaitingForNextChunk functions============================================================
def pushClientOnToStackOfClientsWaitingForNextChunk(self, clientSocket, clientAddress):
try:
#print "Pushing client on to stackOfClientsWaitingForNextChunk\n"
self.stackOfClientsWaitingForNextChunk.append((clientSocket,clientAddress)) #holds a tuple
# print "Pushed client on to stackOfClientsWaitingForNextChunk\n"
except Exception as inst:
print "========================================================================\n"
print "ERROR in pushClientOnToStackOfClientsWaitingForNextChunk: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in pushClientOnToStackOfClientsWaitingForNextChunk", "Self", "Self")
def popClientFromStackOfClientsWaitingForNextChunk(self):
try:
poppedClient= ""
# print "Popping client off the stackOfClientsWaitingForNextChunk\n"
poppedClient= self.stackOfClientsWaitingForNextChunk.pop()
# print "Popped client off the stackOfClientsWaitingForNextChunk\n"
return poppedClient
except Exception as inst:
print "========================================================================\n"
print "ERROR in popClientFromStackOfClientsWaitingForNextChunk: "+str(inst)+"\n"
print "========================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in popClientFromStackOfClientsWaitingForNextChunk", "Self", "Self")
return "" #changed from none
import threading
import thread
from socket import *
import platform
import Queue
class NetworkServer():
#CLASS VARS
host = ''
port = 55568
myIPAddress = '127.0.0.1' #default to ping back address
stopAllThreads = False #set to true to have all threads break out of their while loops
listOfCrashedClients = []
theSolution = "" #holds the solution if found
stackOfIOCommands = [] #holds a record all the IO commands that have been sent through server
stackOfChunksThatNeedToBeReassigned = [] #THIS CONTAINER IS TO BE REPLACED BY THE QUEUE OF STORED CHUNKS
        queueOfStoredChunks = Queue.Queue() #This is the replacement for the stackOfChunksThatNeedToBeReassigned
stackOfClientsWaitingForNextChunk = []
dictionaryOfCurrentClientTasks = {} #key is the client's IP Address , the value is the chunk that client is working on
#If you try to access a non-existing key it will throw an error
socketLock = threading.RLock()
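        #NOTE (explanatory comment, added): the attributes above are class-level variables, so a
        #single copy is shared by the main thread and every client handler thread; socketLock (an
        #RLock) is what serialises the socket sends and receives performed by those threads.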
#START OF CLIENT THREAD HANDLER
def ClientThreadHandler(self, clientSocket, clientAddr, socketLock):
try: #CLient THread Handler Try Block
inboundCommandFromClient = "" #initialize the receiving variable
while True:
if(self.stopAllThreads == True):
#print "MAIN THREAD: Stopping the thread\n"
# print "Sending done command to connected client\n"
sendDoneCommandToClient(self, clientSocket, clientAddr)
break
try: #check for commands from client
inboundCommandFromClient = receiveCommandFromClient(self,clientSocket)
except Exception as inst:
print "===================================================================\n"
print "Error in check for commands from the client in client thread handler: " +str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkForCommandsFromTheClientInClientThreadHeader", "Self", "Self")
try: #Analyzing received command from the client try block
if(len(inboundCommandFromClient) > 0): #ignore if the empty string
identifiedCommand = False
try: #checking to see if the next Command was received from the client try block
if(checkForNextCommandFromClient(self,inboundCommandFromClient)==True):
identifiedCommand= True
if(self.queueOfStoredChunks.qsize() > 0):
#import Chunk
#tempChunk = Chunk.Chunk()
tempChunk = self.queueOfStoredChunks.get()
#tempChunk.params = tempChunk2.params
#tempChunk.data = tempChunk2.data
sendNextCommandToClientByLength(self, clientSocket, tempChunk)
try:
testChunk = getChunkFromDictionaryOfCurrentClientTasks(self, clientAddr)
                                                                                #if the lookup succeeds, then set the value
setChunkToDictionaryOfCurrentClientTasks(self, clientAddr, tempChunk)
except Exception as inst:
#add client to the dictionary
addClientToDictionaryOfCurrentClientTasks(self, clientAddr, tempChunk)
#then request nextchunk from controller
sendNextChunkCommandToController(self)
else:
#put client in stack of clients waiting
pushClientOnToStackOfClientsWaitingForNextChunk(self, clientSocket, clientAddr)
#request nextchunk from controller
sendNextChunkCommandToController(self)
print "==================================================\n"
print "WARNING: The queueOfStoredChunks is empty!!!!"
print "==================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "WARNING: QueueOfSToredChunks is Empty!", "Self", "Self")
''' (Now checks the queue)
#print "Identified inboundCommandFromClient as the Next Command\n"
#check to see if there is a chunk that needs to be reassigned
if(len(self.stackOfChunksThatNeedToBeReassigned) > 0):
#print "There is a chunk that needs to be reassigned."
tempChunk = popChunkFromStackOfChunksThatNeedToBeReassigned(self)
sendNextCommandToClientByLength(self, clientSocket, tempChunk)
try:
tempChunk = getChunkFromDictionaryOfCurrentClientTasks(self,clientAddr)
#if suceed, set value
setChunkToDictionaryOfCurrentClientTasks(self,clientAddr,tempChunk)
except Exception as inst:
#add client to the dictionary
addClientToDictionaryOfCurrentClientTasks(self,clientAddr,tempChunk)
else:
#print "There is no chunk that needs to be reassigned. Requesting nextChunk from the Controller"
sendNextChunkCommandToController(self)
#print "Adding the client to the stackOfClientsWaitingForNextChunk"
pushClientOnToStackOfClientsWaitingForNextChunk(self,clientSocket, clientAddr) '''
except Exception as inst:
print "===================================================================\n"
print "Error in checking to see if the next Command was received from the client in client thread handler: "+str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkingForNextCommandFromClient", "Self", "Self")
try: #check to see if the found solution command was received from the client
if(identifiedCommand == False):
if(checkForFoundSolutionCommandFromClient(self,inboundCommandFromClient)==True):
identifiedCommand= True
# print "Identified inboundCommandFromClient as the found solution command\n"
for key in self.dictionaryOfCurrentClientTasks.keys():
sendDoneCommandToClient(self,clientSocket, key) #extracts the key from the dictionary and sends the done command to them
# print "Setting the thread termination value to true, stopping all threads\n"
# print "Acquiring stopAllThreads Lock\n"
self.stopAllThreadsLock.acquire()
# print "Acquired stopAllThreads Lock\n"
self.stopAllThreads = True
#print "Releasing stopAllThreads Lock\n"
self.stopAllThreadsLock.release()
# print "Released stopAllThreads Lock\n"
# print "A client has found the solution!!!!!\n"
break
except Exception as inst:
print "===================================================================\n"
print "Error in check to see if found solution command was received from the client in client thread handler: "+str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkForFoundSOlutionCommandFromClient", "Self", "Self")
try: #check to see if the crashed command was received
if(identifiedCommand == False):
if(checkForCrashedCommandFromClient(self,inboundCommandFromClient)==True):
identifiedCommand= True
# print "Identified inboundCommandFromClient as the Crashed Command\n"
tempChunk = getChunkFromDictionaryOfCurrentClientTasks(self,clientAddr)
pushChunkOnToStackOfChunksThatNeedToBeReassigned(self,tempChunk)
addClientToListOfCrashedClients(self, clientAddr)
delClientFromDictionaryOfCurrentClientTasks(self,clientAddr)
except Exception as inst:
print "===================================================================\n"
print "Error in check to see if crashed command was received from client in client thread handler: "+ str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkForCrashedCommandFromClient", "Self", "Self")
if(identifiedCommand == False):
#print "Warning: Unknown Command Received from the client: "+str(inboundCommandFromClient)+"\n"
pushCommandOntoTheStackOfIOCommands(self, "UNKNOWN: "+str(inboundCommandFromClient), "Client", "Inbound")
except Exception as inst:
print "===================================================================\n"
print "Error in Analyzing received command from the client try block in the client thread handler: " +str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in Analyzing received command from the Client", "Self", "Self")
except Exception as inst:
print "===================================================================\n"
print "Error in Client Thread Handler: " + str(inst) +"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in client Thread Handler", "Self", "Self")
finally:
clientSocket.close()
#print "clientSocket has been closed\n"
#print "this thread has closed.\n"
#end of clientthreadhandler
#START OF INITIAL SERVER SETUP
def __init__(self, inboundpipeconnection):
#CLASS VARS
self.pipe = inboundpipeconnection #pipe that connects to the controller
self.stopAllThreadsLock = thread.allocate_lock()
self.stackOfIOCommandsLock = thread.allocate_lock()
#CREATE THE SOCKET
import socket
serverSocket = socket.socket(AF_INET, SOCK_STREAM)
#detect the OS
try: #getOS try block
print "*************************************"
print " Network Server"
print "*************************************"
print "OS DETECTION:"
if(platform.system()=="Windows"): #Detecting Windows
print platform.system()
print platform.win32_ver()
elif(platform.system()=="Linux"): #Detecting Linux
print platform.system()
print platform.dist()
elif(platform.system()=="Darwin"): #Detecting OSX
print platform.system()
print platform.mac_ver()
else: #Detecting an OS that is not listed
print platform.system()
print platform.version()
print platform.release()
print "*************************************"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in getOS try block"
print type(inst) #the exception instance
print inst.args #srguments stored in .args
print inst #_str_ allows args tto be printed directly
print "========================================================================================"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in getOS try block", "Self", "Self")
#get the IP address
try: #getIP tryblock
# print "STATUS: Getting your network IP adddress"
if(platform.system()=="Windows"):
print socket.gethostbyname(socket.gethostname())
elif(platform.system()=="Linux"):
#Source: http://stackoverflow.com/questions/11735821/python-get-localhost-ip
#Claims that this works on linux and windows machines
import fcntl
import struct
import os
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s',ifname[:15]))[20:24])
#end of def
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = ["eth0","eth1","eth2","wlan0","wlan1","wifi0","ath0","ath1","ppp0"]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
print "IP address was retrieved from the " + str(ifname) + " interface."
break
except IOError:
pass
return ip
#end of def
print get_lan_ip()
elif(platform.system()=="Darwin"):
print socket.gethostbyname(socket.gethostname())
else:
#NOTE: MAY REMOVE THIS AND REPLACE WITH THE LINUX DETECTION METHOD
# print "INFO: The system has detected that you are not running Windows, OS X, or Linux."
# print "INFO: System is using a generic IP detection method"
print socket.gethostbyname(socket.gethostname())
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in getIP try block"
print type(inst) #the exception instance
print inst.args #srguments stored in .args
print inst #_str_ allows args tto be printed directly
print "========================================================================================"
pushCommandOntoTheStackOfIOCommands(self, "ERROR in getIP try block", "Self", "Self")
try: #try to bind the socket
serverSocket.bind((self.host, self.port))
except Exception as inst:
print "===================================================================\n"
print "Critical Error: Failed to bind the socket: "+str(inst)+"\n"
print "Suggestion: Close this application, then reopen this application and try again\n"
print "===================================================================\n"
#START LISTENING TO SOCKET
serverSocket.listen(5)
#MAKE INITIAL REQUEST OF CHUNKS TO CONTROLLER FOR THE QUEUE
#Initially requesting 5 chunks
sendNextChunkCommandToController(self)
sendNextChunkCommandToController(self)
sendNextChunkCommandToController(self)
sendNextChunkCommandToController(self)
sendNextChunkCommandToController(self)
#MAIN THREAD SERVER LOOP
try: #main thread server loop try block
serverSocket.settimeout(0.25)
# print "MAIN THREAD: Waiting for client(s) to connect\n"
while True: #Primary main thread server while loop
if(self.stopAllThreads == True):
#print "MAIN THREAD: Stopping Main Thread\n"
break
#CHECK TO SEE IF A CLIENT IS TRYING TO CONNECT
try:
#print "MAIN THREAD: Checking to see if client is trying to connect\n"
inboundClientSocket, inboundClientAddr = serverSocket.accept()
#print "MAIN THREAD: A client has connected!!\n"
thread.start_new_thread(self.ClientThreadHandler, (inboundClientSocket,inboundClientAddr,self.socketLock))
except Exception as inst:
if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
#do not display an error message
fakeVar= True
else:
print "===================================================================\n"
print "MAIN THREAD: Error in check for client trying to connect try block: " +str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkForClientTryingToConnect" , "Self", "Self")
#CHECK TO SEE IF CONTROLLER HAS SENT A MESSAGE TO SERVER
try:
# print "MAIN THREAD: Checking for Commands from the controller\n"
if(self.pipe.poll()):
receivedControllerCommand= self.pipe.recv()
if(receivedControllerCommand is not None): #ignore the empty string
#print "MAIN THREAD: Received command from the controller\n"
identifiedCommand = False
try: #checking for nextChunk Command from Controller
if(checkForNextChunkCommandFromController(self,receivedControllerCommand)==True):
identifiedCommand= True
if(len(self.stackOfClientsWaitingForNextChunk) > 0):
tempClientSocket, tempClientAddr = popClientFromStackOfClientsWaitingForNextChunk(self)
#send straight to client
outboundChunk = receiveNextChunkFromController(self)
sendNextCommandToClientByLength(self, tempClientSocket, outboundChunk)
sendNextChunkCommandToController(self)
else:
self.queueOfStoredChunks.put(receiveNextChunkFromController(self)) #put into the queue [NOT USING THE BLOCKING FEATURE]
# print "MAIN THREAD: Identified receivedControllerCommand as the nextChunk Command\n"
''' (Needs to just add the chunk to the queue)
#check to see if a client is waiting for the nextChunk
if(len(self.stackOfClientsWaitingForNextChunk) > 0):
# print "MAIN THREAD: A client is waiting for the nextChunk\n"
tempClientSocket, tempClientAddress= popClientFromStackOfClientsWaitingForNextChunk(self)
outboundChunk = receiveNextChunkFromController(self)
sendNextCommandToClientByLength(self, tempClientSocket, outboundChunk)
try:
tempChunk = getChunkFromDictionaryOfCurrentClientTasks(self,tempClientAddress)
#if, suceeds, override the old chunk
setChunkToDictionaryOfCurrentClientTasks(self,tempClientAddress,outboundChunk)
except Exception as inst:
#add it if there is not key for that client yet
addClientToDictionaryOfCurrentClientTasks(self,tempClientAddress, outboundChunk)
else: #if there is no client waiting for the next chunk
#print "MAIN THREAD: No clients are waiting for the nextChunk. Adding chunk to the stackOfChunksThatNeedToBeReassigned"
pushChunkOnToStackOfChunksThatNeedToBeReassigned(self,receivedControllerCommand)'''
except Exception as inst:
print "===================================================================\n"
print "MAIN THREAD: Error in checking for nextChunk Command from Controller Try Block: " +str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkingForNextChunkCommand from Controller", "Self", "Self")
try: #checking for done command form controller
if(identifiedCommand == False):
if(checkForDoneCommandFromController(self,receivedControllerCommand)==True):
identifiedCommand= True
# print "MAIN THREAD: Identified receivedControllerCommand as the Done Command\n"
#No further actions are needed for this command
except Exception as inst:
print "===================================================================\n"
print "MAIN THREAD: Error in checking for done command from Controller Try Block: "+str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkingForDoneCommand from COntroller", "Self", "Self")
if(identifiedCommand == False):
# print "MAIN THREAD: Warning: Unknown Command Received from the Controller: "+str(receivedControllerCommand)+"\n"
pushCommandOntoTheStackOfIOCommands(self, "UNKNOWN: "+str(receivedControllerCommand), "Controller", "Inbound")
else: #if there is nothing on the pipe
#Do not display the message
fakeVar=True
# print "MAIN THREAD: There is no command received from the controller\n"
except Exception as inst:
if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
#Do not print out an error message
fakeVar= True
else:
print "===================================================================\n"
print "MAIN THREAD: Error in check to see if controller has sent a message to server try block: " + str(inst) +"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkToSeeIfControllerHasSentAMessage", "Self", "Self")
except Exception as inst:
print "===================================================================\n"
print "MAIN THREAD: Error in Main Thread Server Loop: " +str(inst)+"\n"
print "===================================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in Main Thread Server Loop", "Self", "Self")
finally:
#print "Setting stop variable to stop all threads"
#print "Acquiring stopAllThreads Lock\n"
self.stopAllThreadsLock.acquire()
#print "Acquired stopAllThreads Lock\n"
self.stopAllThreads = True
#print "Releasing stopAllThreads Lock\n"
self.stopAllThreadsLock.release()
#print "Released stopAllThreads Lock\n"
#print "Sending done command to all clients, server is finished\n"
serverSocket.settimeout(0.25)
for key in self.dictionaryOfCurrentClientTasks.keys(): #This is potentially replaced by the sendDoneCommand in thread
try:
self.socketLock.acquire()
serverSocket.sendall("done")
self.socketLock.release()
# print "Sent done command to: " + str(key)+"\n"
except Exception as inst:
if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
#print "Timed out while sending 'done' command to "+ str(key)+"\n"
fakeVar=True
else:
print "===========================================================\n"
print "MAIN THREAD ERROR in finally block send done command to clients: " +str(inst)+"\n"
print "============================================================\n"
pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in finally block sendDoneCommand", "Self", "Self")
#print "MAIN THREAD: Preparing to close the socket\n"
serverSocket.close()
# print "MAIN THREAD: The serverSocket has been closed\n"
sendDoneCommandToController(self)
# print "MAIN THREAD: Informed the Controller that Server has finished\n"
sendSolutionToController(self) #solution is saved in the class variable
print "-----------------------Stack of IO Commands---------------------------------\n"
for index in range(0,len(self.stackOfIOCommands)):
tempCommandName, tempOrigin_Destination, tempCommandDirection, tempTime = self.stackOfIOCommands.pop(0)
if(compareString(tempCommandDirection, "Inbound",0,0,len("Inbound"),len("Inbound"))==True):
print str(tempCommandDirection)+" command: "+str(tempCommandName)+" was received from: "+str(tempOrigin_Destination)+" at: "+str(tempTime)
else: #if outbound
print str(tempCommandDirection)+" command: "+str(tempCommandName)+" was sent to: "+str(tempOrigin_Destination)+" at: "+str(tempTime)
print "-----------------------End of Stack of IO Commands------------------------\n"
print "The Solution is: '"+str(self.theSolution)+"'\n"
| gpl-3.0 | -670,558,682,601,006,800 | 62.588103 | 199 | 0.521983 | false |
Mitame/YRS2014-Death-Game | data/settings.py | 2 | 1586 | '''
Created on 28 Jul 2014
@author: Levi Wright
'''
import os
if os.getcwd()[-4:] == "data":
os.chdir("..")
running = True
imported = False
import data
import pygame
import time
class level():
pixelsToGrid = 10
fillGaps = True
originLatLong = ([52, 56, 23.44],[0,4,10.75])
originGrid = (53, 54)
milesToGrid = 9,10
x = time.time()
class visuals():
lineColour = (192,192,192,255)
# bgGridColour = (0,104,10,128)
bgGridColour = (0,0,0,64)
bgFillerColour = (0,0,255,128)
smoothscale = True
useMapCache = True
textSpacing = [3,5]
class debug():
renderMap = False
highlightFillerBlocks = False
class controls():
moveUp = (pygame.K_UP, pygame.K_w)
moveDown = (pygame.K_DOWN, pygame.K_s)
moveLeft = (pygame.K_LEFT, pygame.K_a)
moveRight = (pygame.K_RIGHT, pygame.K_d)
class gameplay():
startLife = 66000000
BirthRateMultiplyer = 0.05
ticksPerYear = 1
simPolice = False
class APIs():
year = 2011
month = 5
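# Usage sketch (added for illustration; the import path and attribute access shown here are
# assumptions, not taken from the original project):
#   from data import settings
#   pixels = settings.level.pixelsToGrid * 4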
def postImport():
global imported,gridSize
if not imported:
gridSize = data.generate.maze.mapgrid.size
imported = True
level.originLatLong = tuple(data.apis.utils.degree(*level.originLatLong[i]) for i in range(2))
print(data.apis.utils.gridToLatLong((27,74)))
print(data.apis.utils.gridToLatLong((38,7)))
# level.x = input("Seed: ")
if level.x == "":
level.seed = time.time()
else:
level.seed = level.x
| gpl-2.0 | -2,273,998,920,902,960,600 | 22.4 | 102 | 0.582598 | false |
hbiyik/tribler | src/tribler-core/tribler_core/tests/test_bootstrap.py | 1 | 1532 | from binascii import unhexlify
from ipv8.keyvault.crypto import ECCrypto
from tribler_core.modules.bootstrap import Bootstrap
from tribler_core.tests.tools.base_test import MockObject
from tribler_core.tests.tools.test_as_server import TestAsServer
from tribler_core.utilities.utilities import succeed
class FakeDHT(object):
def connect_peer(self, mid):
matched_node = MockObject()
matched_node.mid = mid
matched_node.public_key = ECCrypto().generate_key("low").pub()
nearby_node = MockObject()
nearby_node.mid = unhexlify('b' * 20)
nearby_node.public_key = ECCrypto().generate_key("low").pub()
return succeed([matched_node, nearby_node])
class TestBootstrapDownload(TestAsServer):
async def setUp(self):
await super(TestBootstrapDownload, self).setUp()
self.bootstrap = Bootstrap(self.temporary_directory(), dht=FakeDHT())
async def test_load_and_fetch_bootstrap_peers(self):
# Before bootstrap download
nodes = await self.bootstrap.fetch_bootstrap_peers()
self.assertEqual(nodes, {})
# Assuming after bootstrap download
self.bootstrap.download = MockObject()
self.bootstrap.download.get_peerlist = lambda: [{'id': 'a' * 20}]
await self.bootstrap.fetch_bootstrap_peers()
# Assuming DHT returns two peers for bootstrap download
self.assertIsNotNone(self.bootstrap.bootstrap_nodes['a' * 20])
self.assertIsNotNone(self.bootstrap.bootstrap_nodes['b' * 20])
| lgpl-3.0 | -1,326,796,016,116,297,000 | 33.818182 | 77 | 0.69517 | false |
blackye/luscan-devel | thirdparty_libs/dns/rdtypes/IN/NSAP.py | 100 | 2181 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class NSAP(dns.rdata.Rdata):
"""NSAP record.
@ivar address: a NASP
@type address: string
@see: RFC 1706"""
__slots__ = ['address']
def __init__(self, rdclass, rdtype, address):
super(NSAP, self).__init__(rdclass, rdtype)
self.address = address
def to_text(self, origin=None, relativize=True, **kw):
return "0x%s" % self.address.encode('hex_codec')
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_string()
t = tok.get_eol()
if address[0:2] != '0x':
raise dns.exception.SyntaxError('string does not start with 0x')
address = address[2:].replace('.', '')
if len(address) % 2 != 0:
raise dns.exception.SyntaxError('hexstring has odd length')
address = address.decode('hex_codec')
return cls(rdclass, rdtype, address)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(self.address)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
address = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, address)
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.address, other.address)
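    # Usage sketch (added for illustration, not part of dnspython itself): NSAP rdata is written
    # in master files as a hex string prefixed with "0x", so it can be built with the generic
    # rdata factory; the address below is only an example value.
    #   import dns.rdata, dns.rdataclass, dns.rdatatype
    #   rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NSAP,
    #                            '0x47.0005.80.005a00.0000.0001.e133.ffffff000161.00')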
| gpl-2.0 | 3,231,114,705,271,473,000 | 35.966102 | 79 | 0.675837 | false |
etxc/namebench | tools/check_nameserver_popularity.py | 174 | 2456 | #!/usr/bin/env python
import os
import re
import sys
import pickle
import time
import traceback
import yahoo.search
from yahoo.search.web import WebSearch
APP_ID = 'P5ihFKzV34G69QolFfb3nN7p0rSsYfC9tPGq.IUS.NLWEeJ14SG9Lei0rwFtgwL8cDBrA6Egdw--'
QUERY_MODIFIERS = '-site:txdns.net -site:sitedossier.com -mx -site:dataopedia.com -site:l0t3k.net -syslog -"4.2.2.1" -site:cqcounter.com -site:flow.nttu.edu.tw -site:websiteoutlook.com -site:ipgeolocator.com -site:tdyndns.org -site:ebrara.com -site:onsamehost.com -site:ipaddresscentral.com -site:quia.jp -inetnum -site:domaintools.com -site:domainbyip.com -site:pdos.csail.mit.edu -statistics -"country name" -"Q_RTT" -site:botsvsbrowsers.com -"ptr record" -site:ip-db.com -site:chaip.com.cn -site:lookup365.com -"IP Country" -site:iptoolboxes.com -"Unknown Country" -"Q_RTT" -amerika -whois -Mozilla -site:domaincrawler.com -site:geek-tools.org -site:visualware.com -site:robtex.com -site:domaintool.se -site:opendns.se -site:ungefiltert-surfen.de -site:datakitteh.org -"SLOVAKIA (SK)" -"IP Search" -site:www.medicore.com.ua -site:dig.similarbase.com -site:ipcorporationwiki.com -site:coolwhois.com -site:corporationwiki.com -site:iptool.us'
CACHE_DIR = os.getenv('HOME') + '/.ycache'
BANNED_URL_KEYWORDS = [
'\.xls$', '\.txt$', 'spam', 'nettools', 'namebench\.cfg', 'spam', 'affinity-v1',
'corporationwiki', 'iptools', 'whois', 'iana.org', 'public.*dns', 'blocked',
'firewall', 'websitevaluespy', 'iptool', 'sshd-versions', '\.off$',
'dnsstuff', 'secspider', 'servers\.cfg'
]
def CheckPopularity(ip):
# DUH
cache_path = os.path.join(CACHE_DIR, ip) + '.pickle.pickle'
if os.path.exists(cache_path):
f = open(cache_path)
results = pickle.load(f)
else:
print "miss: %s" % ip
try:
query = '"%s" %s' % (ip, QUERY_MODIFIERS)
srch = WebSearch(APP_ID, query=query, results=50)
results = srch.parse_results()
pf = open(cache_path, 'w')
pickle.dump(results.results, pf)
pf.close()
except yahoo.search.SearchError:
print "%s failed" % (ip)
return []
use_results = []
for result in results:
reject = False
for regexp in BANNED_URL_KEYWORDS:
if re.search(regexp, result['Url'], re.I):
reject = True
if not reject:
use_results.append(result)
return use_results
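# NOTE (explanatory comment, added): results are cached per IP as pickle files under ~/.ycache,
# so repeated lookups avoid extra Yahoo Search API calls; only results whose URLs do not match
# any regex in BANNED_URL_KEYWORDS are returned.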
def GetUrls(ip):
return [ x['Url'] for x in CheckPopularity(ip) ]
if __name__ == "__main__":
print GetUrls(sys.argv[1])
| apache-2.0 | 2,041,882,573,153,117,000 | 42.857143 | 945 | 0.685261 | false |
Scotchester/owning-a-home | test/browser_testing/features/steps/steps_rate_checker_loan_details.py | 11 | 4431 | from behave import given, when, then
from hamcrest.core import assert_that
from hamcrest.core.core.isequal import equal_to
from hamcrest.library.number.ordering_comparison import greater_than, less_than
from hamcrest.library.text.stringcontains import contains_string
from pages.base import Base
from pages.rate_checker import RateChecker
# RATE STRUCTURE
@when(u'I select "{rate_selection}" Rate Structure')
def step(context, rate_selection):
context.rate_checker.set_rate_structure(rate_selection)
@then(u'I should see "{loan_structure}" as the selected Rate Structure')
def step(context, loan_structure):
current_Selection = context.rate_checker.get_rate_structure()
assert_that(current_Selection, equal_to(loan_structure))
# LOAN TERM
@when(u'I select "{number_of_years}" Loan Term')
def step(context, number_of_years):
context.rate_checker.set_loan_term(number_of_years)
@then(u'I should see "{number_of_years}" as the selected Loan Term')
def step(context, number_of_years):
current_Selection = context.rate_checker.get_loan_term()
assert_that(current_Selection, equal_to(number_of_years))
@then(u'Loan term option "{loan_term}" should be "{expected_state}"')
def step(context, loan_term, expected_state):
actual_state = context.rate_checker.is_loan_term_option_enabled(loan_term)
assert_that(actual_state, equal_to(expected_state))
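# Example scenario lines (illustrative only, not taken from the project's feature files) that
# the loan term steps above are written to match:
#   When I select "30" Loan Term
#   Then I should see "30" as the selected Loan Term
#   Then Loan term option "15" should be "enabled"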
# LOAN TYPE
@given(u'I select "{loan_type}" Loan Type')
@when(u'I change to "{loan_type}" Loan Type')
@when(u'I select "{loan_type}" Loan Type')
def step(context, loan_type):
context.rate_checker.set_loan_type(loan_type)
@then(u'I should see "{loan_type}" as the selected Loan Type')
def step(context, loan_type):
current_Selection = context.rate_checker.get_selected_loan_type()
assert_that(current_Selection, equal_to(loan_type))
@then(u'I should see the Loan Type field highlighted')
def step(context):
actual_state = context.rate_checker.is_loan_type_highlighted()
assert_that(actual_state, equal_to(True))
@then(u'I should NOT see the Loan Type field highlighted')
def step(context):
actual_state = context.rate_checker.is_loan_type_highlighted()
assert_that(actual_state, equal_to(False))
@then(u'I should see an HB alert "{alert_text}"')
def step(context, alert_text):
actual_text = context.rate_checker.get_hb_alert_text(alert_text)
assert_that(actual_text, equal_to(alert_text))
@then(u'I should NOT see an HB alert "{alert_text}"')
def step(context, alert_text):
actual_text = context.rate_checker.is_hb_alert_hidden(alert_text)
assert_that(actual_text, equal_to(alert_text))
@then(u'Loan type option "{loan_type}" should be "{expected_state}"')
def step(context, loan_type, expected_state):
actual_state = context.rate_checker.is_loan_type_option_enabled(loan_type)
assert_that(actual_state, equal_to(expected_state))
# ARM TYPE
@when(u'I select "{arm_type}" ARM Type')
def step(context, arm_type):
context.rate_checker.set_arm_type(arm_type)
@then(u'I should see "{arm_type}" as the selected ARM Type')
def step(context, arm_type):
current_Selection = context.rate_checker.get_arm_type()
assert_that(current_Selection, equal_to(arm_type))
@then(u'I should NOT see the ARM Type selection')
def step(context):
arm_type_selection = context.rate_checker.is_arm_type_visible()
assert_that(arm_type_selection, equal_to(False))
@then(u'I should see the ARM Type field highlighted')
def step(context):
actual_state = context.rate_checker.is_arm_type_highlighted()
assert_that(actual_state, equal_to(True))
@then(u'I should NOT see the ARM Type field highlighted')
def step(context):
actual_state = context.rate_checker.is_arm_type_highlighted()
assert_that(actual_state, equal_to(False))
# INTEREST COST LABEL
@then(u'I should see primary Interest costs over the first "{loan_years}" years')
@then(u'I should see primary Interest costs over "{loan_years}" years')
def step(context, loan_years):
actual_text = context.rate_checker.get_primary_interest_rate(loan_years)
assert_that(actual_text, equal_to(loan_years))
@then(u'I should see Interest costs over the first "{total_years}" years')
@then(u'I should see Interest costs over "{total_years}" years')
def step(context, total_years):
actual_text = context.rate_checker.get_secondary_interest_rate(total_years)
assert_that(actual_text, equal_to(total_years))
| cc0-1.0 | 9,049,096,797,051,839,000 | 33.348837 | 81 | 0.731663 | false |
IronLanguages/ironpython2 | Src/StdLib/Lib/site-packages/win32/lib/rasutil.py | 38 | 1737 | import win32ras
stateStrings = {
win32ras.RASCS_OpenPort : "OpenPort",
win32ras.RASCS_PortOpened : "PortOpened",
win32ras.RASCS_ConnectDevice : "ConnectDevice",
win32ras.RASCS_DeviceConnected : "DeviceConnected",
win32ras.RASCS_AllDevicesConnected : "AllDevicesConnected",
win32ras.RASCS_Authenticate : "Authenticate",
win32ras.RASCS_AuthNotify : "AuthNotify",
win32ras.RASCS_AuthRetry : "AuthRetry",
win32ras.RASCS_AuthCallback : "AuthCallback",
win32ras.RASCS_AuthChangePassword : "AuthChangePassword",
win32ras.RASCS_AuthProject : "AuthProject",
win32ras.RASCS_AuthLinkSpeed : "AuthLinkSpeed",
win32ras.RASCS_AuthAck : "AuthAck",
win32ras.RASCS_ReAuthenticate : "ReAuthenticate",
win32ras.RASCS_Authenticated : "Authenticated",
win32ras.RASCS_PrepareForCallback : "PrepareForCallback",
win32ras.RASCS_WaitForModemReset : "WaitForModemReset",
win32ras.RASCS_WaitForCallback : "WaitForCallback",
win32ras.RASCS_Projected : "Projected",
win32ras.RASCS_StartAuthentication : "StartAuthentication",
win32ras.RASCS_CallbackComplete : "CallbackComplete",
win32ras.RASCS_LogonNetwork : "LogonNetwork",
win32ras.RASCS_Interactive : "Interactive",
win32ras.RASCS_RetryAuthentication : "RetryAuthentication",
win32ras.RASCS_CallbackSetByCaller : "CallbackSetByCaller",
win32ras.RASCS_PasswordExpired : "PasswordExpired",
win32ras.RASCS_Connected : "Connected",
win32ras.RASCS_Disconnected : "Disconnected"
}
def TestCallback( hras, msg, state, error, exterror):
print "Callback called with ", hras, msg, stateStrings[state], error, exterror
def test(rasName = "_ Divert Off"):
return win32ras.Dial(None, None, (rasName,),TestCallback) | apache-2.0 | 1,507,895,711,088,438,300 | 44.736842 | 79 | 0.753022 | false |
lyoniionly/django-cobra | src/cobra/apps/svnkit/abstract_models.py | 1 | 20237 | from __future__ import absolute_import, print_function
import datetime
import mimetypes
import os
import posixpath
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db import transaction
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
import pysvn
from cobra.models import Model
from cobra.models import fields
from cobra.models import sane_repr
from cobra.core.loading import get_class, get_model
from cobra.core.strings import strip, truncatechars
from . import choices
from cobra.core.constants import README_MARKUPS
from .exceptions import map_svn_exceptions
from .markup.hightlighter import make_html
from .utils.binaryornot import get_starting_chunk
from .utils.binaryornot import is_binary_string
NodeManager = get_class('svnkit.managers', 'NodeManager')
@python_2_unicode_compatible
class AbstractRepository(Model):
"""
Meta data for a subversion repository.
"""
project = models.OneToOneField('project.Project')
uuid = models.CharField(max_length=128, editable=False)
root = models.CharField(
help_text=_('Example: svn://example.com or file:///svn/ or http://host:port'),
max_length=512)
prefix = models.CharField(
        help_text=_('<strong class="text-danger">Important!</strong> You may meet this situation: the svn url you supply is not the '
                    'root of the repository, and you do not have permission '
                    'to access the real root of the repository. Enter the correct repository prefix '
                    'here and we will strip it for you automatically.<br><strong class="text-danger">If you do not have this problem, please ignore this notice.</strong>'),
max_length=512, blank=True)
uri = models.CharField(
help_text=_('Externally facing URI for the repository, if available'),
max_length=512, blank=True)
is_private = models.BooleanField(default=False)
username = models.CharField(max_length=512, blank=True)
password = models.CharField(max_length=512, blank=True)
last_synced = models.DateTimeField(
default=datetime.datetime.fromtimestamp(0, timezone.utc),
editable=False)
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_repository'
verbose_name_plural = _('repositories')
__repr__ = sane_repr('project_id', 'root')
def __str__(self):
return '%s (%s)' % (self.project.name, self.root)
def _get_login(self, realm, username, may_save):
if not (self.username and self.password):
raise ImproperlyConfigured(_(
'repository requires authentication, '
'but no username and password available'))
return (True, self.username, self.password, True)
def get_svn_client(self):
"""
Return a subversion client for this repository. The
authentication information stored with the repository is bound
with the client. The client can be instantiated with a
subversion config file with the COBRA_SVNKIT_SVN_CONFIG_PATH
settings variable.
"""
if settings.COBRA_SVNKIT_SVN_CONFIG_PATH is not None:
client = pysvn.Client(settings.COBRA_SVNKIT_SVN_CONFIG_PATH)
else:
client = pysvn.Client()
# set the exceptions to be more granular
client.exception_style = 1
# hook for cancelling an api call thats taking too long
started_dt = timezone.now()
def _cancel():
current_dt = timezone.now()
delta = (current_dt - started_dt).seconds
if delta > settings.COBRA_SVNKIT_CLIENT_TIMEOUT:
return True
return False
client.callback_cancel = _cancel
# bind the username and password that might be stored with the
# repository model object in case a login is required.
client.callback_get_login = self._get_login
return client
def sync(self):
"""
Update the model object representations of the given repository.
If the UUID has not been obtained for a repository, it is
obtained from the api. New changesets committed to the
repository, since the last time the repository was synced, are
also collected. If no previous sync has been run, all
changesets are collected.
"""
self.last_synced = timezone.now()
if not self.uuid:
self.sync_uuid()
self.sync_changesets()
self.save()
# @map_svn_exceptions
def sync_uuid(self):
"""Get the UUID of the given repository."""
c = self.get_svn_client()
info = c.info2(self.root, recurse=False)
self.uuid = info[0][1]['repos_UUID']
sync_uuid = map_svn_exceptions(sync_uuid)
def sync_changesets(self):
"""
Get new changesets committed to the repository since the last
time they were collected.
"""
Changeset = get_model('svnkit', 'Changeset')
Change = get_model('svnkit', 'Change')
revision = self.get_latest_revision()
c = self.get_svn_client()
log = c.log(
self.root,
revision_end=pysvn.Revision(
pysvn.opt_revision_kind.number, revision),
discover_changed_paths=True)
for item in log:
# ignore the overlap, the changeset is already stored locally
if item['revision'].number == revision:
continue
changeset = Changeset.objects.create(
repository=self,
date=datetime.datetime.fromtimestamp(item['date'], timezone.utc),
revision=item['revision'].number,
author=item.get('author', ''),
                message=item.get('message', '')  # the message should normally exist, but some revisions have no message
)
for changed_path in item['changed_paths']:
copyfrom_revision = None
if changed_path['copyfrom_revision']:
copyfrom_revision = changed_path[
'copyfrom_revision'].number
change = Change.objects.create(
changeset=changeset,
path=changed_path['path'],
action=changed_path['action'],
copied_from_path=changed_path['copyfrom_path'],
copied_from_revision=copyfrom_revision)
sync_changesets = transaction.atomic(
map_svn_exceptions(sync_changesets))
def get_latest_revision(self):
"""
Get the latest revision of the repository.
"""
revision = 0
if self.changesets.count():
revision = self.changesets.all()[0].revision
return revision
def get_node(self, path, revision=None):
"""
Get a `svnkit.models.Node` object at the given
path. Optionally specify a revision.
"""
Node = get_model('svnkit', 'Node')
return Node.objects.get_or_sync(self, path, revision)
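    # Usage sketch (added for illustration; assumes a concrete Repository model subclassing
    # this abstract base exists in the project):
    #   repo = Repository.objects.get(project=some_project)
    #   repo.sync()                       # fetches the UUID and any new changesets
    #   node = repo.get_node('/trunk')    # Node at the latest known revision
    #   node = repo.get_node('/trunk', revision=42)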
@python_2_unicode_compatible
class AbstractChangeset(Model):
"""
The meta data about a revision in a subversion repository.
"""
repository = fields.FlexibleForeignKey('svnkit.Repository', related_name='changesets')
date = models.DateTimeField()
revision = models.PositiveIntegerField(db_index=True)
author = models.CharField(max_length=512)
message = models.TextField()
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_changeset'
unique_together = (('repository', 'revision'),)
ordering = ('-revision',)
__repr__ = sane_repr('repository_id', 'revision')
def __str__(self):
return 'r%s' % self.revision
@property
def title(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
message = truncatechars(message.splitlines()[0], 40)
return message
@property
def rest_message(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
split_msgs = message.splitlines()
first_line_msg = split_msgs[0]
if len(first_line_msg) > 40:
split_msgs[0] = '...'+first_line_msg[37:]
else:
del split_msgs[0]
message = '\n'.join(split_msgs)
return message
@models.permalink
def get_absolute_url(self):
return ('svnkit:changeset', (self.repository.project.organization.slug, self.repository.project.slug, self.revision))
def get_previous(self):
"""Get the previous changeset in the repository."""
return self.repository.changesets.filter(revision__lte=self.revision - 1).first()
def get_next(self):
"""Get the next changeset in the repository."""
return self.repository.changesets.filter(revision__gte=self.revision + 1).last()
@python_2_unicode_compatible
class AbstractChange(Model):
"""
A changed path in a changeset, including the action taken.
"""
changeset = fields.FlexibleForeignKey('svnkit.Changeset', related_name='changes')
path = models.CharField(max_length=2048, db_index=True)
action = models.CharField(max_length=1)
copied_from_path = models.CharField(max_length=2048, null=True)
copied_from_revision = models.PositiveIntegerField(null=True)
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_change'
unique_together = (('changeset', 'path'),)
ordering = ('changeset', 'path')
__repr__ = sane_repr('action', 'path')
def __str__(self):
return '%s %s' % (self.action, self.path)
def _get_base_change(self):
if hasattr(self, '_base_change'):
return self._base_change
if self.copied_from_revision is not None:
self._base_change = self.__class__.objects.get(
changeset__repository=self.changeset.repository,
revision=self.copied_from_revision
)
return self._base_change
@property
def relative_path(self):
if self.changeset.repository.prefix:
repo_prefix = self.changeset.repository.prefix
if repo_prefix.endswith(posixpath.sep):
repo_prefix = repo_prefix[:-1]
return self.path.replace(repo_prefix, '', 1)
else:
return self.path
def is_addition(self):
return self.action == 'A'
def is_modification(self):
return self.action == 'M'
def is_deletion(self):
return self.action == 'D'
@python_2_unicode_compatible
class AbstractNode(Model):
"""
The meta data for a path at a revision in a repository.
Nodes can be understood as 'views' of a particular path in a
repository at a particular revision number (a revision that may or
may not have made changes at that path/revision). A node's actual
content is stored in a separate model object, since the content
may remain unchanged across a number of revisions at a particular
path. The `get_last_changeset` method can be used to obtain the
changeset and revision in which the node's path was last changed.
This model largely reflects the information available through the
subversion api. The field `cached` indicates when the data was
retrieved from the api, and `cached_indirectly` indicates whether
or not the node was generated from an api call for the node or
from a related node (parent or one of its possible
children). Indirectly cached nodes (which are usually nodes
    created as placeholders for hierarchical connections instead of
    through a direct api call) require another api call to collect the
    remaining missing information. Nodes can optionally be included
in a regular cleanup.
"""
repository = fields.FlexibleForeignKey('svnkit.Repository', related_name='nodes')
parent = fields.FlexibleForeignKey('svnkit.Node', related_name='children', null=True)
path = models.CharField(max_length=2048, db_index=True)
node_type = models.CharField(max_length=1)
size = models.PositiveIntegerField(default=0)
last_changed = models.DateTimeField(null=True)
revision = models.PositiveIntegerField()
cached = models.DateTimeField(default=timezone.now)
cached_indirectly = models.BooleanField(default=True)
content = fields.FlexibleForeignKey('svnkit.Content', related_name='nodes', null=True)
objects = NodeManager(cache_fields=(
'pk',
))
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_node'
unique_together = (('repository', 'path', 'revision'),)
ordering = ('node_type', 'path')
__repr__ = sane_repr('path', 'revision')
def __str__(self):
return '%s@%s' % (self.path, self.revision)
def iter_path(self):
"""
        Returns a generator that 'walks' up the node hierarchy,
yielding each parent path until the root node is reached ('/').
"""
path = self.path
yield path
while path != posixpath.sep:
path = posixpath.split(path)[0]
yield path
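    # Worked example: for a node at '/a/b/c.txt' this yields
    # '/a/b/c.txt', '/a/b', '/a' and finally '/' (the root).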
def iter_path_basename(self):
"""
Returns a generator that 'walks' up the node hierarchy,
yielding a tuple of the path, and the basename of the path for
each parent node until the root node is reached ('/').
"""
for path in self.iter_path():
basename = posixpath.basename(path)
if not basename:
# basename = self.repository.label
basename = self.repository.project.name
yield (path, basename)
def get_last_changeset(self):
"""Get the latest `Changeset` object that affected this node."""
c = self.repository.changesets.filter(
date__lte=self.last_changed)#.exclude(revision=self.revision)
if c.count():
return c[0]
else:
return self.repository.changesets.get(date=self.last_changed)
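    # Because changesets are ordered by '-revision', c[0] above is the newest
    # changeset dated no later than this node's last change; the fallback
    # expects an exact date match when the filter finds nothing.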
@models.permalink
def get_absolute_url(self):
repository = self.repository
if self.revision != repository.get_latest_revision():
return (
'svnkit:node-revision', (
repository.project.organization.slug, repository.project.slug, self.revision, self.path))
else:
return ('svnkit:node', (repository.project.organization.slug, repository.project.slug, self.path))
def get_basename(self):
"""
The basename of the node, either a file name or a directory
name.
"""
basename = posixpath.basename(self.path)
return basename
def is_directory(self):
"""Whether the node is a directory."""
return self.node_type == choices.NODE_TYPE_DIR
def is_file(self):
"""Whether the node is a file."""
return self.node_type == choices.NODE_TYPE_FILE
def is_root(self):
"""Whether the node is the root node ('/')."""
return self.is_directory() and self.path == posixpath.sep
def has_properties(self):
"""Whether the node has subversion properties set."""
if self.properties.count():
return True
return False
@python_2_unicode_compatible
class AbstractProperty(Model):
"""
A property that has been set on a node.
"""
node = fields.FlexibleForeignKey('svnkit.Node', related_name='properties')
key = models.CharField(max_length=512, db_index=True)
value = models.TextField()
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_property'
unique_together = (('node', 'key'),)
verbose_name_plural = 'properties'
__repr__ = sane_repr('path', 'revision')
def __str__(self):
return '%s: %s' % (self.key, self.value)
@python_2_unicode_compatible
class AbstractContent(Model):
"""
The contents of a node at a revision.
The data is base64 encoded in the database to allow storage of
binary data. The `set_data` and `get_data` methods should be used
to manipulate a node's data. `cached` indicates when the contents
were retrieved from the api. Content objects can optionally be
part of a regular cleanup.
"""
repository = fields.FlexibleForeignKey('svnkit.Repository', related_name='content')
path = models.CharField(max_length=2048)
last_changed = models.DateTimeField()
cached = models.DateTimeField(default=timezone.now)
size = models.PositiveIntegerField(default=0)
data = models.TextField()
class Meta:
abstract = True
app_label = 'svnkit'
db_table = 'cobra_svn_content'
unique_together = (('repository', 'path', 'last_changed'),)
__repr__ = sane_repr('path', 'repository_id')
def __str__(self):
return '%s@%s' % (self.path, self.get_last_changeset())
def set_data(self, data):
self.size = len(data)
self.data = data.encode('base64')
def get_data(self):
if hasattr(self, '_decoded_data'):
return self._decoded_data
self._decoded_data = self.data.decode('base64')
return self._decoded_data
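    # Hedged example (Python 2 base64 codecs, matching the code above):
    #   content.set_data('hello')   # stores 'aGVsbG8=\n', size == 5
    #   content.get_data()          # -> 'hello' (decoded and memoised)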
@property
def lines(self):
return self.get_data().count('\n')
def get_last_changeset(self):
"""Get the changeset in which this content was committed."""
return self.repository.changesets.get(date=self.last_changed)
def get_mimetype(self):
"""
Get the mimetype of the content. This is determined by the
extension of the basename of the path. Defaults to
application/octet-stream if the mimetype cannot be determined.
"""
mtype = mimetypes.guess_type(self.path)[0]
if mtype is None:
return 'application/octet-stream'
return mtype
def get_maintype(self):
"""
Get the maintype of the mimetype, i.e. 'image' in 'image/png'.
"""
return self.get_mimetype().split('/')[0]
def get_subtype(self):
"""
Get the subtype of the mimetype, i.e. 'png' in 'image/png'.
"""
return self.get_mimetype().split('/')[-1]
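    # Example: a path ending in '.png' guesses 'image/png', so the maintype
    # is 'image' and the subtype 'png'; unknown extensions fall back to
    # 'application/octet-stream'.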
@models.permalink
def get_absolute_url(self):
return ('svnkit:content', (
self.repository.project.organization.slug, self.repository.project.slug, self.pk, self.get_basename()))
def is_binary(self):
"""
Whether or not the content is binary. This is determined in
part by the mimetype, but if the mimetype is not available,
then if the data cannot be decoded into ascii it will be
presumed a binary format.
"""
# mtype = mimetypes.guess_type(self.path)[0]
# if mtype is None:
# try:
# self.get_data().decode('gbk')
# except UnicodeDecodeError:
# return True
# return False
chunk = get_starting_chunk(self.get_data())
return is_binary_string(chunk)
# if not mtype.startswith('text'):
# return True
# return False
def get_basename(self):
"""Get the basename of the node's full path (the filename)."""
basename = posixpath.basename(self.path)
return basename
def get_data_display(self):
"""
Get the content for display in text. Binary formats are just
shown as '(binary)'. Plain text formats get run through the
appropriate pygments lexer if the package is available.
"""
if self.is_binary():
return _('<pre>(binary)</pre>')
try:
txt = self.get_data().decode('utf-8')
except UnicodeDecodeError:
txt = self.get_data().decode('gbk')
return make_html(txt, self.get_basename()) | apache-2.0 | 47,064,571,029,829,176 | 34.505263 | 175 | 0.622177 | false |
danielharbor/openerp | addons/account_check_writing/wizard/__init__.py | 437 | 1082 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_check_batch_printing
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,138,752,567,987,986,000 | 44.083333 | 78 | 0.619224 | false |
ZeitOnline/zeit.ldap | bootstrap.py | 299 | 5686 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
to_reload = False
try:
import pkg_resources
import setuptools
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# XXX use a more permanent ez_setup.py URL when available.
exec(urlopen('https://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py'
).read(), ez)
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
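    # Illustrative note (assuming the pre-PEP-440 parse_version format used
    # by this setuptools vintage): '1.0a1' parses with a '*a' part, which is
    # not in _final_parts, so it is rejected; a plain '1.0' yields only final
    # parts and is accepted.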
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s",
repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| bsd-3-clause | -154,321,133,968,043,940 | 32.447059 | 79 | 0.608336 | false |
davidzchen/tensorflow | tensorflow/python/distribute/ps_values.py | 6 | 10000 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed values for PS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values
from tensorflow.python.distribute import values_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.types import core
# Variable used in PSStrategy TF 1 and CentralStorageStrategy.
class AggregatingVariable(variables_lib.Variable, core.Tensor):
"""A wrapper around a variable that aggregates updates across replicas."""
def __init__(self, strategy, v, aggregation):
self._distribute_strategy = strategy
self._v = v
# NOTE: We don't use "_distributed_container" here because we don't want
# to trigger that code path in regroup().
v._aggregating_container = weakref.ref(self) # pylint: disable=protected-access
self._aggregation = aggregation
def __deepcopy__(self, memo):
"""Perform a deepcopy of the `AggregatingVariable`.
Unlike the deepcopy of a regular tf.Variable, this keeps the original
strategy and devices of the `AggregatingVariable`. To avoid confusion
with the behavior of deepcopy on a regular `Variable` (which does
copy into new devices), we only allow a deepcopy of a `AggregatingVariable`
within its originating strategy scope.
Args:
memo: The memoization object for `deepcopy`.
Returns:
A deep copy of the current `AggregatingVariable`.
Raises:
RuntimeError: If trying to deepcopy into a different strategy.
"""
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
v = copy.deepcopy(self._v, memo)
copied_variable = type(self)(
strategy=self._distribute_strategy,
v=v,
aggregation=self._aggregation)
memo[id(self)] = copied_variable
return copied_variable
def get(self):
return self._v
@property
def distribute_strategy(self):
return self._distribute_strategy
def __getattr__(self, name):
return getattr(self._v, name)
def _assign_func(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
f = kwargs.pop("f")
if ds_context.in_cross_replica_context():
if distribute_lib.get_update_replica_id() is not None:
# We are calling an assign function in an update context.
return f(self._v, *args, **kwargs)
# We are calling an assign function in cross replica context, wrap it in
# an update call.
return self._distribute_strategy.extended.update(
self, f, args=args, kwargs=kwargs)
else:
replica_context = ds_context.get_replica_context()
assert replica_context
# We are calling an assign function in replica context.
# We reduce the value we want to assign/add/sub. More details about how
# we handle the different use cases can be found in the _reduce method.
# We call the function with the reduced value.
if self._aggregation == vs.VariableAggregation.NONE:
raise ValueError(
values_util.aggregation_error_msg.format(
variable_type="AggregatingVariable"))
def merge_fn(strategy,
value,
use_locking=False,
name=None,
read_value=True):
v = values_util.apply_aggregation(strategy, value, self._aggregation,
self)
if name and isinstance(name, values.PerReplica):
name = name.values[0]
return strategy.extended.update(
self,
f,
args=(v,),
kwargs={
"use_locking": use_locking,
"name": name,
"read_value": read_value
})
return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs)
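  # Summary of the dispatch above: inside an update context the assign runs
  # directly on the wrapped variable; in cross-replica context it is wrapped
  # in strategy.extended.update(); in replica context the per-replica values
  # are first combined according to self._aggregation via merge_call().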
def assign_sub(self, *args, **kwargs):
assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
return self._assign_func(f=assign_sub_fn, *args, **kwargs)
def assign_add(self, *args, **kwargs):
assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
return self._assign_func(f=assign_add_fn, *args, **kwargs)
def assign(self, *args, **kwargs):
assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
return self._assign_func(f=assign_fn, *args, **kwargs)
@property
def initializer(self):
return self._v.initializer
def initialized_value(self):
return self._v.initialized_value()
@property
def initial_value(self):
return self._v.initial_value
@property
def op(self):
return self._v.op
def read_value(self):
return self._v.read_value()
def eval(self, session=None):
return self._v.eval(session)
@property
def graph(self):
return self._v.graph
@property
def device(self):
return self._v.device
@property
def shape(self):
return self._v.shape
@property
def aggregation(self):
return self._aggregation
@property
def synchronization(self):
return self._v.synchronization
@property
def name(self):
return self._v.name
@property
def trainable(self):
return self._v.trainable
@property
def dtype(self):
return self._v.dtype
# TODO(josh11b): Test saving & restoring.
def _gather_saveables_for_checkpoint(self):
return {trackable.VARIABLE_VALUE_KEY: self._v}
def _map_resources(self, save_options):
"""For implementing `Trackable`."""
# By delegating this method to the wrapped variable, SavedModel with
# AggregatingVariable are identical to SavedModel with normal variables.
obj_map, resource_map = self._v._map_resources(save_options) # pylint:disable=protected-access
obj_map[self] = obj_map[self._v]
return obj_map, resource_map
# pylint: disable=multiple-statements
def __add__(self, o):
return self._v + o
def __radd__(self, o):
return o + self._v
def __sub__(self, o):
return self._v - o
def __rsub__(self, o):
return o - self._v
def __mul__(self, o):
return self._v * o
def __rmul__(self, o):
return o * self._v
def __truediv__(self, o):
return self._v / o
def __rtruediv__(self, o):
return o / self._v
def __floordiv__(self, o):
return self._v // o
def __rfloordiv__(self, o):
return o // self._v
def __mod__(self, o):
return self._v % o
def __rmod__(self, o):
return o % self._v
def __lt__(self, o):
return self._v < o
def __le__(self, o):
return self._v <= o
def __gt__(self, o):
return self._v > o
def __ge__(self, o):
return self._v >= o
def __and__(self, o):
return self._v & o
def __rand__(self, o):
return o & self._v
def __or__(self, o):
return self._v | o
def __ror__(self, o):
return o | self._v
def __xor__(self, o):
return self._v ^ o
def __rxor__(self, o):
return o ^ self._v
def __getitem__(self, o):
return self._v[o]
def __pow__(self, o, modulo=None):
return pow(self._v, o, modulo)
def __rpow__(self, o):
return pow(o, self._v)
def __invert__(self):
return ~self._v
def __neg__(self):
return -self._v
def __abs__(self):
return abs(self._v)
def __div__(self, o):
try:
return self._v.__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._v.__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._v.__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._v.__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __str__(self):
return str(self._v)
def __repr__(self):
return repr(self._v)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
return ops.convert_to_tensor(self.get(), dtype=dtype, name=name,
as_ref=as_ref)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype, name, as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(AggregatingVariable,
_tensor_conversion_aggregate)
| apache-2.0 | 6,599,920,328,647,396,000 | 28.154519 | 99 | 0.6385 | false |
levkar/odoo | addons/point_of_sale/models/pos_order.py | 2 | 46267 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from datetime import timedelta
from functools import partial
import psycopg2
from odoo import api, fields, models, tools, _
from odoo.tools import float_is_zero
from odoo.exceptions import UserError
from odoo.http import request
import odoo.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
class PosOrder(models.Model):
_name = "pos.order"
_description = "Point of Sale Orders"
_order = "id desc"
@api.model
def _amount_line_tax(self, line, fiscal_position_id):
taxes = line.tax_ids.filtered(lambda t: t.company_id.id == line.order_id.company_id.id)
if fiscal_position_id:
taxes = fiscal_position_id.map_tax(taxes, line.product_id, line.order_id.partner_id)
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = taxes.compute_all(price, line.order_id.pricelist_id.currency_id, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)['taxes']
return sum(tax.get('amount', 0.0) for tax in taxes)
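    # Hedged example: with price_unit 100.0, discount 10% and qty 2, the
    # taxes are computed on 90.0 per unit (180.0 in total); the exact tax
    # amount then depends on the configured account.tax records.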
@api.model
def _order_fields(self, ui_order):
process_line = partial(self.env['pos.order.line']._order_line_fields)
return {
'name': ui_order['name'],
'user_id': ui_order['user_id'] or False,
'session_id': ui_order['pos_session_id'],
'lines': [process_line(l) for l in ui_order['lines']] if ui_order['lines'] else False,
'pos_reference': ui_order['name'],
'partner_id': ui_order['partner_id'] or False,
'date_order': ui_order['creation_date'],
'fiscal_position_id': ui_order['fiscal_position_id']
}
def _payment_fields(self, ui_paymentline):
return {
'amount': ui_paymentline['amount'] or 0.0,
'payment_date': ui_paymentline['name'],
'statement_id': ui_paymentline['statement_id'],
'payment_name': ui_paymentline.get('note', False),
'journal': ui_paymentline['journal_id'],
}
# This deals with orders that belong to a closed session. In order
# to recover from this situation we create a new rescue session,
# making it obvious that something went wrong.
# A new, separate, rescue session is preferred for every such recovery,
# to avoid adding unrelated orders to live sessions.
def _get_valid_session(self, order):
PosSession = self.env['pos.session']
closed_session = PosSession.browse(order['pos_session_id'])
_logger.warning('session %s (ID: %s) was closed but received order %s (total: %s) belonging to it',
closed_session.name,
closed_session.id,
order['name'],
order['amount_total'])
_logger.warning('attempting to create recovery session for saving order %s', order['name'])
new_session = PosSession.create({
'config_id': closed_session.config_id.id,
'name': _('(RESCUE FOR %(session)s)') % {'session': closed_session.name},
'rescue': True, # avoid conflict with live sessions
})
# bypass opening_control (necessary when using cash control)
new_session.action_pos_session_open()
return new_session
def _match_payment_to_invoice(self, order):
account_precision = self.env['decimal.precision'].precision_get('Account')
# ignore orders with an amount_paid of 0 because those are returns through the POS
if not float_is_zero(order['amount_return'], account_precision) and not float_is_zero(order['amount_paid'], account_precision):
cur_amount_paid = 0
payments_to_keep = []
for payment in order.get('statement_ids'):
if cur_amount_paid + payment[2]['amount'] > order['amount_total']:
payment[2]['amount'] = order['amount_total'] - cur_amount_paid
payments_to_keep.append(payment)
break
cur_amount_paid += payment[2]['amount']
payments_to_keep.append(payment)
order['statement_ids'] = payments_to_keep
order['amount_return'] = 0
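    # Hedged example: for an order with amount_total 50.0 paid with two 30.0
    # payments, the second payment is clipped to 20.0, later payments are
    # dropped and amount_return is reset to 0, so the invoice is never
    # over-paid.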
@api.model
def _process_order(self, pos_order):
prec_acc = self.env['decimal.precision'].precision_get('Account')
pos_session = self.env['pos.session'].browse(pos_order['pos_session_id'])
if pos_session.state == 'closing_control' or pos_session.state == 'closed':
pos_order['pos_session_id'] = self._get_valid_session(pos_order).id
order = self.create(self._order_fields(pos_order))
journal_ids = set()
for payments in pos_order['statement_ids']:
if not float_is_zero(payments[2]['amount'], precision_digits=prec_acc):
order.add_payment(self._payment_fields(payments[2]))
journal_ids.add(payments[2]['journal_id'])
if pos_session.sequence_number <= pos_order['sequence_number']:
pos_session.write({'sequence_number': pos_order['sequence_number'] + 1})
pos_session.refresh()
if not float_is_zero(pos_order['amount_return'], prec_acc):
cash_journal_id = pos_session.cash_journal_id.id
if not cash_journal_id:
# Select for change one of the cash journals used in this
# payment
cash_journal = self.env['account.journal'].search([
('type', '=', 'cash'),
('id', 'in', list(journal_ids)),
], limit=1)
if not cash_journal:
# If none, select for change one of the cash journals of the POS
# This is used for example when a customer pays by credit card
# an amount higher than total amount of the order and gets cash back
cash_journal = [statement.journal_id for statement in pos_session.statement_ids if statement.journal_id.type == 'cash']
if not cash_journal:
raise UserError(_("No cash statement found for this session. Unable to record returned cash."))
cash_journal_id = cash_journal[0].id
order.add_payment({
'amount': -pos_order['amount_return'],
'payment_date': fields.Datetime.now(),
'payment_name': _('return'),
'journal': cash_journal_id,
})
return order
def _prepare_analytic_account(self, line):
'''This method is designed to be inherited in a custom module'''
return False
def _create_account_move(self, dt, ref, journal_id, company_id):
date_tz_user = fields.Datetime.context_timestamp(self, fields.Datetime.from_string(dt))
date_tz_user = fields.Date.to_string(date_tz_user)
return self.env['account.move'].sudo().create({'ref': ref, 'journal_id': journal_id, 'date': date_tz_user})
def _prepare_invoice(self):
"""
Prepare the dict of values to create the new invoice for a pos order.
"""
return {
'name': self.name,
'origin': self.name,
'account_id': self.partner_id.property_account_receivable_id.id,
'journal_id': self.session_id.config_id.invoice_journal_id.id,
'type': 'out_invoice',
'reference': self.name,
'partner_id': self.partner_id.id,
'comment': self.note or '',
# considering partner's sale pricelist's currency
'currency_id': self.pricelist_id.currency_id.id,
'user_id': self.env.uid,
}
def _action_create_invoice_line(self, line=False, invoice_id=False):
InvoiceLine = self.env['account.invoice.line']
inv_name = line.product_id.name_get()[0][1]
inv_line = {
'invoice_id': invoice_id,
'product_id': line.product_id.id,
'quantity': line.qty,
'account_analytic_id': self._prepare_analytic_account(line),
'name': inv_name,
}
# Oldlin trick
invoice_line = InvoiceLine.sudo().new(inv_line)
invoice_line._onchange_product_id()
invoice_line.invoice_line_tax_ids = invoice_line.invoice_line_tax_ids.filtered(lambda t: t.company_id.id == line.order_id.company_id.id).ids
fiscal_position_id = line.order_id.fiscal_position_id
if fiscal_position_id:
invoice_line.invoice_line_tax_ids = fiscal_position_id.map_tax(invoice_line.invoice_line_tax_ids, line.product_id, line.order_id.partner_id)
invoice_line.invoice_line_tax_ids = invoice_line.invoice_line_tax_ids.ids
# We convert a new id object back to a dictionary to write to
# bridge between old and new api
inv_line = invoice_line._convert_to_write({name: invoice_line[name] for name in invoice_line._cache})
inv_line.update(price_unit=line.price_unit, discount=line.discount)
return InvoiceLine.sudo().create(inv_line)
def _create_account_move_line(self, session=None, move=None):
# Tricky, via the workflow, we only have one id in the ids variable
"""Create a account move line of order grouped by products or not."""
IrProperty = self.env['ir.property']
ResPartner = self.env['res.partner']
if session and not all(session.id == order.session_id.id for order in self):
raise UserError(_('Selected orders do not have the same session!'))
grouped_data = {}
have_to_group_by = session and session.config_id.group_by or False
rounding_method = session and session.config_id.company_id.tax_calculation_rounding_method
        for order in self.filtered(lambda o: not o.account_move or o.state == 'paid'):
current_company = order.sale_journal.company_id
account_def = IrProperty.get(
'property_account_receivable_id', 'res.partner')
order_account = order.partner_id.property_account_receivable_id.id or account_def and account_def.id
partner_id = ResPartner._find_accounting_partner(order.partner_id).id or False
if move is None:
# Create an entry for the sale
journal_id = self.env['ir.config_parameter'].sudo().get_param(
'pos.closing.journal_id_%s' % current_company.id, default=order.sale_journal.id)
move = self._create_account_move(
order.session_id.start_at, order.name, int(journal_id), order.company_id.id)
def insert_data(data_type, values):
# if have_to_group_by:
values.update({
'partner_id': partner_id,
'move_id': move.id,
})
if data_type == 'product':
key = ('product', values['partner_id'], (values['product_id'], tuple(values['tax_ids'][0][2]), values['name']), values['analytic_account_id'], values['debit'] > 0)
elif data_type == 'tax':
key = ('tax', values['partner_id'], values['tax_line_id'], values['debit'] > 0)
elif data_type == 'counter_part':
key = ('counter_part', values['partner_id'], values['account_id'], values['debit'] > 0)
else:
return
grouped_data.setdefault(key, [])
if have_to_group_by:
if not grouped_data[key]:
grouped_data[key].append(values)
else:
current_value = grouped_data[key][0]
current_value['quantity'] = current_value.get('quantity', 0.0) + values.get('quantity', 0.0)
current_value['credit'] = current_value.get('credit', 0.0) + values.get('credit', 0.0)
current_value['debit'] = current_value.get('debit', 0.0) + values.get('debit', 0.0)
else:
grouped_data[key].append(values)
# because of the weird way the pos order is written, we need to make sure there is at least one line,
# because just after the 'for' loop there are references to 'line' and 'income_account' variables (that
# are set inside the for loop)
# TOFIX: a deep refactoring of this method (and class!) is needed
# in order to get rid of this stupid hack
assert order.lines, _('The POS order must have lines when calling this method')
# Create an move for each order line
cur = order.pricelist_id.currency_id
for line in order.lines:
amount = line.price_subtotal
# Search for the income account
if line.product_id.property_account_income_id.id:
income_account = line.product_id.property_account_income_id.id
elif line.product_id.categ_id.property_account_income_categ_id.id:
income_account = line.product_id.categ_id.property_account_income_categ_id.id
else:
raise UserError(_('Please define income '
'account for this product: "%s" (id:%d).')
% (line.product_id.name, line.product_id.id))
name = line.product_id.name
if line.notice:
# add discount reason in move
name = name + ' (' + line.notice + ')'
# Create a move for the line for the order line
insert_data('product', {
'name': name,
'quantity': line.qty,
'product_id': line.product_id.id,
'account_id': income_account,
'analytic_account_id': self._prepare_analytic_account(line),
'credit': ((amount > 0) and amount) or 0.0,
'debit': ((amount < 0) and -amount) or 0.0,
'tax_ids': [(6, 0, line.tax_ids_after_fiscal_position.ids)],
'partner_id': partner_id
})
# Create the tax lines
taxes = line.tax_ids_after_fiscal_position.filtered(lambda t: t.company_id.id == current_company.id)
if not taxes:
continue
for tax in taxes.compute_all(line.price_unit * (100.0 - line.discount) / 100.0, cur, line.qty)['taxes']:
insert_data('tax', {
'name': _('Tax') + ' ' + tax['name'],
'product_id': line.product_id.id,
'quantity': line.qty,
'account_id': tax['account_id'] or income_account,
'credit': ((tax['amount'] > 0) and tax['amount']) or 0.0,
'debit': ((tax['amount'] < 0) and -tax['amount']) or 0.0,
'tax_line_id': tax['id'],
'partner_id': partner_id
})
# round tax lines per order
if rounding_method == 'round_globally':
for group_key, group_value in grouped_data.iteritems():
if group_key[0] == 'tax':
for line in group_value:
line['credit'] = cur.round(line['credit'])
line['debit'] = cur.round(line['debit'])
# counterpart
insert_data('counter_part', {
'name': _("Trade Receivables"), # order.name,
'account_id': order_account,
'credit': ((order.amount_total < 0) and -order.amount_total) or 0.0,
'debit': ((order.amount_total > 0) and order.amount_total) or 0.0,
'partner_id': partner_id
})
order.write({'state': 'done', 'account_move': move.id})
all_lines = []
for group_key, group_data in grouped_data.iteritems():
for value in group_data:
all_lines.append((0, 0, value),)
if move: # In case no order was changed
move.sudo().write({'line_ids': all_lines})
move.sudo().post()
return True
def _default_session(self):
return self.env['pos.session'].search([('state', '=', 'opened'), ('user_id', '=', self.env.uid)], limit=1)
def _default_pricelist(self):
return self._default_session().config_id.pricelist_id
name = fields.Char(string='Order Ref', required=True, readonly=True, copy=False, default='/')
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, default=lambda self: self.env.user.company_id)
date_order = fields.Datetime(string='Order Date', readonly=True, index=True, default=fields.Datetime.now)
user_id = fields.Many2one('res.users', string='Salesman', help="Person who uses the cash register. It can be a reliever, a student or an interim employee.", default=lambda self: self.env.uid)
amount_tax = fields.Float(compute='_compute_amount_all', string='Taxes', digits=0)
amount_total = fields.Float(compute='_compute_amount_all', string='Total', digits=0)
amount_paid = fields.Float(compute='_compute_amount_all', string='Paid', states={'draft': [('readonly', False)]}, readonly=True, digits=0)
amount_return = fields.Float(compute='_compute_amount_all', string='Returned', digits=0)
lines = fields.One2many('pos.order.line', 'order_id', string='Order Lines', states={'draft': [('readonly', False)]}, readonly=True, copy=True)
statement_ids = fields.One2many('account.bank.statement.line', 'pos_statement_id', string='Payments', states={'draft': [('readonly', False)]}, readonly=True)
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, states={
'draft': [('readonly', False)]}, readonly=True, default=_default_pricelist)
partner_id = fields.Many2one('res.partner', string='Customer', change_default=True, index=True, states={'draft': [('readonly', False)], 'paid': [('readonly', False)]})
sequence_number = fields.Integer(string='Sequence Number', help='A session-unique sequence number for the order', default=1)
session_id = fields.Many2one(
'pos.session', string='Session', required=True, index=True,
domain="[('state', '=', 'opened')]", states={'draft': [('readonly', False)]},
readonly=True, default=_default_session)
config_id = fields.Many2one('pos.config', related='session_id.config_id', string="Point of Sale")
state = fields.Selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('paid', 'Paid'), ('done', 'Posted'), ('invoiced', 'Invoiced')],
'Status', readonly=True, copy=False, default='draft')
invoice_id = fields.Many2one('account.invoice', string='Invoice', copy=False)
account_move = fields.Many2one('account.move', string='Journal Entry', readonly=True, copy=False)
picking_id = fields.Many2one('stock.picking', string='Picking', readonly=True, copy=False)
picking_type_id = fields.Many2one('stock.picking.type', related='session_id.config_id.picking_type_id', string="Picking Type")
location_id = fields.Many2one('stock.location', related='session_id.config_id.stock_location_id', string="Location", store=True)
note = fields.Text(string='Internal Notes')
nb_print = fields.Integer(string='Number of Print', readonly=True, copy=False, default=0)
pos_reference = fields.Char(string='Receipt Ref', readonly=True, copy=False)
sale_journal = fields.Many2one('account.journal', related='session_id.config_id.journal_id', string='Sale Journal', store=True, readonly=True)
fiscal_position_id = fields.Many2one('account.fiscal.position', string='Fiscal Position', default=lambda self: self._default_session().config_id.default_fiscal_position_id)
@api.depends('statement_ids', 'lines.price_subtotal_incl', 'lines.discount')
def _compute_amount_all(self):
for order in self:
order.amount_paid = order.amount_return = order.amount_tax = 0.0
currency = order.pricelist_id.currency_id
order.amount_paid = sum(payment.amount for payment in order.statement_ids)
order.amount_return = sum(payment.amount < 0 and payment.amount or 0 for payment in order.statement_ids)
order.amount_tax = currency.round(sum(self._amount_line_tax(line, order.fiscal_position_id) for line in order.lines))
amount_untaxed = currency.round(sum(line.price_subtotal for line in order.lines))
order.amount_total = order.amount_tax + amount_untaxed
@api.onchange('partner_id')
def _onchange_partner_id(self):
if self.partner_id:
            self.pricelist_id = self.partner_id.property_product_pricelist.id
@api.multi
def write(self, vals):
res = super(PosOrder, self).write(vals)
Partner = self.env['res.partner']
# If you change the partner of the PoS order, change also the partner of the associated bank statement lines
if 'partner_id' in vals:
for order in self:
partner_id = False
if order.invoice_id:
raise UserError(_("You cannot change the partner of a POS order for which an invoice has already been issued."))
if vals['partner_id']:
partner = Partner.browse(vals['partner_id'])
partner_id = Partner._find_accounting_partner(partner).id
order.statement_ids.write({'partner_id': partner_id})
return res
@api.multi
def unlink(self):
for pos_order in self.filtered(lambda pos_order: pos_order.state not in ['draft', 'cancel']):
raise UserError(_('In order to delete a sale, it must be new or cancelled.'))
return super(PosOrder, self).unlink()
@api.model
def create(self, values):
if values.get('session_id'):
# set name based on the sequence specified on the config
session = self.env['pos.session'].browse(values['session_id'])
values['name'] = session.config_id.sequence_id._next()
values.setdefault('pricelist_id', session.config_id.pricelist_id.id)
else:
# fallback on any pos.order sequence
values['name'] = self.env['ir.sequence'].next_by_code('pos.order')
return super(PosOrder, self).create(values)
@api.multi
def action_view_invoice(self):
return {
'name': _('Customer Invoice'),
'view_mode': 'form',
'view_id': self.env.ref('account.invoice_form').id,
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'res_id': self.invoice_id.id,
}
@api.multi
def action_pos_order_paid(self):
if not self.test_paid():
raise UserError(_("Order is not paid."))
self.write({'state': 'paid'})
return self.create_picking()
@api.multi
def action_pos_order_invoice(self):
Invoice = self.env['account.invoice']
for order in self:
# Force company for all SUPERUSER_ID action
local_context = dict(self.env.context, force_company=order.company_id.id, company_id=order.company_id.id)
if order.invoice_id:
Invoice += order.invoice_id
continue
if not order.partner_id:
raise UserError(_('Please provide a partner for the sale.'))
invoice = Invoice.new(order._prepare_invoice())
invoice._onchange_partner_id()
invoice.fiscal_position_id = order.fiscal_position_id
inv = invoice._convert_to_write({name: invoice[name] for name in invoice._cache})
new_invoice = Invoice.with_context(local_context).sudo().create(inv)
message = _("This invoice has been created from the point of sale session: <a href=# data-oe-model=pos.order data-oe-id=%d>%s</a>") % (order.id, order.name)
new_invoice.message_post(body=message)
order.write({'invoice_id': new_invoice.id, 'state': 'invoiced'})
Invoice += new_invoice
for line in order.lines:
self.with_context(local_context)._action_create_invoice_line(line, new_invoice.id)
new_invoice.with_context(local_context).sudo().compute_taxes()
order.sudo().write({'state': 'invoiced'})
# this workflow signal didn't exist on account.invoice -> should it have been 'invoice_open' ? (and now method .action_invoice_open())
# shouldn't the created invoice be marked as paid, seing the customer paid in the POS?
# new_invoice.sudo().signal_workflow('validate')
if not Invoice:
return {}
return {
'name': _('Customer Invoice'),
'view_type': 'form',
'view_mode': 'form',
'view_id': self.env.ref('account.invoice_form').id,
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': Invoice and Invoice.ids[0] or False,
}
# this method is unused, and so is the state 'cancel'
@api.multi
def action_pos_order_cancel(self):
return self.write({'state': 'cancel'})
@api.multi
def action_pos_order_done(self):
return self._create_account_move_line()
@api.model
def create_from_ui(self, orders):
# Keep only new orders
submitted_references = [o['data']['name'] for o in orders]
pos_order = self.search([('pos_reference', 'in', submitted_references)])
existing_orders = pos_order.read(['pos_reference'])
existing_references = set([o['pos_reference'] for o in existing_orders])
orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]
order_ids = []
for tmp_order in orders_to_save:
to_invoice = tmp_order['to_invoice']
order = tmp_order['data']
if to_invoice:
self._match_payment_to_invoice(order)
pos_order = self._process_order(order)
order_ids.append(pos_order.id)
try:
pos_order.action_pos_order_paid()
except psycopg2.OperationalError:
# do not hide transactional errors, the order(s) won't be saved!
raise
except Exception as e:
_logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
if to_invoice:
pos_order.action_pos_order_invoice()
pos_order.invoice_id.sudo().action_invoice_open()
pos_order.account_move = pos_order.invoice_id.move_id
return order_ids
def test_paid(self):
"""A Point of Sale is paid when the sum
@return: True
"""
for order in self:
if order.lines and not order.amount_total:
continue
if (not order.lines) or (not order.statement_ids) or (abs(order.amount_total - order.amount_paid) > 0.00001):
return False
return True
def create_picking(self):
"""Create a picking for each order and validate it."""
Picking = self.env['stock.picking']
Move = self.env['stock.move']
StockWarehouse = self.env['stock.warehouse']
for order in self:
address = order.partner_id.address_get(['delivery']) or {}
picking_type = order.picking_type_id
return_pick_type = order.picking_type_id.return_picking_type_id or order.picking_type_id
order_picking = Picking
return_picking = Picking
moves = Move
location_id = order.location_id.id
if order.partner_id:
destination_id = order.partner_id.property_stock_customer.id
else:
if (not picking_type) or (not picking_type.default_location_dest_id):
customerloc, supplierloc = StockWarehouse._get_partner_locations()
destination_id = customerloc.id
else:
destination_id = picking_type.default_location_dest_id.id
if picking_type:
message = _("This transfer has been created from the point of sale session: <a href=# data-oe-model=pos.order data-oe-id=%d>%s</a>") % (order.id, order.name)
picking_vals = {
'origin': order.name,
'partner_id': address.get('delivery', False),
'date_done': order.date_order,
'picking_type_id': picking_type.id,
'company_id': order.company_id.id,
'move_type': 'direct',
'note': order.note or "",
'location_id': location_id,
'location_dest_id': destination_id,
}
pos_qty = any([x.qty >= 0 for x in order.lines])
if pos_qty:
order_picking = Picking.create(picking_vals.copy())
order_picking.message_post(body=message)
neg_qty = any([x.qty < 0 for x in order.lines])
if neg_qty:
return_vals = picking_vals.copy()
return_vals.update({
'location_id': destination_id,
'location_dest_id': return_pick_type != picking_type and return_pick_type.default_location_dest_id.id or location_id,
'picking_type_id': return_pick_type.id
})
return_picking = Picking.create(return_vals)
return_picking.message_post(body=message)
for line in order.lines.filtered(lambda l: l.product_id.type in ['product', 'consu']):
moves |= Move.create({
'name': line.name,
'product_uom': line.product_id.uom_id.id,
'picking_id': order_picking.id if line.qty >= 0 else return_picking.id,
'picking_type_id': picking_type.id if line.qty >= 0 else return_pick_type.id,
'product_id': line.product_id.id,
'product_uom_qty': abs(line.qty),
'state': 'draft',
'location_id': location_id if line.qty >= 0 else destination_id,
'location_dest_id': destination_id if line.qty >= 0 else return_pick_type != picking_type and return_pick_type.default_location_dest_id.id or location_id,
})
# prefer associating the regular order picking, not the return
order.write({'picking_id': order_picking.id or return_picking.id})
if return_picking:
order._force_picking_done(return_picking)
if order_picking:
order._force_picking_done(order_picking)
# when the pos.config has no picking_type_id set only the moves will be created
if moves and not return_picking and not order_picking:
moves.action_confirm()
moves.force_assign()
moves.filtered(lambda m: m.product_id.tracking == 'none').action_done()
return True
def _force_picking_done(self, picking):
"""Force picking in order to be set as done."""
self.ensure_one()
picking.action_confirm()
picking.force_assign()
self.set_pack_operation_lot(picking)
if not any([(x.product_id.tracking != 'none') for x in picking.pack_operation_ids]):
picking.action_done()
def set_pack_operation_lot(self, picking=None):
"""Set Serial/Lot number in pack operations to mark the pack operation done."""
StockProductionLot = self.env['stock.production.lot']
PosPackOperationLot = self.env['pos.pack.operation.lot']
for order in self:
for pack_operation in (picking or self.picking_id).pack_operation_ids:
qty = 0
qty_done = 0
pack_lots = []
pos_pack_lots = PosPackOperationLot.search([('order_id', '=', order.id), ('product_id', '=', pack_operation.product_id.id)])
pack_lot_names = [pos_pack.lot_name for pos_pack in pos_pack_lots]
if pack_lot_names:
for lot_name in list(set(pack_lot_names)):
stock_production_lot = StockProductionLot.search([('name', '=', lot_name), ('product_id', '=', pack_operation.product_id.id)])
if stock_production_lot:
if stock_production_lot.product_id.tracking == 'lot':
qty = pack_lot_names.count(lot_name)
else:
qty = 1.0
qty_done += qty
pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})
else:
qty_done = pack_operation.product_qty
pack_operation.write({'pack_lot_ids': map(lambda x: (0, 0, x), pack_lots), 'qty_done': qty_done})
def add_payment(self, data):
"""Create a new payment for the order"""
args = {
'amount': data['amount'],
'date': data.get('payment_date', fields.Date.today()),
'name': self.name + ': ' + (data.get('payment_name', '') or ''),
'partner_id': self.env["res.partner"]._find_accounting_partner(self.partner_id).id or False,
}
journal_id = data.get('journal', False)
statement_id = data.get('statement_id', False)
assert journal_id or statement_id, "No statement_id or journal_id passed to the method!"
journal = self.env['account.journal'].browse(journal_id)
# use the company of the journal and not of the current user
company_cxt = dict(self.env.context, force_company=journal.company_id.id)
account_def = self.env['ir.property'].with_context(company_cxt).get('property_account_receivable_id', 'res.partner')
args['account_id'] = (self.partner_id.property_account_receivable_id.id) or (account_def and account_def.id) or False
if not args['account_id']:
if not args['partner_id']:
msg = _('There is no receivable account defined to make payment.')
else:
msg = _('There is no receivable account defined to make payment for the partner: "%s" (id:%d).') % (
self.partner_id.name, self.partner_id.id,)
raise UserError(msg)
context = dict(self.env.context)
context.pop('pos_session_id', False)
for statement in self.session_id.statement_ids:
if statement.id == statement_id:
journal_id = statement.journal_id.id
break
elif statement.journal_id.id == journal_id:
statement_id = statement.id
break
if not statement_id:
raise UserError(_('You have to open at least one cashbox.'))
args.update({
'statement_id': statement_id,
'pos_statement_id': self.id,
'journal_id': journal_id,
'ref': self.session_id.name,
})
self.env['account.bank.statement.line'].with_context(context).create(args)
return statement_id
@api.multi
def refund(self):
"""Create a copy of order for refund order"""
PosOrder = self.env['pos.order']
current_session = self.env['pos.session'].search([('state', '!=', 'closed'), ('user_id', '=', self.env.uid)], limit=1)
if not current_session:
raise UserError(_('To return product(s), you need to open a session that will be used to register the refund.'))
for order in self:
clone = order.copy({
                # not used, name forced by create
'name': order.name + _(' REFUND'),
'session_id': current_session.id,
'date_order': fields.Datetime.now(),
'pos_reference': order.pos_reference,
})
PosOrder += clone
for clone in PosOrder:
for order_line in clone.lines:
order_line.write({'qty': -order_line.qty})
return {
'name': _('Return Products'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.order',
'res_id': PosOrder.ids[0],
'view_id': False,
'context': self.env.context,
'type': 'ir.actions.act_window',
'target': 'current',
}
class PosOrderLine(models.Model):
_name = "pos.order.line"
_description = "Lines of Point of Sale"
_rec_name = "product_id"
def _order_line_fields(self, line):
if line and 'tax_ids' not in line[2]:
product = self.env['product.product'].browse(line[2]['product_id'])
line[2]['tax_ids'] = [(6, 0, [x.id for x in product.taxes_id])]
return line
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
name = fields.Char(string='Line No', required=True, copy=False, default=lambda self: self.env['ir.sequence'].next_by_code('pos.order.line'))
notice = fields.Char(string='Discount Notice')
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], required=True, change_default=True)
price_unit = fields.Float(string='Unit Price', digits=0)
qty = fields.Float('Quantity', digits=dp.get_precision('Product Unit of Measure'), default=1)
price_subtotal = fields.Float(compute='_compute_amount_line_all', digits=0, string='Subtotal w/o Tax')
price_subtotal_incl = fields.Float(compute='_compute_amount_line_all', digits=0, string='Subtotal')
discount = fields.Float(string='Discount (%)', digits=0, default=0.0)
order_id = fields.Many2one('pos.order', string='Order Ref', ondelete='cascade')
create_date = fields.Datetime(string='Creation Date', readonly=True)
tax_ids = fields.Many2many('account.tax', string='Taxes', readonly=True)
tax_ids_after_fiscal_position = fields.Many2many('account.tax', compute='_get_tax_ids_after_fiscal_position', string='Taxes')
pack_lot_ids = fields.One2many('pos.pack.operation.lot', 'pos_order_line_id', string='Lot/serial Number')
@api.depends('price_unit', 'tax_ids', 'qty', 'discount', 'product_id')
def _compute_amount_line_all(self):
for line in self:
currency = line.order_id.pricelist_id.currency_id
taxes = line.tax_ids.filtered(lambda tax: tax.company_id.id == line.order_id.company_id.id)
fiscal_position_id = line.order_id.fiscal_position_id
if fiscal_position_id:
taxes = fiscal_position_id.map_tax(taxes, line.product_id, line.order_id.partner_id)
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
line.price_subtotal = line.price_subtotal_incl = price * line.qty
if taxes:
taxes = taxes.compute_all(price, currency, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
line.price_subtotal = taxes['total_excluded']
line.price_subtotal_incl = taxes['total_included']
line.price_subtotal = currency.round(line.price_subtotal)
line.price_subtotal_incl = currency.round(line.price_subtotal_incl)
@api.onchange('product_id')
def _onchange_product_id(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(
_('You have to select a pricelist in the sale form !\n'
'Please set one before choosing a product.'))
price = self.order_id.pricelist_id.get_product_price(
self.product_id, self.qty or 1.0, self.order_id.partner_id)
self._onchange_qty()
self.price_unit = price
self.tax_ids = self.product_id.taxes_id
@api.onchange('qty', 'discount', 'price_unit', 'tax_ids')
def _onchange_qty(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(_('You have to select a pricelist in the sale form !'))
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
self.price_subtotal = self.price_subtotal_incl = price * self.qty
if (self.product_id.taxes_id):
taxes = self.product_id.taxes_id.compute_all(price, self.order_id.pricelist_id.currency_id, self.qty, product=self.product_id, partner=False)
self.price_subtotal = taxes['total_excluded']
self.price_subtotal_incl = taxes['total_included']
@api.multi
def _get_tax_ids_after_fiscal_position(self):
for line in self:
line.tax_ids_after_fiscal_position = line.order_id.fiscal_position_id.map_tax(line.tax_ids, line.product_id, line.order_id.partner_id)
class PosOrderLineLot(models.Model):
_name = "pos.pack.operation.lot"
_description = "Specify product lot/serial number in pos order line"
pos_order_line_id = fields.Many2one('pos.order.line')
order_id = fields.Many2one('pos.order', related="pos_order_line_id.order_id")
lot_name = fields.Char('Lot Name')
product_id = fields.Many2one('product.product', related='pos_order_line_id.product_id')
class ReportSaleDetails(models.AbstractModel):
_name = 'report.point_of_sale.report_saledetails'
@api.model
def get_sale_details(self, date_start=False, date_stop=False, configs=False):
""" Serialise the orders of the day information
params: date_start, date_stop string representing the datetime of order
"""
if not configs:
configs = self.env['pos.config'].search([])
today = fields.Datetime.from_string(fields.Date.context_today(self))
if date_start:
date_start = fields.Datetime.from_string(date_start)
else:
# start by default today 00:00:00
date_start = today
if date_stop:
# set time to 23:59:59
date_stop = fields.Datetime.from_string(date_stop)
else:
# stop by default today 23:59:59
date_stop = today + timedelta(days=1, seconds=-1)
# avoid a date_stop smaller than date_start
date_stop = max(date_stop, date_start)
date_start = fields.Datetime.to_string(date_start)
date_stop = fields.Datetime.to_string(date_stop)
orders = self.env['pos.order'].search([
('date_order', '>=', date_start),
('date_order', '<=', date_stop),
('state', 'in', ['paid','invoiced','done']),
('config_id', 'in', configs.ids)])
user_currency = self.env.user.company_id.currency_id
total = 0.0
products_sold = {}
taxes = {}
for order in orders:
if user_currency != order.pricelist_id.currency_id:
total += order.pricelist_id.currency_id.compute(order.amount_total, user_currency)
else:
total += order.amount_total
currency = order.session_id.currency_id
for line in order.lines:
key = (line.product_id, line.price_unit, line.discount)
products_sold.setdefault(key, 0.0)
products_sold[key] += line.qty
if line.tax_ids_after_fiscal_position:
line_taxes = line.tax_ids_after_fiscal_position.compute_all(line.price_unit * (1-(line.discount or 0.0)/100.0), currency, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
for tax in line_taxes['taxes']:
taxes.setdefault(tax['id'], {'name': tax['name'], 'total':0.0})
taxes[tax['id']]['total'] += tax['amount']
st_line_ids = self.env["account.bank.statement.line"].search([('pos_statement_id', 'in', orders.ids)]).ids
if st_line_ids:
self.env.cr.execute("""
SELECT aj.name, sum(amount) total
FROM account_bank_statement_line AS absl,
account_bank_statement AS abs,
account_journal AS aj
WHERE absl.statement_id = abs.id
AND abs.journal_id = aj.id
AND absl.id IN %s
GROUP BY aj.name
""", (tuple(st_line_ids),))
payments = self.env.cr.dictfetchall()
else:
payments = []
return {
'total_paid': user_currency.round(total),
'payments': payments,
'company_name': self.env.user.company_id.name,
'taxes': taxes.values(),
'products': sorted([{
'product_id': product.id,
'product_name': product.name,
'code': product.default_code,
'quantity': qty,
'price_unit': price_unit,
'discount': discount,
'uom': product.uom_id.name
} for (product, price_unit, discount), qty in products_sold.items()], key=lambda l: l['product_name'])
}
@api.multi
def render_html(self, docids, data=None):
data = dict(data or {})
configs = self.env['pos.config'].browse(data['config_ids'])
data.update(self.get_sale_details(data['date_start'], data['date_stop'], configs))
return self.env['report'].render('point_of_sale.report_saledetails', data)
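
# Illustrative sketch (not part of the Odoo module above): get_sale_details()
# keys sold quantities on (product, unit price, discount). The same
# bookkeeping can be shown with plain tuples, using hypothetical line data.
def _demo_products_sold(lines):
    # lines: iterable of (product_name, price_unit, discount, qty) tuples
    products_sold = {}
    for name, price_unit, discount, qty in lines:
        key = (name, price_unit, discount)
        products_sold.setdefault(key, 0.0)
        products_sold[key] += qty
    return products_sold
# _demo_products_sold([('Tea', 2.0, 0.0, 1), ('Tea', 2.0, 0.0, 3)])
# returns {('Tea', 2.0, 0.0): 4.0}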
| agpl-3.0 | 1,788,195,554,821,148,700 | 49.181128 | 219 | 0.576523 | false |
openrisc/or1k-src | gdb/contrib/exsummary.py | 33 | 5735 | # Copyright 2011, 2013 Free Software Foundation, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import sys
import glob
# Compute the summary information from the files created by
# excheck.py. Run in the build directory where you used the
# excheck.py plugin.

class Function:
def __init__(self, name):
self.name = name
self.location = None
self.callers = []
self.can_throw = False
self.marked_nothrow = False
self.reason = None
def log(self, message):
print "%s: note: %s" % (self.location, message)
def set_location(self, location):
self.location = location
# CALLER is an Edge.
def add_caller(self, caller):
# self.log("adding call from %s" % caller.from_fn.name)
self.callers.append(caller)
# self.log("len = %d" % len(self.callers))
def consistency_check(self):
if self.marked_nothrow and self.can_throw:
print ("%s: error: %s marked as both 'throw' and 'nothrow'"
% (self.location, self.name))
def declare_nothrow(self):
self.marked_nothrow = True
self.consistency_check()
def declare_throw(self):
result = not self.can_throw # Return True the first time
self.can_throw = True
self.consistency_check()
return result
def print_stack(self, is_indirect):
if is_indirect:
print ("%s: error: function %s is marked nothrow but is assumed to throw due to indirect call"
% (self.location, self.name))
else:
print ("%s: error: function %s is marked nothrow but can throw"
% (self.location, self.name))
edge = self.reason
while edge is not None:
print ("%s: info: via call to %s"
% (edge.location, edge.to_fn.name))
edge = edge.to_fn.reason
def mark_throw(self, edge, work_list, is_indirect):
if not self.can_throw:
# self.log("can throw")
self.can_throw = True
self.reason = edge
if self.marked_nothrow:
self.print_stack(is_indirect)
else:
# Do this in the 'else' to avoid extra error
# propagation.
work_list.append(self)

class Edge:
def __init__(self, from_fn, to_fn, location):
self.from_fn = from_fn
self.to_fn = to_fn
self.location = location
# Work list of known-throwing functions.
work_list = []
# Map from function name to Function object.
function_map = {}
# Work list of indirect calls.
indirect_functions = []
# Whether we should process cleanup functions as well.
process_cleanups = False
# Whether we should process indirect function calls.
process_indirect = False
def declare(fn_name):
global function_map
if fn_name not in function_map:
function_map[fn_name] = Function(fn_name)
return function_map[fn_name]
def define_function(fn_name, location):
fn = declare(fn_name)
fn.set_location(location)
def declare_throw(fn_name):
global work_list
fn = declare(fn_name)
if fn.declare_throw():
work_list.append(fn)
def declare_nothrow(fn_name):
fn = declare(fn_name)
fn.declare_nothrow()
def declare_cleanup(fn_name):
global process_cleanups
fn = declare(fn_name)
if process_cleanups:
fn.declare_nothrow()
def function_call(to, frm, location):
to_fn = declare(to)
frm_fn = declare(frm)
to_fn.add_caller(Edge(frm_fn, to_fn, location))
def has_indirect_call(fn_name, location):
global indirect_functions
fn = declare(fn_name)
phony = Function("<indirect call>")
phony.add_caller(Edge(fn, phony, location))
indirect_functions.append(phony)
def mark_functions(worklist, is_indirect):
for callee in worklist:
for edge in callee.callers:
edge.from_fn.mark_throw(edge, worklist, is_indirect)
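
# Illustrative sketch (not part of the original script): how the worklist
# propagation in mark_functions() behaves for a tiny two-function call graph,
# using only helpers defined in this module.
def _demo_propagation():
    callee = declare('demo_callee')
    caller = declare('demo_caller')
    callee.add_caller(Edge(caller, callee, 'demo.c:1'))
    callee.can_throw = True
    # Marking the known-throwing callee propagates 'can throw' to its caller
    # along the recorded edge.
    mark_functions([callee], False)
    return caller.can_throw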
def help_and_exit():
print "Usage: exsummary [OPTION]..."
print ""
print "Read the .py files from the exception checker plugin and"
print "generate an error summary."
print ""
print " --cleanups Include invalid behavior in cleanups"
print " --indirect Include assumed errors due to indirect function calls"
sys.exit(0)
def main():
global work_list
global indirect_functions
global process_cleanups
global process_indirect
for arg in sys.argv:
if arg == '--cleanups':
process_cleanups = True
elif arg == '--indirect':
process_indirect = True
elif arg == '--help':
help_and_exit()
for fname in sorted(glob.glob('*.c.gdb_exc.py')):
execfile(fname)
print "================"
print "= Ordinary marking"
print "================"
mark_functions(work_list, False)
if process_indirect:
print "================"
print "= Indirect marking"
print "================"
mark_functions(indirect_functions, True)
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
| gpl-2.0 | -1,822,935,145,357,469,000 | 30 | 106 | 0.616042 | false |
dksr/REMIND | python/base/utils/Logging.py | 1 | 8007 | import os
import sys
import logging
import logging.handlers
from Singleton import Singleton

class attrdict(dict):
    """ Dictionary with attribute-like access """
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
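
# Example (illustrative): attrdict(loglevel='debug').loglevel == 'debug', and
# attribute assignment writes straight through to the underlying dict keys.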

class Logging_Manager(Singleton):
def __init__(self):
self.options = {'fquiet': None,
'loglevel': 'debug',
'quiet': None,
'module': "",
'logdir': '.',
'clean': False,
'rotating_log': True,
'rotating_file_mode': "a",
'maxBytes': 0,
'backupCount': 10,
'logfile': 'project.log'}
def getLogger(self, options_dict):
self.options.update(options_dict)
self.options = attrdict(self.options)
return Logger(self.options).getLogger()

class Logger():
def __init__(self,options):
self.options = options
def getLogger(self):
""" Log information based upon users options.
"""
options = self.options
logger = logging.getLogger(options.module)
formatter = logging.Formatter('%(asctime)s %(levelname)s\t%(message)s')
debug_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s \t%(message)s')
level = logging.__dict__.get(options.loglevel.upper(),logging.DEBUG)
logger.setLevel(level)
# Output logging information to screen
if not options.quiet:
console_hdlr = logging.StreamHandler(sys.stdout)
console_hdlr.setFormatter(debug_formatter)
console_hdlr.setLevel(logging.DEBUG)
logger.addHandler(console_hdlr)
# Output logging information to file
if not options.fquiet:
log_file = os.path.join(options.logdir, options.logfile)
if options.clean and os.path.isfile(log_file):
os.remove(log_file)
if options.rotating_log:
rfm = options.rotating_file_mode
mb = options.maxBytes
bc = options.backupCount
file_hdlr = logging.handlers.RotatingFileHandler(log_file, rfm, mb, bc)
file_hdlr.doRollover()
else:
file_hdlr = logging.FileHandler(log_file)
file_hdlr.setFormatter(formatter)
file_hdlr.setLevel(logging.DEBUG)
logger.addHandler(file_hdlr)
return logger
def initialize_logging(options_dict):
options = {'fquiet': None,
'loglevel': 'debug',
'quiet': None,
'module': "",
'logdir': '.',
'clean': False,
'rotating_log': True,
'rotating_file_mode': "a",
'maxBytes': 0,
'backupCount': 10,
'logfile': 'main_log.log'}
options.update(options_dict)
options = attrdict(options)
logger = logging.getLogger(options.module)
formatter = logging.Formatter('%(asctime)s %(levelname)s \t %(name)s (%(lineno)d): %(message)s')
debug_formatter = logging.Formatter('%(levelname)s \t %(name)s (%(lineno)d): %(message)s')
level = logging.__dict__.get(options.loglevel.upper(),logging.DEBUG)
logger.setLevel(level)
logger.handlers = []
# Output logging information to screen
if not options.quiet:
console_hdlr = logging.StreamHandler(sys.stderr)
console_hdlr.setFormatter(formatter)
console_hdlr.setLevel(logging.DEBUG)
logger.addHandler(console_hdlr)
# Output logging information to file
if not options.fquiet:
if not os.path.isdir(options.logdir):
# if logdir not present, create the path
os.system('mkdir -p ' + options.logdir)
log_file = os.path.join(options.logdir, options.logfile)
if options.clean and os.path.isfile(log_file):
os.remove(log_file)
if options.rotating_log:
rfm = options.rotating_file_mode
mb = options.maxBytes
bc = options.backupCount
file_hdlr = logging.handlers.RotatingFileHandler(log_file, rfm, mb, bc)
file_hdlr.doRollover()
else:
file_hdlr = logging.FileHandler(log_file)
file_hdlr.setFormatter(formatter)
file_hdlr.setLevel(logging.DEBUG)
logger.addHandler(file_hdlr)
return logger
def test1():
import Logging
import logging
import time
class Hel():
def __init__(self):
#self.log = Logging_Manager().getLogger({'module':'Hel'})
options = {'fquiet': None,
'loglevel': 'info',
'quiet': None,
'module': "",
'logdir': '/tmp/',
'clean': False,
'rotating_log': True,
'rotating_file_mode': "a",
'maxBytes': 0,
'backupCount': 10,
'logfile': 'project.log'}
options = attrdict(options)
self.log = Logging.initialize_logging(options)
self.log.info("START TIME: " + time.asctime())
self.log.error('Creating new instance of Hel')
def hel(self):
self.log.debug('iam in hel')
class Hello():
def __init__(self):
self.log = logging.getLogger('Hel')
self.log.info('Creating new instance of Hello')
def hello(self):
self.log.debug('iam in hello')
class Bello():
def __init__(self):
self.log = logging.getLogger('Hel.Hello.Hello')
self.log.info('Creating new instance of BELLO')
def bel(self):
self.log.debug('iam in BEl')
g = Hel()
g.hel()
h = Hello()
h.hello()
b = Bello()
b.bel()
def test2(argv=None):
import Logging
from optparse import OptionParser
if argv is None:
argv = sys.argv[1:]
# Setup command line options
parser = OptionParser("usage: %prog [options]")
parser.add_option("-l", "--logdir", dest="logdir", default=".", help="log DIRECTORY (default ./)")
parser.add_option("-m", "--module", dest="module", default="project", help="module/project name from where logging")
parser.add_option("-f", "--logfile", dest="logfile", default="project.log", help="log file (default project.log)")
parser.add_option("-v", "--loglevel", dest="loglevel", default="debug", help="logging level (debug, info, error)")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", help="do not log to console")
parser.add_option("-n", "--filequiet", action="store_true", dest="fquiet", help="do not log to file")
parser.add_option("-c", "--clean", dest="clean", action="store_true", default=False, help="remove old log file")
# Process command line options
(options, args) = parser.parse_args(argv)
# Setup logger format and output locations
log = Logging.initialize_logging(options)
# Examples
log.error("This is an error message.")
log.info("This is an info message.")
log.debug("This is a debug message.")
if __name__ == "__main__":
test1()
#test2(['-m', 'test', '-l', '/tmp/', '-c', '-n', '-f', 'log_test.log'])
| mit | 4,871,048,822,783,755,000 | 39.236181 | 124 | 0.517922 | false |
adcomp/super-fruit-pie | tuto/04_platform.py | 1 | 5701 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# David Art <[email protected]>
# Program Arcade Games With Python And Pygame - Build a Platformer
# http://programarcadegames.com
import pygame
import random
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
YELLOW = (255, 255, 0)
WIDTH, HEIGHT = 640, 480

# This class represents the platform we jump on
class Platform (pygame.sprite.Sprite):
def __init__(self, color, width, height):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([width, height])
self.image.fill(color)
self.rect = self.image.get_rect()

# This class represents the player-controlled character
class Player(pygame.sprite.Sprite):
# -- Attributes
# Set speed vector of player
change_x = 0
change_y = 0
# Set to true if it is ok to jump
jump_ok = True
# Count of frames since the player
# collided against something. Used to prevent jumping
# when we haven't hit anything.
frame_since_collision = 0
# -- Methods
# Constructor function
def __init__(self, x, y):
# Call the parent's constructor
pygame.sprite.Sprite.__init__(self)
# Set height, width
self.image = pygame.Surface([16, 16])
self.image.fill(YELLOW)
# Make our top-left corner the passed-in location.
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
# Find a new position for the player
def update(self,blocks):
# Move left/right
self.rect.x += self.change_x
# See if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
for block in block_hit_list:
# If we are moving right, set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
else:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
# Move up/down
self.rect.y += self.change_y
# Check and see if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
for block in block_hit_list:
# We hit something below us. Set the boolean to flag that we can jump
if self.change_y > 0:
self.jump_ok = True
# Keep track of the last time we hit something
self.frame_since_collision = 0
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
else:
self.rect.top = block.rect.bottom
# Stop our vertical movement
self.change_y = 0
# If we haven't hit anything in a while, allow us jump
if self.frame_since_collision > 3:
self.jump_ok = False
# Increment frame counter
self.frame_since_collision += 1
# Calculate effect of gravity.
def calc_grav(self):
self.change_y += .35
# See if we are on the ground.
if self.rect.y >= HEIGHT-16 and self.change_y >= 0:
self.change_y = 0
self.rect.y = HEIGHT-16
self.frame_since_collision = 0
self.jump_ok = True
# Called when user hits 'jump' button
def jump(self,blocks):
# If it is ok to jump, set our speed upwards
if self.jump_ok:
self.change_y = -8

# Create platforms
def create_level1(block_list, all_sprites_list):
    # 4 platform blocks
    for i in range(4):
block = Platform(WHITE, 128, 16)
# Set x and y based on block number
block.rect.x = 64 + 128 * i
block.rect.y = 160 + 80 * i
block_list.add(block)
all_sprites_list.add(block)
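
# Illustrative variant (not part of the original tutorial): the same pattern
# can build other layouts, for example a staircase climbing from right to left.
def create_level2(block_list, all_sprites_list):
    for i in range(4):
        block = Platform(WHITE, 96, 16)
        block.rect.x = WIDTH - 160 - 96 * i
        block.rect.y = 400 - 80 * i
        block_list.add(block)
        all_sprites_list.add(block)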
# Initialize the window
pygame.init()
# Set the height and width of the screen
screen = pygame.display.set_mode([WIDTH, HEIGHT])
pygame.display.set_caption("RaspJam")
# Main program, create the blocks
block_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
create_level1(block_list,all_sprites_list)
player = Player(20, 15)
player.rect.x = 240
player.rect.y = 0
all_sprites_list.add(player)
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
# --- Event Processing
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player.change_x = -6
if event.key == pygame.K_RIGHT:
player.change_x = 6
if event.key == pygame.K_SPACE:
player.jump(block_list)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
player.change_x = 0
if event.key == pygame.K_RIGHT:
player.change_x = 0
# --- Game Logic
# Wrap player around the screen
if player.rect.x >= WIDTH:
player.rect.x = -15
if player.rect.x <= -16:
player.rect.x = WIDTH
player.calc_grav()
player.update(block_list)
block_list.update()
# --- Draw Frame
screen.fill(BLACK)
all_sprites_list.draw(screen)
pygame.display.flip()
clock.tick(60)
pygame.quit()
| mit | 1,031,072,940,009,067,400 | 26.083744 | 92 | 0.570251 | false |
jeffbryner/configlib | tests/test_configlib.py | 1 | 1147 |
class TestConfigLib(object):
def setup(self):
self.config_path = 'tests/fixture/fqdnblocklist.conf'
def test_current_behavior(self):
from configlib import getConfig
res = getConfig('mongohost', 'defaultvalue', self.config_path)
assert res == 'mongodb'
def test_option_parser(self):
from configlib import OptionParser
o = OptionParser
assert o is not None
def test_list_returns_as_string(self):
from configlib import getConfig
res = getConfig('foo', 'zab,za', self.config_path)
print(res)
assert res == 'foo,bar'
assert isinstance(res, str)
def test_failing_syslog_var(self):
from configlib import getConfig
res = getConfig('syslogport', 514, self.config_path)
assert res == 514
def test_boolean_false_var(self):
from configlib import getConfig
res = getConfig('bar', False, self.config_path)
assert res is False
def test_boolean_true_var(self):
from configlib import getConfig
res = getConfig('bar', True, self.config_path)
assert res is True
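
    def test_missing_option_returns_default(self):
        # Illustrative extra case (not in the original suite): assumes the
        # option name below is absent from the fixture file, in which case
        # getConfig should hand back the supplied default unchanged.
        from configlib import getConfig
        res = getConfig('option_not_in_fixture', 'fallback', self.config_path)
        assert res == 'fallback'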
| mpl-2.0 | -818,607,641,773,670,300 | 30 | 70 | 0.633827 | false |
rahul67/hue | desktop/libs/hadoop/java/scripts/jobtracker_test.py | 38 | 1768 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../if/gen-py")
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
host = "localhost"
port = 9290
from hadoop.api import Jobtracker
from hadoop.api.ttypes import *
#print dir(ThriftTaskPhase)
#exit()
socket = TSocket.TSocket(host,port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Jobtracker.Client(protocol)
transport.open()
#print client.trackerName(None,)
#print client.getClusterStatus(None,)
#print dir(client)
#print client.getQueues(None,)
jobs = client.getCompletedJobs(None,)
print jobs
if jobs and len(jobs) > 0:
counters = client.getJobCounters(None, jobs[0].job_id)
for c in counters:
print "--------------------------------------------"
print "CounterGroup:: ", c.displayName
for name in c.counters:
print "Counter '%s':: %s" % (name,c.counters[name].value)
| apache-2.0 | 6,467,295,039,198,972,000 | 33.666667 | 77 | 0.727376 | false |
NRGunby/UCONGA | linalg.py | 1 | 2541 | import math
import numpy
def normalise(a):
'''
Normalises a vector
Accepts: a numpy vector
Returns: a numpy vector pointing in the same direction with magnitude 1
'''
a_norm = numpy.linalg.norm(a)
return numpy.array([float(each)/a_norm for each in a])
def rotation_axis_angle(axis, angle):
'''
Returns the 3x3 matrix for rotation by an angle around an axis
Accepts: an axis as a numpy array, and an angle in radians
Returns: a rotation matrix as a numpy array
'''
sin = math.sin(angle)
cos = math.cos(angle)
comp = 1 - cos
x, y, z = normalise(axis)
mat = numpy.array([[(cos + x*x*comp), (x*y*comp - z*sin), (x*z*comp + y*sin)],
[(y*x*comp + z*sin), (cos + y*y*comp), (y*z*comp - x*sin)],
[(z*x*comp - y*sin), (z*y*comp + x*sin), (cos + z*z*comp)]])
should_be_I = mat.dot(mat.transpose())
I = numpy.ma.identity(3)
numpy.testing.assert_array_almost_equal(I, should_be_I, 3)
return mat
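
# Example (illustrative): a quarter turn about the z axis sends the x axis to
# the y axis, i.e.
#     rotation_axis_angle(numpy.array([0, 0, 1]), math.pi / 2).dot([1, 0, 0])
# is approximately [0, 1, 0].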
def rotation_from_axes(ax1, ax2):
    '''
    Calculate the matrix to rotate one vector to another
    Accepts: two 3-vectors as numpy arrays
    Returns: a rotation matrix as a numpy array
    '''
    # Probably a more numpy-ish way of doing this
    if max(numpy.absolute(ax1 - ax2)) < 1E-7:
        # Vectors already coincide: the identity is the required rotation
        return numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    elif max(numpy.absolute(ax1 + ax2)) < 1E-7:
        # Anti-parallel vectors: rotate by pi about any axis perpendicular
        # to ax1. Cross ax1 with the basis vector it is least aligned with,
        # which always yields a non-zero perpendicular axis.
        basis = numpy.zeros(3)
        basis[numpy.argmin(numpy.absolute(ax1))] = 1.0
        rot_ax = numpy.cross(ax1, basis)
        return rotation_axis_angle(rot_ax, math.pi)
    else:
        ang = angle_between(ax1, ax2)
        rot_ax = numpy.cross(ax1, ax2)
        return rotation_axis_angle(rot_ax, ang)
def angle_between(vec1, vec2):
'''
Calculate the angle between two vectors
Accepts: two vectors as numpy arrays
Returns: the angle in radians
'''
return math.acos(float(vec1.dot(vec2)) /
(numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2)))
def reflection_plane(vec1, vec2):
'''
Returns the Householder reflection matrix for reflection through
a plane
Accepts: two non-parallel vectors in the plane as numpy arrays
Returns: the 3x3 reflection matrix as a numpy array
'''
norm = numpy.cross(vec1, vec2)
a, b, c = normalise(norm)
return numpy.array([[1 - 2*a*a, -2*a*b, -2*a*c],
[-2*a*b, 1-2*b*b, -2*b*c],
[-2*a*c, -2*b*c, 1-2*c*c]])
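

# Minimal self-check (illustrative addition, not part of the original module):
# rotation_from_axes() should produce a matrix mapping the first direction
# onto the second.
if __name__ == '__main__':
    _v1 = normalise(numpy.array([1.0, 2.0, 3.0]))
    _v2 = normalise(numpy.array([-2.0, 0.5, 1.0]))
    numpy.testing.assert_array_almost_equal(
        rotation_from_axes(_v1, _v2).dot(_v1), _v2, 6)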
| bsd-3-clause | 1,874,089,720,291,955,500 | 32.434211 | 83 | 0.583628 | false |
simzacks/jjb | jenkins_jobs/modules/project_workflow.py | 3 | 2543 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 David Caro <[email protected]>
#
# Based on jenkins_jobs/modules/project_flow.py by
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The workflow Project module handles creating Jenkins workflow projects.
You may specify ``workflow`` in the ``project-type`` attribute of
the :ref:`Job` definition.
For now only inline scripts are supported.
Requires the Jenkins :jenkins-wiki:`Workflow Plugin <Workflow+Plugin>`.
In order to use it for job-template you have to escape the curly braces by
doubling them in the DSL: { -> {{ , otherwise it will be interpreted by the
python str.format() command.
:Job Parameters:
* **dsl** (`str`): The DSL content.
* **sandbox** (`bool`): If the script should run in a sandbox (default
false)
Job example:
.. literalinclude::
/../../tests/yamlparser/fixtures/project_workflow_template001.yaml
Job template example:
.. literalinclude::
/../../tests/yamlparser/fixtures/project_workflow_template002.yaml
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import MissingAttributeError

class Workflow(jenkins_jobs.modules.base.Base):
sequence = 0
def root_xml(self, data):
xml_parent = XML.Element('flow-definition',
{'plugin': 'workflow-job'})
xml_definition = XML.SubElement(xml_parent, 'definition',
{'plugin': 'workflow-cps',
'class': 'org.jenkinsci.plugins.'
'workflow.cps.CpsFlowDefinition'})
try:
XML.SubElement(xml_definition, 'script').text = data['dsl']
except KeyError as e:
            raise MissingAttributeError(e.args[0])
        sandbox = data.get('sandbox', False)
        XML.SubElement(xml_definition, 'sandbox').text = str(
            sandbox).lower()
return xml_parent
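
# Illustrative YAML (not taken from the project's documentation or fixtures),
# matching the job parameters described in the module docstring above:
#
#   - job:
#       name: example-workflow
#       project-type: workflow
#       sandbox: true
#       dsl: |
#         node {
#           echo 'Hello from a workflow job'
#         }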
| apache-2.0 | -8,751,261,257,633,136,000 | 33.835616 | 75 | 0.662603 | false |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/core/application.py | 2 | 72779 | """This module implements data recording and reporting for an application.
"""
import logging
import sys
import threading
import time
import os
import traceback
import imp
from functools import partial
import newrelic.packages.six as six
from newrelic.samplers.data_sampler import DataSampler
from newrelic.core.config import global_settings_dump, global_settings
from newrelic.core.data_collector import create_session
from newrelic.network.exceptions import (ForceAgentRestart,
ForceAgentDisconnect, DiscardDataForRequest, RetryDataForRequest)
from newrelic.core.environment import environment_settings
from newrelic.core.rules_engine import RulesEngine, SegmentCollapseEngine
from newrelic.core.stats_engine import StatsEngine, CustomMetrics
from newrelic.core.internal_metrics import (InternalTrace,
InternalTraceContext, internal_metric)
from newrelic.core.xray_session import XraySession
from newrelic.core.profile_sessions import profile_session_manager
from .database_utils import SQLConnections
_logger = logging.getLogger(__name__)

class Application(object):
"""Class which maintains recorded data for a single application.
"""
def __init__(self, app_name, linked_applications=[]):
_logger.debug('Initializing application with name %r and '
'linked applications of %r.', app_name, linked_applications)
self._creation_time = time.time()
self._app_name = app_name
self._linked_applications = sorted(set(linked_applications))
self._process_id = None
self._period_start = 0.0
self._active_session = None
self._harvest_enabled = False
self._transaction_count = 0
self._last_transaction = 0.0
self._global_events_account = 0
self._harvest_count = 0
self._merge_count = 0
self._discard_count = 0
self._agent_restart = 0
self._pending_shutdown = False
self._agent_shutdown = False
self._connected_event = threading.Event()
self._detect_deadlock = False
self._deadlock_event = threading.Event()
self._stats_lock = threading.Lock()
self._stats_engine = StatsEngine()
self._stats_custom_lock = threading.Lock()
self._stats_custom_engine = StatsEngine()
self._agent_commands_lock = threading.Lock()
self._data_samplers_lock = threading.Lock()
# We setup empty rules engines here even though they will be
# replaced when application first registered. This is done to
# avoid a race condition in setting it later. Otherwise we have
# to use unnecessary locking to protect access.
self._rules_engine = { 'url': RulesEngine([]),
'transaction': RulesEngine([]),
'metric': RulesEngine([]),
'segment': SegmentCollapseEngine([])}
self._data_samplers = []
# Thread profiler and state of whether active or not.
#self._thread_profiler = None
#self._profiler_started = False
#self._send_profile_data = False
#self._xray_profiler = None
self.xray_session_running = False
self.profile_manager = profile_session_manager()
# This holds a dictionary of currently active xray sessions.
# key = xray_id
# value = XraySession object
self._active_xrays = {}
@property
def name(self):
return self._app_name
@property
def linked_applications(self):
return self._linked_applications
@property
def configuration(self):
return self._active_session and self._active_session.configuration
@property
def active(self):
return self.configuration is not None
def dump(self, file):
"""Dumps details about the application to the file object."""
print >> file, 'Time Created: %s' % (
time.asctime(time.localtime(self._creation_time)))
print >> file, 'Linked Applications: %r' % (
self._linked_applications)
print >> file, 'Registration PID: %s' % (
self._process_id)
print >> file, 'Harvest Count: %d' % (
self._harvest_count)
print >> file, 'Agent Restart: %d' % (
self._agent_restart)
print >> file, 'Forced Shutdown: %s' % (
self._agent_shutdown)
active_session = self._active_session
if active_session:
print >> file, 'Collector URL: %s' % (
active_session.collector_url)
print >> file, 'Agent Run ID: %d' % (
active_session.agent_run_id)
print >> file, 'URL Normalization Rules: %r' % (
self._rules_engine['url'].rules)
print >> file, 'Metric Normalization Rules: %r' % (
self._rules_engine['metric'].rules)
print >> file, 'Transaction Normalization Rules: %r' % (
self._rules_engine['transaction'].rules)
print >> file, 'Transaction Segment Whitelist Rules: %r' % (
self._rules_engine['segment'].rules)
print >> file, 'Harvest Period Start: %s' % (
time.asctime(time.localtime(self._period_start)))
print >> file, 'Transaction Count: %d' % (
self._transaction_count)
print >> file, 'Last Transaction: %s' % (
time.asctime(time.localtime(self._last_transaction)))
print >> file, 'Global Events Count: %d' % (
self._global_events_account)
print >> file, 'Harvest Metrics Count: %d' % (
self._stats_engine.metrics_count())
print >> file, 'Harvest Merge Count: %d' % (
self._merge_count)
print >> file, 'Harvest Discard Count: %d' % (
self._discard_count)
def activate_session(self, timeout=0.0):
"""Creates a background thread to initiate registration of the
application with the data collector if no active session already
exists. Will wait up to the timeout specified for the session
to be activated.
"""
if self._agent_shutdown:
return
if self._pending_shutdown:
return
if self._active_session:
return
self._process_id = os.getpid()
self._connected_event.clear()
self._deadlock_event.clear()
# If the session is activated when the Python global import lock
# has been acquired by the parent thread then the parent thread
# can potentially deadlock due to lazy imports in code being run
# to activate the session for the application. The deadlock will
# only be broken when the timeout completes at which point the
# activation process will resume. We want to avoid blocking the
# activation process and the parent thread for no good reason,
# so we use an extra event object to try and detect a potential
# deadlock. This works by having the activation thread try and
# explicitly lock the global module import lock. When it can it
# will set the event. If this doesn't occur within our hard
# wired timeout value, we will bail out on assumption that
# timeout has likely occurred.
deadlock_timeout = 0.1
if timeout >= deadlock_timeout:
self._detect_deadlock = True
thread = threading.Thread(target=self.connect_to_data_collector,
name='NR-Activate-Session/%s' % self.name)
thread.setDaemon(True)
thread.start()
if not timeout:
return True
if self._detect_deadlock:
self._deadlock_event.wait(deadlock_timeout)
if not self._deadlock_event.isSet():
_logger.warning('Detected potential deadlock while waiting '
'for activation of session for application %r. '
'Returning after %0.2f seconds rather than waiting. '
'If this problem occurs on every process restart, '
'see the API documentation for proper usage of '
'the newrelic.agent.register_application() function '
'or if necessary report this problem to New Relic '
'support for further investigation.', self._app_name,
deadlock_timeout)
return False
self._connected_event.wait(timeout)
if not self._connected_event.isSet():
_logger.debug('Timeout waiting for activation of session for '
'application %r where timeout was %.02f seconds.',
self._app_name, timeout)
return False
return True
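
    # Illustrative usage (not from the original source): callers typically
    # construct an Application and then block briefly while registration with
    # the data collector completes, for example:
    #
    #     application = Application('My Application')
    #     application.activate_session(timeout=10.0)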
def connect_to_data_collector(self):
"""Performs the actual registration of the application with the
data collector if no current active session.
"""
if self._agent_shutdown:
return
if self._pending_shutdown:
return
if self._active_session:
return
# We perform a short sleep here to ensure that this thread is
# suspended and the main thread gets to run. This is necessary
# for greenlet based systems else this thread would run until
# some other condition occurs which would cause it to yield. If
# that is allowed, then the first request which triggered
# activation of the application would be unduly delayed. A value
# of 10ms seems to work best. If it is made shorter at around
# 1ms, then it doesn't seem to cause a yield and still get a
# delay. So needs to be long enough to ensure a yield but not
# too long to hold off this thread either.
time.sleep(0.01)
# Acquire the Python module import lock and set a flag when we
# have it. This is done to detect the potential for a deadlock
# on the import lock where the code which activated the
# application held an import lock at the time of activation and
# is waiting for registration to complete. This short circuits
# the timeout so the caller isn't blocked for the full wait time
# if this thread was in a deadlock state that prevented it from
# running. Such a deadlock state could occur where subsequent
        # code run from this thread performs a deferred module import.
if self._detect_deadlock:
imp.acquire_lock()
self._deadlock_event.set()
imp.release_lock()
# Register the application with the data collector. Any errors
# that occur will be dealt with by create_session(). The result
# will either be a session object or None. In the event of a
# failure to register we will try again, gradually backing off
# for longer and longer periods as we retry. The retry interval
# will be capped at 300 seconds.
active_session = None
retries = [(15, False, False), (15, False, False),
(30, False, False), (60, True, False),
(120, False, False), (300, False, True),]
try:
while not active_session:
if self._agent_shutdown:
return
if self._pending_shutdown:
return
active_session = create_session(None, self._app_name,
self.linked_applications, environment_settings(),
global_settings_dump())
# We were successful, but first need to make sure we do
# not have any problems with the agent normalization
# rules provided by the data collector. These could blow
# up when being compiled if the patterns are broken or
# use text which conflicts with extensions in Python's
# regular expression syntax.
if active_session:
configuration = active_session.configuration
try:
settings = global_settings()
if settings.debug.log_normalization_rules:
_logger.info('The URL normalization rules for '
'%r are %r.', self._app_name,
configuration.url_rules)
_logger.info('The metric normalization rules '
'for %r are %r.', self._app_name,
configuration.metric_name_rules)
_logger.info('The transaction normalization '
'rules for %r are %r.', self._app_name,
configuration.transaction_name_rules)
self._rules_engine['url'] = RulesEngine(
configuration.url_rules)
self._rules_engine['metric'] = RulesEngine(
configuration.metric_name_rules)
self._rules_engine['transaction'] = RulesEngine(
configuration.transaction_name_rules)
self._rules_engine['segment'] = SegmentCollapseEngine(
configuration.transaction_segment_terms)
except Exception:
_logger.exception('The agent normalization rules '
'received from the data collector could not '
'be compiled properly by the agent due to a '
'syntactical error or other problem. Please '
'report this to New Relic support for '
'investigation.')
# For good measure, in this situation we explicitly
# shutdown the session as then the data collector
# will record this. Ignore any error from this. Then
# we discard the session so we go into a retry loop
# on presumption that issue with the URL rules will
# be fixed.
try:
active_session.shutdown_session()
except Exception:
pass
active_session = None
# Were we successful. If not we will sleep for a bit and
# then go back and try again. Log warnings or errors as
# per schedule associated with the retry intervals.
if not active_session:
if retries:
timeout, warning, error = retries.pop(0)
if warning:
_logger.warning('Registration of the application '
'%r with the data collector failed after '
'multiple attempts. Check the prior log '
'entries and remedy any issue as '
'necessary, or if the problem persists, '
'report this problem to New Relic '
'support for further investigation.',
self._app_name)
elif error:
_logger.error('Registration of the application '
'%r with the data collector failed after '
'further additional attempts. Please '
'report this problem to New Relic support '
'for further investigation.',
self._app_name)
else:
timeout = 300
_logger.debug('Retrying registration of the application '
'%r with the data collector after a further %d '
'seconds.', self._app_name, timeout)
time.sleep(timeout)
# We were successful. Ensure we have cleared out any cached
# data from a prior agent run for this application.
configuration = active_session.configuration
with self._stats_lock:
self._stats_engine.reset_stats(configuration)
with self._stats_custom_lock:
self._stats_custom_engine.reset_stats(configuration)
# Record an initial start time for the reporting period and
# clear record of last transaction processed.
self._period_start = time.time()
self._transaction_count = 0
self._last_transaction = 0.0
self._global_events_account = 0
# Clear any prior count of harvest merges due to failures.
self._merge_count = 0
# Update the active session in this object. This will the
# recording of transactions to start.
self._active_session = active_session
# Enable the ability to perform a harvest. This is okay to
# do at this point as the processing of agent commands and
# starting of data samplers are protected by their own locks.
self._harvest_enabled = True
# Flag that the session activation has completed to
# anyone who has been waiting through calling the
# wait_for_session_activation() method.
self._connected_event.set()
# Immediately fetch any agent commands now even before start
# recording any transactions so running of any X-Ray
# sessions is not delayed. This could fail due to issues
# talking to the data collector or if we get back bad data.
# We need to make sure we ignore such errors and still allow
# the registration to be finalised so that agent still
# start up and collect metrics. Any X-Ray sessions will be
# picked up in subsequent harvest.
try:
self.process_agent_commands()
except RetryDataForRequest:
# Ignore any error connecting to the data collector at
# this point as trying to get agent commands at this
# point is an optimisation and not a big issue if it fails.
# Transient issues with the data collector for this will
# just cause noise in the agent logs and worry users. An
# ongoing connection issue will be picked properly with
# the subsequent data harvest.
pass
except Exception:
if not self._agent_shutdown and not self._pending_shutdown:
_logger.exception('Unexpected exception when processing '
'agent commands when registering agent with the '
'data collector. If this problem persists, please '
'report this problem to New Relic support for '
'further investigation.')
else:
raise
# Start any data samplers so they are aware of the start of
# the harvest period.
self.start_data_samplers()
except Exception:
# If an exception occurs after agent has been flagged to be
# shutdown then we ignore the error. This is because all
            # sorts of weird errors could occur when the main thread starts
# destroying objects and this background thread to register
# the application is still running.
if not self._agent_shutdown and not self._pending_shutdown:
_logger.exception('Unexpected exception when registering '
'agent with the data collector. If this problem '
'persists, please report this problem to New Relic '
'support for further investigation.')
self._active_session.close_connection()
def validate_process(self):
"""Logs a warning message if called in a process different to
where the application was registered. Only logs a message the
first time this is detected for current active session.
"""
process_id = os.getpid()
# Detect where potentially trying to record any data in a
# process different to where the harvest thread was created.
# Note that this only works for the case where a section had
# been activated prior to the process being forked.
if self._process_id and process_id != self._process_id:
_logger.warning('Attempt to reactivate application or record '
'transactions in a process different to where the '
'agent was already registered for application %r. No '
'data will be reported for this process with pid of '
'%d. Registration of the agent for this application '
'occurred in process with pid %d. If no data at all '
'is being reported for your application, then please '
'report this problem to New Relic support for further '
'investigation.', self._app_name, process_id,
self._process_id)
settings = global_settings()
if settings.debug.log_agent_initialization:
_logger.info('Process validation check was triggered '
'from: %r', ''.join(traceback.format_stack()[:-1]))
else:
_logger.debug('Process validation check was triggered '
'from: %r', ''.join(traceback.format_stack()[:-1]))
# We now zero out the process ID so we know we have already
# generated a warning message.
self._process_id = 0
def normalize_name(self, name, rule_type):
"""Applies the agent normalization rules of the the specified
rule type to the supplied name.
"""
if not self._active_session:
return name, False
try:
return self._rules_engine[rule_type].normalize(name)
except Exception:
# In the event that the rules engine blows up because of a
# problem in the rules supplied by the data collector, we
# log the exception and otherwise return the original.
#
# NOTE This has the potential to cause metric grouping
# issues, but we should not be getting broken rules to begin
# with if they are validated properly when entered or
# generated. We could perhaps instead flag that the
# transaction be ignored and thus not reported.
_logger.exception('The application of the normalization '
'rules for %r has failed. This can indicate '
'a problem with the agent rules supplied by the '
'data collector. Please report this problem to New '
'Relic support for further investigation.', name)
return name, False
def register_data_source(self, source, name, settings, **properties):
"""Create a data sampler corresponding to the data source
for this application.
"""
_logger.debug('Register data source %r against application where '
'application=%r, name=%r, settings=%r and properties=%r.',
source, self._app_name, name, settings, properties)
self._data_samplers.append(DataSampler(self._app_name, source,
name, settings, **properties))
def start_data_samplers(self):
"""Starts any data samplers. This will be called when the
application has been successfully registered and monitoring of
transactions commences.
"""
with self._data_samplers_lock:
_logger.debug('Starting data samplers for application %r.',
self._app_name)
for data_sampler in self._data_samplers:
try:
_logger.debug('Starting data sampler for %r in '
'application %r.', data_sampler.name,
self._app_name)
data_sampler.start()
except Exception:
_logger.exception('Unexpected exception when starting '
'data source %r. Custom metrics from this data '
'source may not be subsequently available. If '
'this problem persists, please report this '
'problem to the provider of the data source.',
data_sampler.name)
def stop_data_samplers(self):
"""Stop any data samplers. This will be called when the active
session is terminated due to a harvest reporting error or process
shutdown.
"""
with self._data_samplers_lock:
_logger.debug('Stopping data samplers for application %r.',
self._app_name)
for data_sampler in self._data_samplers:
try:
_logger.debug('Stopping data sampler for %r in '
'application %r.', data_sampler.name,
self._app_name)
data_sampler.stop()
except Exception:
_logger.exception('Unexpected exception when stopping '
'data source %r Custom metrics from this data '
'source may not be subsequently available. If '
'this problem persists, please report this '
'problem to the provider of the data source.',
data_sampler.name)
def record_exception(self, exc=None, value=None, tb=None, params={},
ignore_errors=[]):
"""Record a global exception against the application independent
of a specific transaction.
"""
if not self._active_session:
return
with self._stats_lock:
# It may still actually be rejected if no exception
# supplied or if was in the ignored list. For now
# always attempt anyway and also increment the events
# count still so that short harvest is extended.
self._global_events_account += 1
self._stats_engine.record_exception(exc, value, tb,
params, ignore_errors)
def record_custom_metric(self, name, value):
"""Record a custom metric against the application independent
of a specific transaction.
NOTE that this will require locking of the stats engine for
custom metrics and so under heavy use will have performance
issues. It is better to record the custom metric against an
active transaction as they will then be aggregated at the end of
the transaction when all other metrics are aggregated and so no
additional locking will be required.
"""
if not self._active_session:
return
with self._stats_custom_lock:
self._global_events_account += 1
self._stats_custom_engine.record_custom_metric(name, value)
def record_custom_metrics(self, metrics):
"""Record a set of custom metrics against the application
independent of a specific transaction.
NOTE that this will require locking of the stats engine for
custom metrics and so under heavy use will have performance
issues. It is better to record the custom metric against an
active transaction as they will then be aggregated at the end of
the transaction when all other metrics are aggregated and so no
additional locking will be required.
"""
if not self._active_session:
return
with self._stats_custom_lock:
for name, value in metrics:
self._global_events_account += 1
self._stats_custom_engine.record_custom_metric(name, value)
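
    # Illustrative usage (not from the original source): assuming `application`
    # is an active Application instance, a batch of samples can be pushed in a
    # single locked pass, for example:
    #
    #     application.record_custom_metrics([
    #         ('Custom/Queue/Depth', 12),
    #         ('Custom/Workers/Busy', 3),
    #     ])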
def record_transaction(self, data, profile_samples=None):
"""Record a single transaction against this application."""
if not self._active_session:
return
settings = self._stats_engine.settings
if settings is None:
return
# Validate that the transaction was started against the same
# agent run ID as we are now recording data for. They might be
# different where a long running transaction covered multiple
# agent runs due to a server side configuration change.
if settings.agent_run_id != data.settings.agent_run_id:
_logger.debug('Discard transaction for application %r as '
'runs over multiple agent runs. Initial agent run ID '
'is %r and the current agent run ID is %r.',
self._app_name, data.settings.agent_run_id,
settings.agent_run_id)
return
# Do checks to see whether trying to record a transaction in a
# different process to that the application was activated in.
self.validate_process()
internal_metrics = CustomMetrics()
with InternalTraceContext(internal_metrics):
try:
# We accumulate stats into a workarea and only then merge it
# into the main one under a thread lock. Do this to ensure
# that the process of generating the metrics into the stats
                # doesn't unnecessarily lock out another thread.
stats = self._stats_engine.create_workarea()
stats.record_transaction(data)
except Exception:
_logger.exception('The generation of transaction data has '
'failed. This would indicate some sort of internal '
'implementation issue with the agent. Please report '
'this problem to New Relic support for further '
'investigation.')
if settings.debug.record_transaction_failure:
raise
if (profile_samples and (data.path in
self._stats_engine.xray_sessions or
'WebTransaction/Agent/__profiler__' in
self._stats_engine.xray_sessions)):
try:
background_task, samples = profile_samples
internal_metric('Supportability/Profiling/Counts/'
'stack_traces[sample]', len(samples))
tr_type = 'BACKGROUND' if background_task else 'REQUEST'
if data.path in self._stats_engine.xray_sessions:
self.profile_manager.add_stack_traces(self._app_name,
data.path, tr_type, samples)
if ('WebTransaction/Agent/__profiler__' in
self._stats_engine.xray_sessions):
self.profile_manager.add_stack_traces(self._app_name,
'WebTransaction/Agent/__profiler__', tr_type,
samples)
except Exception:
_logger.exception('Building xray profile tree has failed.'
'This would indicate some sort of internal '
'implementation issue with the agent. Please '
'report this problem to New Relic support for '
'further investigation.')
if settings.debug.record_transaction_failure:
raise
with self._stats_lock:
try:
self._transaction_count += 1
self._last_transaction = data.end_time
internal_metric('Supportability/Transaction/Counts/'
'metric_data', stats.metric_data_count())
self._stats_engine.merge_metric_stats(stats)
self._stats_engine.merge_other_stats(stats)
# We merge the internal statistics here as well even
# though have popped out of the context where we are
# recording. This is okay so long as don't record
# anything else after this point. If we do then that
# data will not be recorded.
self._stats_engine.merge_custom_metrics(
internal_metrics.metrics())
except Exception:
_logger.exception('The merging of transaction data has '
'failed. This would indicate some sort of '
'internal implementation issue with the agent. '
'Please report this problem to New Relic support '
'for further investigation.')
if settings.debug.record_transaction_failure:
raise
def start_xray(self, command_id=0, **kwargs):
"""Handler for agent command 'start_xray_session'. """
if not self._active_session.configuration.xray_session.enabled:
_logger.warning('An xray session was requested '
'for %r but xray sessions are disabled by the current '
'agent configuration. Enable "xray_session.enabled" '
'in the agent configuration.', self._app_name)
return {command_id: {'error': 'The xray sessions are disabled'}}
try:
xray_id = kwargs['x_ray_id']
name = kwargs['key_transaction_name']
duration_s = kwargs.get('duration', 864000) # 86400s = 24hrs
max_traces = kwargs.get('requested_trace_count', 100)
sample_period_s = kwargs.get('sample_period', 0.1)
run_profiler = kwargs.get('run_profiler', True)
except KeyError:
# A KeyError can happen if an xray id was present in
# active_xray_sessions but it was cancelled by the user before the
# agent could get it's metadata.
_logger.warning('An xray session was requested but appropriate '
'parameters were not provided. Report this error '
'to New Relic support for further investigation. '
'Provided Params: %r', kwargs)
return {command_id: {'error': 'The xray sessions error'}}
stop_time_s = self._period_start + duration_s
# Check whether we are already running an xray session for this
# xray id and ignore the subsequent request if we are.
if self._active_xrays.get(xray_id) is not None:
_logger.warning('An xray session was requested for %r but '
'an xray session for the requested key transaction '
'%r with ID of %r is already in progress. Ignoring '
'the subsequent request. This can happen, but if it '
'keeps occurring on a regular basis, please report '
'this problem to New Relic support for further '
'investigation.', self._app_name, name, xray_id)
return {command_id: {'error': 'Xray session already running.'}}
# Check whether we have an xray session running for the same key
# transaction, we should only ever have one. If already have one
# and the existing one has an ID which indicates it is older, then
# stop the existing one so we can replace it with the newer one.
# Otherwise allow the existing one to stand and ignore the new one.
xs = self._stats_engine.xray_sessions.get(name)
if xs:
if xs.xray_id < xray_id:
_logger.warning('An xray session was requested for %r but '
'an xray session with id %r for the requested key '
'transaction %r is already in progress. Replacing '
'the existing older xray session with the newer xray '
                    'session with id %r. This can happen occasionally. '
'But if it keeps occurring on a regular basis, '
'please report this problem to New Relic support '
'for further investigation.', self._app_name,
xs.xray_id, name, xray_id)
self.stop_xray(x_ray_id=xs.xray_id,
key_transaction_name=xs.key_txn)
else:
_logger.warning('An xray session was requested for %r but '
'a newer xray session with id %r for the requested '
'key transaction %r is already in progress. Ignoring '
'the older xray session request with id %r. This can '
                    'happen occasionally. But if it keeps occurring '
'on a regular basis, please report this problem '
'to New Relic support for further investigation.',
self._app_name, xs.xray_id, name, xray_id)
return {command_id: {'error': 'Xray session already running.'}}
xs = XraySession(xray_id, name, stop_time_s, max_traces,
sample_period_s)
# Add it to the currently active xrays.
self._active_xrays[xray_id] = xs
# Add it to the stats engine to start recording Xray TTs.
self._stats_engine.xray_sessions[name] = xs
# Start the xray profiler only if requested by collector.
if run_profiler:
profiler_status = self.profile_manager.start_profile_session(
self._app_name, -1, stop_time_s, sample_period_s, False,
name, xray_id)
_logger.info('Starting an xray session for %r. '
'duration:%d mins name:%s xray_id:%d', self._app_name,
duration_s/60, name, xray_id)
return {command_id: {}}
def stop_xray(self, command_id=0, **kwargs):
"""Handler for agent command 'stop_xray_session'. This command
is sent by collector under two conditions:
1. When there are enough traces for the xray session.
2. When the user cancels an xray session in progress.
"""
try:
xray_id = kwargs['x_ray_id']
name = kwargs['key_transaction_name']
except KeyError:
_logger.warning('A stop xray was requested but appropriate '
'parameters were not provided. Report this error '
'to New Relic support for further investigation. '
'Provided Params: %r', kwargs)
return {command_id: {'error': 'Xray session stop error.'}}
# An xray session is deemed as already_running if the xray_id is
# already present in the self._active_xrays or the key txn is already
# tracked in the stats_engine.xray_sessions.
already_running = self._active_xrays.get(xray_id) is not None
already_running |= (self._stats_engine.xray_sessions.get(name) is not
None)
if not already_running:
_logger.warning('A request was received to stop xray '
'session %d for %r, but the xray session is not running. '
'If this keeps occurring on a regular basis, please '
'report this problem to New Relic support for further '
'investigation.', xray_id, self._app_name)
return {command_id: {'error': 'Xray session not running.'}}
try:
xs = self._active_xrays.pop(xray_id)
self._stats_engine.xray_sessions.pop(xs.key_txn)
except KeyError:
pass
# We are deliberately ignoring the return value from
# stop_profile_session because there is a chance that the profiler has
        # stopped automatically after the allotted duration has elapsed before
# the collector got a chance to issue the stop_xray command. We don't
# want to raise an alarm for that scenario.
_logger.info('Stopping xray session %d for %r', xray_id,
self._app_name)
self.profile_manager.stop_profile_session(self._app_name, xs.key_txn)
return {command_id: {}}
def cmd_active_xray_sessions(self, command_id=0, **kwargs):
"""Receives a list of xray_ids that are currently active in the
datacollector.
"""
# If xray_sessions are disabled in the config file, just ignore the
# active_xray_sessions command and proceed.
#
# This can happen if one of the hosts has the xrays disabled but the
# other hosts are running x-rays.
if not self._active_session.configuration.xray_session.enabled:
return None
# Create a set from the xray_ids received from the collector.
collector_xray_ids = set(kwargs['xray_ids'])
_logger.debug('X Ray sessions expected to be running for %r are '
'%r.', self._app_name, collector_xray_ids)
# Create a set from the xray_ids currently active in the agent.
agent_xray_ids = set(self._active_xrays)
_logger.debug('X Ray sessions actually running for %r are '
'%r.', self._app_name, agent_xray_ids)
# Result of the (agent_xray_ids - collector_xray_ids) will be
# the ids that are not active in collector but are still active
# in the agent. These xray sessions must be stopped.
stopped_xrays = agent_xray_ids - collector_xray_ids
_logger.debug('X Ray sessions to be stopped for %r are '
'%r.', self._app_name, stopped_xrays)
for xray_id in stopped_xrays:
xs = self._active_xrays.get(xray_id)
self.stop_xray(x_ray_id=xs.xray_id,
key_transaction_name=xs.key_txn)
        # Result of the (collector_xray_ids - agent_xray_ids) will be
        # the ids of new xray sessions created in the collector
        # but not yet activated in the agent. The agent will contact the
        # collector with each xray_id and ask for its metadata, then
        # start the corresponding xray_sessions. Note that we sort the
        # list of IDs and give precedence to larger values, which should
        # be newer. We do this just in case the data collector is tardy
        # in flushing out a completed one and the UI has allowed a new one
        # to be created, leaving multiple sessions for the same key
        # transaction. (A small worked example of the set arithmetic
        # follows at the end of this method.)
new_xrays = sorted(collector_xray_ids - agent_xray_ids, reverse=True)
_logger.debug('X Ray sessions to be started for %r are '
'%r.', self._app_name, new_xrays)
for xray_id in new_xrays:
metadata = self._active_session.get_xray_metadata(xray_id)
if not metadata:
# We may get empty meta data if the xray session had
# completed or been deleted just prior to requesting
# the meta data.
_logger.debug('Meta data for xray session with id %r '
'of %r is empty, ignore it.', xray_id, self._app_name)
else:
self.start_xray(0, **metadata[0])
# Note that 'active_xray_sessions' does NOT need to send an
# acknowledgement back to the collector
return None
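    # A small worked sketch of the reconciliation above (ids are made up):
    # with the agent tracking xray ids {101, 102} and the collector reporting
    # {102, 103}, plain set differences give the sessions to stop and start.
    #
    #     agent_xray_ids = set([101, 102])
    #     collector_xray_ids = set([102, 103])
    #     stopped_xrays = agent_xray_ids - collector_xray_ids    # set([101])
    #     new_xrays = collector_xray_ids - agent_xray_ids        # set([103])
    #
    # Session 101 would be stopped, and 103 started after its metadata has
    # been fetched from the collector.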
def cmd_start_profiler(self, command_id=0, **kwargs):
"""Triggered by the start_profiler agent command to start a
thread profiling session.
"""
if not self._active_session.configuration.thread_profiler.enabled:
_logger.warning('A thread profiling session was requested '
'for %r but thread profiling is disabled by the current '
'agent configuration. Enable "thread_profiler.enabled" '
'in the agent configuration.', self._app_name)
return {command_id: {'error': 'The profiler service is disabled'}}
profile_id = kwargs['profile_id']
sample_period = kwargs['sample_period']
duration_s = kwargs['duration']
profile_agent_code = kwargs['profile_agent_code']
stop_time_s = self._period_start + duration_s
if not hasattr(sys, '_current_frames'):
_logger.warning('A thread profiling session was requested for '
'%r but thread profiling is not supported for the '
'Python interpreter being used. Contact New Relic '
'support for additional information about supported '
'platforms for the thread profiling feature.',
self._app_name)
return {command_id: {'error': 'Profiler not supported'}}
# ProfilerManager will only allow one generic thread profiler to be
# active at any given time. So if a user has multiple applications and
        # tries to start a thread profiler from both of them, then it will
# fail and log an error message. The thread profiler will report on all
# threads in the process and not just those handling transactions
# related to the specific application.
success = self.profile_manager.start_profile_session(self._app_name,
profile_id, stop_time_s, sample_period, profile_agent_code)
if not success:
_logger.warning('A thread profiling session was requested for '
'%r but a thread profiling session is already in '
'progress. Ignoring the subsequent request. '
'If this keeps occurring on a regular basis, please '
'report this problem to New Relic support for further '
'investigation.', self._app_name)
return {command_id: {'error': 'Profiler already running'}}
_logger.info('Starting thread profiling session for %r.',
self._app_name)
return {command_id: {}}
def cmd_stop_profiler(self, command_id=0, **kwargs):
"""Triggered by the stop_profiler agent command to forcibly stop
a thread profiling session prior to it having completed normally.
"""
fps = self.profile_manager.full_profile_session
if fps is None:
_logger.warning('A request was received to stop a thread '
'profiling session for %r, but a thread profiling '
'session is not running. If this keeps occurring on '
'a regular basis, please report this problem to New '
'Relic support for further investigation.',
self._app_name)
return {command_id: {'error': 'Profiler not running.'}}
elif kwargs['profile_id'] != fps.profile_id:
_logger.warning('A request was received to stop a thread '
'profiling session for %r, but the ID %r for '
'the current thread profiling session does not '
'match the provided ID of %r. If this keeps occurring on '
'a regular basis, please report this problem to New '
'Relic support for further investigation.',
self._app_name, fps.profile_id,
kwargs['profile_id'])
return {command_id: {'error': 'Profiler not running.'}}
_logger.info('Stopping thread profiler session for %r.',
self._app_name)
# To ensure that the thread profiling session stops, we wait for
# its completion. If we don't need to send back the data from
# the thread profiling session, we discard the thread profiler
# immediately.
self.profile_manager.stop_profile_session(self._app_name)
return {command_id: {}}
def harvest(self, shutdown=False):
"""Performs a harvest, reporting aggregated data for the current
reporting period to the data collector.
"""
if self._agent_shutdown:
return
if shutdown:
self._pending_shutdown = True
if not self._active_session or not self._harvest_enabled:
_logger.debug('Cannot perform a data harvest for %r as '
'there is no active session.', self._app_name)
return
internal_metrics = CustomMetrics()
with InternalTraceContext(internal_metrics):
with InternalTrace('Supportability/Harvest/Calls/harvest'):
self._harvest_count += 1
start = time.time()
_logger.debug('Commencing data harvest of %r.',
self._app_name)
# Create a snapshot of the transaction stats and
# application specific custom metrics stats, then merge
# them together. The originals will be reset at the time
# this is done so that any new metrics that come in from
# this point onwards will be accumulated in a fresh
# bucket.
_logger.debug('Snapshotting metrics for harvest of %r.',
self._app_name)
transaction_count = self._transaction_count
global_events_account = self._global_events_account
with self._stats_lock:
self._transaction_count = 0
self._last_transaction = 0.0
stats = self._stats_engine.harvest_snapshot()
with self._stats_custom_lock:
self._global_events_account = 0
stats_custom = self._stats_custom_engine.harvest_snapshot()
stats.merge_metric_stats(stats_custom)
# Now merge in any metrics from the data samplers
# associated with this application.
#
                # NOTE If a data sampler has problems then whatever data was
                # collected up to that point is retained. The data
                # sampler itself is also retained and would be used
                # again on future harvests. If the problem with the data
                # sampler is persistent, the issue would then reoccur
                # with every harvest. If the data sampler is a user provided
                # one, it should perhaps be deregistered if it keeps
                # having problems.
_logger.debug('Fetching metrics from data sources for '
'harvest of %r.', self._app_name)
for data_sampler in self._data_samplers:
try:
for sample in data_sampler.metrics():
try:
name, value = sample
stats.record_custom_metric(name, value)
except Exception:
_logger.exception('The merging of custom '
'metric sample %r from data source %r '
'has failed. Validate the format of '
'the sample. If this issue persists '
'then please report this problem to '
'the data source provider or New '
'Relic support for further '
'investigation.', sample,
data_sampler.name)
break
except Exception:
_logger.exception('The merging of custom metric '
'samples from data source %r has failed. '
'Validate that the data source is producing '
'samples correctly. If this issue persists '
'then please report this problem to the data '
'source provider or New Relic support for '
'further investigation.', data_sampler.name)
# Add a metric we can use to track how many harvest
# periods have occurred.
stats.record_custom_metric('Instance/Reporting', 0)
# Create our time stamp as to when this reporting period
# ends and start reporting the data.
period_end = time.time()
# If this harvest is being forcibly triggered on process
# shutdown, there are transactions recorded, and the
# duration of the harvest period is less than 1 second,
# then artificially push out the end time of the harvest
# period. This is done so that the harvest period is not
# less than 1 second, otherwise the data collector will
                # throw the data away. This is desirable for the case where
                # we are trying to monitor scripts which perform a one off
                # task and then immediately exit. It is also useful when
                # running test scripts.
if shutdown and (transaction_count or global_events_account):
if period_end - self._period_start < 1.0:
_logger.debug('Stretching harvest duration for '
'forced harvest on shutdown.')
period_end = self._period_start + 1.001
try:
# Send the transaction and custom metric data.
configuration = self._active_session.configuration
# Report internal metrics about sample data set
# for analytics.
if (configuration.collect_analytics_events and
configuration.analytics_events.enabled):
if configuration.analytics_events.transactions.enabled:
sampled_data_set = stats.sampled_data_set
internal_metric('Supportability/RequestSampler/'
'requests', sampled_data_set.count)
internal_metric('Supportability/RequestSampler/'
'samples', len(sampled_data_set.samples))
# Create a metric_normalizer based on normalize_name
# If metric rename rules are empty, set normalizer
# to None and the stats engine will skip steps as
# appropriate.
if self._rules_engine['metric'].rules:
metric_normalizer = partial(self.normalize_name,
rule_type='metric')
else:
metric_normalizer = None
# Pass the metric_normalizer to stats.metric_data to
# do metric renaming.
_logger.debug('Normalizing metrics for harvest of %r.',
self._app_name)
metric_data = stats.metric_data(metric_normalizer)
internal_metric('Supportability/Harvest/Counts/'
'metric_data', len(metric_data))
_logger.debug('Sending metric data for harvest of %r.',
self._app_name)
metric_ids = self._active_session.send_metric_data(
self._period_start, period_end, metric_data)
stats.reset_metric_stats()
# Send data set for analytics, which is a combination
# of Synthetic analytic events, and the sampled data
# set of regular requests.
all_analytic_events = []
if len(stats.synthetics_events):
all_analytic_events.extend(stats.synthetics_events)
if (configuration.collect_analytics_events and
configuration.analytics_events.enabled):
if configuration.analytics_events.transactions.enabled:
samples = stats.sampled_data_set.samples
all_analytic_events.extend(samples)
if len(all_analytic_events):
_logger.debug('Sending analytics event data '
'for harvest of %r.', self._app_name)
self._active_session.analytic_event_data(
all_analytic_events)
stats.reset_sampled_data()
stats.reset_synthetics_events()
# Successful, so we update the stats engine with the
# new metric IDs and reset the reporting period
# start time. If an error occurs after this point,
# any remaining data for the period being reported
# on will be thrown away. We reset the count of
# number of merges we have done due to failures as
# only really want to count errors in being able to
# report the main transaction metrics.
self._merge_count = 0
self._period_start = period_end
self._stats_engine.update_metric_ids(metric_ids)
# Send the accumulated error data.
if configuration.collect_errors:
error_data = stats.error_data()
internal_metric('Supportability/Harvest/Counts/'
'error_data', len(error_data))
if error_data:
_logger.debug('Sending error data for harvest '
'of %r.', self._app_name)
self._active_session.send_errors(error_data)
if configuration.collect_traces:
connections = SQLConnections(
configuration.agent_limits.max_sql_connections)
with connections:
if configuration.slow_sql.enabled:
_logger.debug('Processing slow SQL data '
'for harvest of %r.', self._app_name)
slow_sql_data = stats.slow_sql_data(
connections)
internal_metric('Supportability/Harvest/'
'Counts/sql_trace_data',
len(slow_sql_data))
if slow_sql_data:
_logger.debug('Sending slow SQL data for '
'harvest of %r.', self._app_name)
self._active_session.send_sql_traces(
slow_sql_data)
slow_transaction_data = (
stats.transaction_trace_data(
connections))
internal_metric('Supportability/Harvest/Counts/'
'transaction_sample_data',
len(slow_transaction_data))
if slow_transaction_data:
_logger.debug('Sending slow transaction '
'data for harvest of %r.',
self._app_name)
self._active_session.send_transaction_traces(
slow_transaction_data)
# Fetch agent commands sent from the data collector
# and process them.
_logger.debug('Process agent commands during '
'harvest of %r.', self._app_name)
self.process_agent_commands()
# Send the accumulated profile data back to the data
                    # collector. Note that this comes after we process
# the agent commands as we might receive an agent
# command to stop the profiling session, but still
# send the data back. Having the sending of the
# results last ensures we send back that data from
# the stopped profiling session immediately.
_logger.debug('Send profiling data for harvest of '
'%r.', self._app_name)
self.report_profile_data()
_logger.debug('Done sending data for harvest of '
'%r.', self._app_name)
# If this is a final forced harvest for the process
# then attempt to shutdown the session.
if shutdown:
self.internal_agent_shutdown(restart=False)
except ForceAgentRestart:
# The data collector has indicated that we need to
# perform an internal agent restart. We attempt to
# properly shutdown the session and then initiate a
# new session.
self.internal_agent_shutdown(restart=True)
except ForceAgentDisconnect:
# The data collector has indicated that we need to
# force disconnect and stop reporting. We attempt to
# properly shutdown the session, but don't start a
# new one and flag ourselves as shutdown. This
# notification is presumably sent when a specific
# application is behaving so badly that it needs to
# be stopped entirely. It would require a complete
# process start to be able to attempt to connect
# again and if the server side kill switch is still
# enabled it would be told to disconnect once more.
self.internal_agent_shutdown(restart=False)
except RetryDataForRequest:
# A potentially recoverable error occurred. We merge
# the stats back into that for the current period
# and abort the current harvest if the problem
# occurred when initially reporting the main
# transaction metrics. If the problem occurred when
# reporting other information then that and any
# other non reported information is thrown away.
#
                # In order to prevent memory growth, we will only
# merge data up to a set maximum number of
# successive times. When this occurs we throw away
# all the metric data and start over. We also only
# merge main metric data and discard errors, slow
# SQL and transaction traces from older harvest
# period.
if self._period_start != period_end:
self._merge_count += 1
agent_limits = configuration.agent_limits
maximum = agent_limits.merge_stats_maximum
if self._merge_count <= maximum:
self._stats_engine.merge_metric_stats(
stats, rollback=True)
# Only merge back sampled data at present.
self._stats_engine.merge_other_stats(stats,
merge_traces=False, merge_errors=False,
merge_sql=False, merge_samples=True,
merge_synthetics_events = True,
rollback=True)
else:
_logger.error('Unable to report main transaction '
'metrics after %r successive attempts. '
'Check the log messages and if necessary '
'please report this problem to New Relic '
'support for further investigation.',
maximum)
self._discard_count += self._merge_count
self._merge_count = 0
# Force an agent restart ourselves.
_logger.debug('Abandoning agent run and forcing '
'a reconnect of the agent.')
self.internal_agent_shutdown(restart=True)
except DiscardDataForRequest:
# An issue must have occurred in reporting the data
# but if we retry with same data the same error is
# likely to occur again so we just throw any data
# not sent away for this reporting period.
self._discard_count += 1
except Exception:
# An unexpected error, likely some sort of internal
# agent implementation issue.
_logger.exception('Unexpected exception when attempting '
'to harvest the metric data and send it to the '
'data collector. Please report this problem to '
'New Relic support for further investigation.')
duration = time.time() - start
_logger.debug('Completed harvest for %r in %.2f seconds.',
self._app_name, duration)
# Force close the socket connection which has been
# created for this harvest if session still exists.
            # A new connection will be created automatically on the
# next harvest.
if self._active_session:
self._active_session.close_connection()
# Merge back in statistics recorded about the last harvest
# and communication with the data collector. This will be
# part of the data for the next harvest period.
with self._stats_lock:
self._stats_engine.merge_custom_metrics(internal_metrics.metrics())
def report_profile_data(self):
"""Report back any profile data. This may be partial thread
profile data for X-Ray sessions, or final thread profile data
for a full profiling session.
"""
for profile_data in self.profile_manager.profile_data(self._app_name):
if profile_data:
_logger.debug('Reporting thread profiling session data '
'for %r.', self._app_name)
self._active_session.send_profile_data(profile_data)
def internal_agent_shutdown(self, restart=False):
"""Terminates the active agent session for this application and
optionally triggers activation of a new session.
"""
# We need to stop any thread profiler session related to this
# application. This may be a full thread profiling session or
# one run in relation to active X-Ray sessions. We also need to
# throw away any X-Ray sessions. These will be restarted as
# necessary after a reconnect if done.
self.profile_manager.shutdown(self._app_name)
self._active_xrays = {}
# Attempt to report back any profile data which was left when
# all profiling was shutdown due to the agent shutdown for this
# application.
try:
self.report_profile_data()
except Exception:
pass
# Stop any data samplers which are running. These can be internal
# data samplers or user provided custom metric data sources.
self.stop_data_samplers()
# Now shutdown the actual agent session.
try:
self._active_session.shutdown_session()
except Exception:
pass
self._active_session.close_connection()
self._active_session = None
self._harvest_enabled = False
# Initiate a new session if required, otherwise mark the agent
# as shutdown.
if restart:
self._agent_restart += 1
self.activate_session()
else:
self._agent_shutdown = True
def process_agent_commands(self):
"""Fetches agents commands from data collector and process them.
"""
# We use a lock around this as this will be called just after
# having registered the agent, as well as during the normal
# harvest period. We want to avoid a problem if the process is
# being shutdown and a forced harvest was triggered while still
# doing the initial attempt to get the agent commands.
with self._agent_commands_lock:
# Get agent commands from the data collector.
_logger.debug('Process agent commands for %r.', self._app_name)
agent_commands = self._active_session.get_agent_commands()
# Extract the command names from the agent_commands. This is
# used to check for the presence of active_xray_sessions command
# in the list.
cmd_names = [x[1]['name'] for x in agent_commands]
no_xray_cmds = 'active_xray_sessions' not in cmd_names
# If there are active xray sessions running but the agent
# commands doesn't have any active_xray_session ids then all the
# active xray sessions must be stopped.
if self._active_xrays and no_xray_cmds:
_logger.debug('Stopping all X Ray sessions for %r. '
'Current sessions running are %r.', self._app_name,
list(six.iterkeys(self._active_xrays)))
for xs in list(six.itervalues(self._active_xrays)):
self.stop_xray(x_ray_id=xs.xray_id,
key_transaction_name=xs.key_txn)
            # For each agent command received, call the appropriate agent
# command handler. Reply to the data collector with the
# acknowledgement of the agent command.
for command in agent_commands:
cmd_id = command[0]
cmd_name = command[1]['name']
cmd_args = command[1]['arguments']
# An agent command is mapped to a method of this class. If
# we don't know about a specific agent command we just
# ignore it.
func_name = 'cmd_%s' % cmd_name
cmd_handler = getattr(self, func_name, None)
if cmd_handler is None:
_logger.debug('Received unknown agent command '
'%r from the data collector for %r.',
cmd_name, self._app_name)
continue
_logger.debug('Process agent command %r from the data '
'collector for %r.', cmd_name, self._app_name)
cmd_res = cmd_handler(cmd_id, **cmd_args)
# Send back any result for the agent command.
if cmd_res:
self._active_session.send_agent_command_results(cmd_res)
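    # Illustrative sketch only (the command name, id and arguments below are
    # made up): get_agent_commands() returns a list of [command_id, details]
    # pairs which the loop above dispatches to the matching cmd_<name> handler.
    #
    #     agent_commands = [
    #         [666, {'name': 'start_profiler',
    #                'arguments': {'profile_id': 1, 'sample_period': 0.1,
    #                              'duration': 120,
    #                              'profile_agent_code': False}}],
    #     ]
    #
    # would be dispatched as cmd_start_profiler(666, profile_id=1, ...) and
    # its return value sent back via send_agent_command_results().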
| agpl-3.0 | -6,046,307,316,096,256,000 | 42.141079 | 79 | 0.549719 | false |
2014c2g1/c2g1 | exts/exts/sphinxcontrib/bibtex/latex_lexer.py | 38 | 12344 | # -*- coding: utf-8 -*-
"""
Simple incremental latex lexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import codecs
import collections
import re
class Token(collections.namedtuple("Token", "name text")):
"""Stores information about a matched token."""
__slots__ = () # efficiency
def __new__(cls, name=None, text=None):
return tuple.__new__(
cls,
(name if name is not None else 'unknown',
text if text is not None else b''))
def __nonzero__(self):
return bool(self.text)
def __len__(self):
return len(self.text)
def decode(self, encoding):
if self.name == 'control_word':
return self.text.decode(encoding) + u' '
else:
return self.text.decode(encoding)
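# A small usage sketch (not part of the module): control words gain a
# trailing space when decoded, other tokens decode as-is.
#
#     Token('control_word', b'\\par').decode('ascii')    # u'\\par '
#     Token('chars', b'x').decode('ascii')               # u'x'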
# implementation note: we derive from IncrementalDecoder because this
# class serves excellently as a base class for incremental decoders,
# but of course we don't decode yet until later
class LatexLexer(codecs.IncrementalDecoder):
"""A very simple lexer for tex/latex code."""
# implementation note: every token **must** be decodable by inputenc
tokens = [
# comment: for ease, and for speed, we handle it as a token
('comment', br'%.*?\n'),
# control tokens
# in latex, some control tokens skip following whitespace
# ('control-word' and 'control-symbol')
# others do not ('control-symbol-x')
# XXX TBT says no control symbols skip whitespace (except '\ ')
# XXX but tests reveal otherwise?
('control_word', br'[\\][a-zA-Z]+'),
('control_symbol', br'[\\][~' br"'" br'"` =^!]'),
('control_symbol_x', br'[\\][^a-zA-Z]'), # TODO should only match ascii
# parameter tokens
# also support a lone hash so we can lex things like b'#a'
('parameter', br'\#[0-9]|\#'),
# any remaining characters; for ease we also handle space and
# newline as tokens
('space', br' '),
('newline', br'\n'),
('mathshift', br'[$]'),
# note: some chars joined together to make it easier to detect
# symbols that have a special function (i.e. --, ---, etc.)
('chars',
br'---|--|-|[`][`]'
br"|['][']"
br'|[?][`]|[!][`]'
# separate chars because brackets are optional
# e.g. fran\\c cais = fran\\c{c}ais in latex
# so only way to detect \\c acting on c only is this way
br'|[0-9a-zA-Z{}]'
# we have to join everything else together to support
# multibyte encodings: every token must be decodable!!
# this means for instance that \\c öké is NOT equivalent to
# \\c{ö}ké
br'|[^ %#$\n\\]+'),
# trailing garbage which we cannot decode otherwise
# (such as a lone '\' at the end of a buffer)
# is never emitted, but used internally by the buffer
('unknown', br'.'),
]
def __init__(self, errors='strict'):
"""Initialize the codec."""
self.errors = errors
# regular expression used for matching
self.regexp = re.compile(
b"|".join(
b"(?P<" + name.encode() + b">" + regexp + b")"
for name, regexp in self.tokens),
re.DOTALL)
# reset state
self.reset()
def reset(self):
"""Reset state (also called by __init__ to initialize the
state).
"""
# buffer for storing last (possibly incomplete) token
self.raw_buffer = Token()
def getstate(self):
return (self.raw_buffer.text, 0)
def setstate(self, state):
self.raw_buffer = Token('unknown', state[0])
def get_raw_tokens(self, bytes_, final=False):
"""Yield tokens without any further processing. Tokens are one of:
- ``\\<word>``: a control word (i.e. a command)
- ``\\<symbol>``: a control symbol (i.e. \\^ etc.)
- ``#<n>``: a parameter
- a series of byte characters
"""
if not isinstance(bytes_, bytes):
raise TypeError(
'expected bytes but got %s'
% bytes_.__class__.__name__)
if self.raw_buffer:
bytes_ = self.raw_buffer.text + bytes_
self.raw_buffer = Token()
for match in self.regexp.finditer(bytes_):
for name, regexp in self.tokens:
text = match.group(name)
if text is not None:
# yield the buffer token(s)
for token in self.flush_raw_tokens():
yield token
# fill buffer with next token
self.raw_buffer = Token(name, text)
break
else:
# should not happen
raise AssertionError("lexer failed on '%s'" % bytes_)
if final:
for token in self.flush_raw_tokens():
yield token
def flush_raw_tokens(self):
"""Flush the raw token buffer."""
if self.raw_buffer:
yield self.raw_buffer
self.raw_buffer = Token()
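# A rough usage sketch (illustrative only): feeding a small byte string to
# the lexer with final=True flushes the internal buffer and yields Token
# namedtuples.
#
#     lexer = LatexLexer()
#     for tok in lexer.get_raw_tokens(b'\\emph{x} % note\n', final=True):
#         print(tok)
#
# This would yield a 'control_word' token for b'\\emph', single-character
# 'chars' tokens for the braces and the letter, a 'space' token and a
# 'comment' token covering b'% note\n'.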
class LatexIncrementalLexer(LatexLexer):
"""A very simple incremental lexer for tex/latex code. Roughly
follows the state machine described in Tex By Topic, Chapter 2.
The generated tokens satisfy:
* no newline characters: paragraphs are separated by '\\par'
* spaces following control tokens are compressed
"""
def reset(self):
"""Reset state (also called by __init__ to initialize the
state).
"""
LatexLexer.reset(self)
# three possible states:
# newline (N), skipping spaces (S), and middle of line (M)
self.state = 'N'
# inline math mode?
self.inline_math = False
def getstate(self):
# state 'M' is most common, so let that be zero
return (
self.raw_buffer,
{'M': 0, 'N': 1, 'S': 2}[self.state]
| (4 if self.inline_math else 0)
)
def setstate(self, state):
self.raw_buffer = state[0]
self.state = {0: 'M', 1: 'N', 2: 'S'}[state[1] & 3]
self.inline_math = bool(state[1] & 4)
def get_tokens(self, bytes_, final=False):
"""Yield tokens while maintaining a state. Also skip
whitespace after control words and (some) control symbols.
Replaces newlines by spaces and \\par commands depending on
the context.
"""
# current position relative to the start of bytes_ in the sequence
# of bytes that have been decoded
pos = -len(self.raw_buffer)
for token in self.get_raw_tokens(bytes_, final=final):
pos = pos + len(token)
assert pos >= 0 # first token includes at least self.raw_buffer
if token.name == 'newline':
if self.state == 'N':
# if state was 'N', generate new paragraph
yield Token('control_word', b'\\par')
elif self.state == 'S':
# switch to 'N' state, do not generate a space
self.state = 'N'
elif self.state == 'M':
# switch to 'N' state, generate a space
self.state = 'N'
yield Token('space', b' ')
else:
raise AssertionError(
"unknown tex state '%s'" % self.state)
elif token.name == 'space':
if self.state == 'N':
# remain in 'N' state, no space token generated
pass
elif self.state == 'S':
# remain in 'S' state, no space token generated
pass
elif self.state == 'M':
# in M mode, generate the space,
# but switch to space skip mode
self.state = 'S'
yield token
else:
raise AssertionError(
"unknown state %s" % repr(self.state))
elif token.name == 'char':
self.state = 'M'
yield token
elif token.name == 'mathshift':
self.inline_math = not self.inline_math
yield token
elif token.name == 'parameter':
self.state = 'M'
yield token
elif token.name == 'control_word':
# go to space skip mode
self.state = 'S'
yield token
elif token.name == 'control_symbol':
# go to space skip mode
self.state = 'S'
yield token
elif token.name == 'control_symbol_x':
# don't skip following space, so go to M mode
self.state = 'M'
yield token
elif token.name == 'comment':
# go to newline mode, no token is generated
# note: comment includes the newline
self.state = 'N'
elif token.name == 'chars':
self.state = 'M'
yield token
elif token.name == 'unknown':
if self.errors == 'strict':
# current position within bytes_
# this is the position right after the unknown token
raise UnicodeDecodeError(
"latex", # codec
bytes_, # problematic input
pos - len(token), # start of problematic token
pos, # end of it
"unknown token %s" % repr(token.text))
elif self.errors == 'ignore':
# do nothing
pass
elif self.errors == 'replace':
yield Token('chars', b'?' * len(token))
else:
raise NotImplementedError(
"error mode %s not supported" % repr(self.errors))
class LatexIncrementalDecoder(LatexIncrementalLexer):
"""Simple incremental decoder. Transforms lexed latex tokens into
unicode.
To customize decoding, subclass and override
:meth:`get_unicode_tokens`.
"""
inputenc = "ascii"
"""Input encoding. **Must** extend ascii."""
def get_unicode_tokens(self, bytes_, final=False):
""":meth:`decode` calls this function to produce the final
sequence of unicode strings. This implementation simply
decodes every sequence in *inputenc* encoding. Override to
process the tokens in some other way (for example, for token
translation).
"""
for token in self.get_tokens(bytes_, final=final):
yield token.decode(self.inputenc)
def decode(self, bytes_, final=False):
try:
return u''.join(self.get_unicode_tokens(bytes_, final=final))
except UnicodeDecodeError as e:
# API requires that the encode method raises a ValueError
# in this case
raise ValueError(e)
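# A rough usage sketch (illustrative only): the incremental decoder turns
# latex bytes into a unicode string, compressing the whitespace that follows
# a control word.
#
#     LatexIncrementalDecoder().decode(b'\\par  x', final=True)
#     # -> u'\\par x'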
class LatexIncrementalEncoder(codecs.IncrementalEncoder):
"""Simple incremental encoder for latex."""
inputenc = "ascii"
"""Input encoding. **Must** extend ascii."""
def get_latex_bytes(self, unicode_):
""":meth:`encode` calls this function to produce the final
sequence of latex bytes. This implementation simply
encodes every sequence in *inputenc* encoding. Override to
process the unicode in some other way (for example, for character
translation).
"""
if not isinstance(unicode_, basestring):
raise TypeError(
"expected unicode for encode input, but got {0} instead"
.format(unicode_.__class__.__name__))
for c in unicode_:
            yield c.encode(self.inputenc, self.errors)
def encode(self, unicode_, final=False):
"""Encode unicode string into a latex byte sequence."""
try:
return b''.join(self.get_latex_bytes(unicode_, final=final))
        except UnicodeDecodeError as e:
# API requires that the encode method raises a ValueError
# in this case
raise ValueError(e)
| gpl-2.0 | 211,486,954,619,856,700 | 37.322981 | 79 | 0.528282 | false |
jwinzer/OpenSlides | server/openslides/mediafiles/migrations/0004_directories_and_permissions_1.py | 7 | 2062 | # Generated by Django 2.2.2 on 2019-06-28 06:06
from django.db import migrations, models
import openslides.mediafiles.models
import openslides.utils.models
class Migration(migrations.Migration):
dependencies = [("mediafiles", "0003_auto_20190119_1425")]
operations = [
migrations.AlterModelOptions(
name="mediafile",
options={
"default_permissions": (),
"ordering": ("title",),
"permissions": (
("can_see", "Can see the list of files"),
("can_manage", "Can manage files"),
),
},
),
migrations.RenameField(
model_name="mediafile", old_name="timestamp", new_name="create_timestamp"
),
migrations.AddField(
model_name="mediafile",
name="access_groups",
field=models.ManyToManyField(blank=True, to="users.Group"),
),
migrations.AddField(
model_name="mediafile",
name="is_directory",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="mediafile",
name="parent",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="children",
to="mediafiles.Mediafile",
),
),
migrations.AddField(
model_name="mediafile",
name="original_filename",
field=models.CharField(default="", max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name="mediafile",
name="mediafile",
field=models.FileField(
null=True, upload_to=openslides.mediafiles.models.get_file_path
),
),
migrations.AlterField(
model_name="mediafile", name="title", field=models.CharField(max_length=255)
),
]
| mit | 2,441,541,100,533,031,400 | 30.723077 | 88 | 0.525218 | false |
walkers-mv/luigi | luigi/util.py | 22 | 8150 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import functools
import logging
from luigi import six
from luigi import task
from luigi import parameter
from luigi.deprecate_kwarg import deprecate_kwarg # NOQA: removing this breaks code
if six.PY3:
xrange = range
logger = logging.getLogger('luigi-interface')
def common_params(task_instance, task_cls):
"""
Grab all the values in task_instance that are found in task_cls.
"""
if not isinstance(task_cls, task.Register):
raise TypeError("task_cls must be an uninstantiated Task")
task_instance_param_names = dict(task_instance.get_params()).keys()
task_cls_param_names = dict(task_cls.get_params()).keys()
common_param_names = list(set.intersection(set(task_instance_param_names), set(task_cls_param_names)))
common_param_vals = [(key, dict(task_cls.get_params())[key]) for key in common_param_names]
common_kwargs = dict([(key, task_instance.param_kwargs[key]) for key in common_param_names])
vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs))
return vals
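# A small usage sketch (class and parameter names are made up): only values
# of parameters that the instance shares with the other task class are kept.
#
#     import luigi, datetime
#
#     class TaskA(luigi.Task):
#         date = luigi.DateParameter()
#         n = luigi.IntParameter()
#
#     class TaskB(luigi.Task):
#         date = luigi.DateParameter()
#
#     common_params(TaskA(date=datetime.date(2015, 1, 1), n=1), TaskB)
#     # -> {'date': datetime.date(2015, 1, 1)}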
def task_wraps(P):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
return functools.wraps(P, updated=[])
class inherits(object):
"""
Task inheritance.
Usage:
.. code-block:: python
class AnotherTask(luigi.Task):
n = luigi.IntParameter()
# ...
      @inherits(AnotherTask)
class MyTask(luigi.Task):
def requires(self):
return self.clone_parent()
def run(self):
              print(self.n)  # this will be defined
# ...
"""
def __init__(self, task_to_inherit):
super(inherits, self).__init__()
self.task_to_inherit = task_to_inherit
def __call__(self, task_that_inherits):
for param_name, param_obj in self.task_to_inherit.get_params():
if not hasattr(task_that_inherits, param_name):
setattr(task_that_inherits, param_name, param_obj)
# Modify task_that_inherits by subclassing it and adding methods
@task_wraps(task_that_inherits)
class Wrapped(task_that_inherits):
def clone_parent(_self, **args):
return _self.clone(cls=self.task_to_inherit, **args)
return Wrapped
class requires(object):
"""
Same as @inherits, but also auto-defines the requires method.
"""
def __init__(self, task_to_require):
super(requires, self).__init__()
self.inherit_decorator = inherits(task_to_require)
def __call__(self, task_that_requires):
task_that_requires = self.inherit_decorator(task_that_requires)
        # Modify task_that_requires by subclassing it and adding methods
@task_wraps(task_that_requires)
class Wrapped(task_that_requires):
def requires(_self):
return _self.clone_parent()
return Wrapped
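# A small usage sketch for @requires (names are illustrative): the decorated
# task inherits the parent's parameters and gets an automatic requires().
#
#     class ParentTask(luigi.Task):
#         date = luigi.DateParameter()
#
#     @requires(ParentTask)
#     class ChildTask(luigi.Task):
#         def run(self):
#             # self.date exists; self.requires() is ParentTask(date=self.date)
#             pass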
class copies(object):
"""
Auto-copies a task.
Usage:
.. code-block:: python
      @copies(MyTask)
class CopyOfMyTask(luigi.Task):
def output(self):
return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d'))
"""
def __init__(self, task_to_copy):
super(copies, self).__init__()
self.requires_decorator = requires(task_to_copy)
def __call__(self, task_that_copies):
task_that_copies = self.requires_decorator(task_that_copies)
# Modify task_that_copies by subclassing it and adding methods
@task_wraps(task_that_copies)
class Wrapped(task_that_copies):
def run(_self):
i, o = _self.input(), _self.output()
f = o.open('w') # TODO: assert that i, o are Target objects and not complex datastructures
for line in i.open('r'):
f.write(line)
f.close()
return Wrapped
def delegates(task_that_delegates):
""" Lets a task call methods on subtask(s).
The way this works is that the subtask is run as a part of the task, but
the task itself doesn't have to care about the requirements of the subtasks.
The subtask doesn't exist from the scheduler's point of view, and
its dependencies are instead required by the main task.
Example:
.. code-block:: python
class PowersOfN(luigi.Task):
n = luigi.IntParameter()
def f(self, x): return x ** self.n
@delegates
class T(luigi.Task):
def subtasks(self): return PowersOfN(5)
            def run(self): print(self.subtasks().f(42))
"""
if not hasattr(task_that_delegates, 'subtasks'):
# This method can (optionally) define a couple of delegate tasks that
# will be accessible as interfaces, meaning that the task can access
# those tasks and run methods defined on them, etc
raise AttributeError('%s needs to implement the method "subtasks"' % task_that_delegates)
@task_wraps(task_that_delegates)
class Wrapped(task_that_delegates):
def deps(self):
# Overrides method in base class
return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())])
def run(self):
for t in task.flatten(self.subtasks()):
t.run()
task_that_delegates.run(self)
return Wrapped
def previous(task):
"""
Return a previous Task of the same family.
By default checks if this task family only has one non-global parameter and if
it is a DateParameter, DateHourParameter or DateIntervalParameter in which case
it returns with the time decremented by 1 (hour, day or interval)
"""
params = task.get_params()
previous_params = {}
previous_date_params = {}
for param_name, param_obj in params:
param_value = getattr(task, param_name)
if isinstance(param_obj, parameter.DateParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
elif isinstance(param_obj, parameter.DateMinuteParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1)
elif isinstance(param_obj, parameter.DateHourParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
elif isinstance(param_obj, parameter.DateIntervalParameter):
previous_date_params[param_name] = param_value.prev()
else:
previous_params[param_name] = param_value
previous_params.update(previous_date_params)
if len(previous_date_params) == 0:
raise NotImplementedError("No task parameter - can't determine previous task")
elif len(previous_date_params) > 1:
raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
else:
return task.clone(**previous_params)
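# A small usage sketch (task and dates are made up): for a task whose only
# date-like parameter is a DateParameter, previous() steps it back one day.
#
#     class MyTask(luigi.Task):
#         date = luigi.DateParameter()
#
#     previous(MyTask(date=datetime.date(2015, 3, 2)))
#     # -> MyTask(date=datetime.date(2015, 3, 1))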
def get_previous_completed(task, max_steps=10):
prev = task
for _ in xrange(max_steps):
prev = previous(prev)
logger.debug("Checking if %s is complete", prev.task_id)
if prev.complete():
return prev
return None
| apache-2.0 | 7,613,724,085,534,362,000 | 32.677686 | 114 | 0.643926 | false |
blaggacao/server-tools | __unported__/fetchmail_attach_from_folder/wizard/attach_mail_manually.py | 6 | 5137 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import fields
from openerp.osv.orm import TransientModel
logger = logging.getLogger(__name__)
class attach_mail_manually(TransientModel):
_name = 'fetchmail.attach.mail.manually'
_columns = {
'folder_id': fields.many2one('fetchmail.server.folder', 'Folder',
readonly=True),
'mail_ids': fields.one2many(
'fetchmail.attach.mail.manually.mail', 'wizard_id', 'Emails'),
}
def default_get(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
defaults = super(attach_mail_manually, self).default_get(cr, uid,
fields_list, context)
for folder in self.pool.get('fetchmail.server.folder').browse(cr, uid,
[context.get('default_folder_id')], context):
defaults['mail_ids']=[]
connection = folder.server_id.connect()
connection.select(folder.path)
result, msgids = connection.search(None,
'FLAGGED' if folder.flag_nonmatching else 'UNDELETED')
if result != 'OK':
logger.error('Could not search mailbox %s on %s' % (
                    folder.path, folder.server_id.name))
continue
attach_mail_manually_mail._columns['object_id'].selection=[
(folder.model_id.model, folder.model_id.name)]
for msgid in msgids[0].split():
result, msgdata = connection.fetch(msgid, '(RFC822)')
if result != 'OK':
logger.error('Could not fetch %s in %s on %s' % (
                        msgid, folder.path, folder.server_id.name))
continue
mail_message = self.pool.get('mail.thread').message_parse(
cr, uid, msgdata[0][1],
save_original=folder.server_id.original,
context=context)
defaults['mail_ids'].append((0, 0, {
'msgid': msgid,
'subject': mail_message.get('subject', ''),
'date': mail_message.get('date', ''),
'object_id': folder.model_id.model+',False'
}))
connection.close()
return defaults
def attach_mails(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context):
for mail in this.mail_ids:
connection = this.folder_id.server_id.connect()
connection.select(this.folder_id.path)
result, msgdata = connection.fetch(mail.msgid, '(RFC822)')
if result != 'OK':
logger.error('Could not fetch %s in %s on %s' % (
                        mail.msgid, this.folder_id.path,
                        this.folder_id.server_id.name))
continue
mail_message = self.pool.get('mail.thread').message_parse(
cr, uid, msgdata[0][1],
save_original=this.folder_id.server_id.original,
context=context)
this.folder_id.server_id.attach_mail(connection,
mail.object_id.id, this.folder_id, mail_message,
mail.msgid)
connection.close()
return {'type': 'ir.actions.act_window_close'}
class attach_mail_manually_mail(TransientModel):
_name = 'fetchmail.attach.mail.manually.mail'
_columns = {
'wizard_id': fields.many2one('fetchmail.attach.mail.manually',
readonly=True),
'msgid': fields.char('Message id', size=16, readonly=True),
'subject': fields.char('Subject', size=128, readonly=True),
'date': fields.datetime('Date', readonly=True),
'object_id': fields.reference('Object',
selection=lambda self, cr, uid, context:
[(m.model, m.name) for m in
self.pool.get('ir.model').browse(cr, uid,
self.pool.get('ir.model').search(cr, uid, []),
context)], size=128),
}
| agpl-3.0 | 3,642,711,475,559,043,600 | 44.061404 | 78 | 0.528324 | false |
MaheshBhosale/totem-tosha | distribute-0.6.35/setuptools/command/install_egg_info.py | 357 | 3833 | from setuptools import Command
from setuptools.archive_util import unpack_archive
from distutils import log, dir_util
import os, shutil, pkg_resources
class install_egg_info(Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name()+'.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
self.run_command('egg_info')
target = self.target
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink,(self.target,),"Removing "+self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(self.copytree, (),
"Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src,dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/','CVS/':
if src.startswith(skip) or '/'+skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp: return
filename,ext = os.path.splitext(self.target)
filename += '-nspkg.pth'; self.outputs.append(filename)
log.info("Installing %s",filename)
if not self.dry_run:
f = open(filename,'wt')
for pkg in nsp:
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
trailer = '\n'
if '.' in pkg:
trailer = (
"; m and setattr(sys.modules[%r], %r, m)\n"
% ('.'.join(pth[:-1]), pth[-1])
)
f.write(
"import sys,types,os; "
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], "
"*%(pth)r); "
"ie = os.path.exists(os.path.join(p,'__init__.py')); "
"m = not ie and "
"sys.modules.setdefault(%(pkg)r,types.ModuleType(%(pkg)r)); "
"mp = (m or []) and m.__dict__.setdefault('__path__',[]); "
"(p not in mp) and mp.append(p)%(trailer)s"
% locals()
)
f.close()
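    # A rough sketch of the effect (package names are illustrative): if the
    # distribution declares namespace_packages ['a.b'], _get_all_ns_packages()
    # below returns ['a', 'a.b'] and the generated <egg-info>-nspkg.pth gets
    # one line per namespace package which, at interpreter startup, creates
    # the module in sys.modules and appends the install directory to its
    # __path__.
    #
    #     self.distribution.namespace_packages = ['a.b']
    #     self._get_all_ns_packages()    # -> ['a', 'a.b']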
def _get_all_ns_packages(self):
nsp = {}
for pkg in self.distribution.namespace_packages or []:
pkg = pkg.split('.')
while pkg:
nsp['.'.join(pkg)] = 1
pkg.pop()
nsp=list(nsp)
nsp.sort() # set up shorter names first
return nsp
| apache-2.0 | 2,799,021,233,399,775,000 | 29.664 | 85 | 0.519958 | false |
Reflexe/doc_to_pdf | Windows/program/python-core-3.5.0/lib/mailbox.py | 5 | 78418 | """Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
import os
import time
import calendar
import socket
import errno
import copy
import warnings
import email
import email.message
import email.generator
import io
import contextlib
try:
import fcntl
except ImportError:
fcntl = None
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage']
linesep = os.linesep.encode('ascii')
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
with contextlib.closing(self.get_file(key)) as file:
return self._factory(file)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError.
Uses email.message.Message to create a 7bit clean string
representation of the message."""
return email.message_from_bytes(self.get_bytes(key)).as_string()
def get_bytes(self, key):
"""Return a byte string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.keys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def _string_to_bytes(self, message):
# If a message is not 7bit clean, we refuse to handle it since it
# likely came from reading invalid messages in text mode, and that way
# lies mojibake.
try:
return message.encode('ascii')
except UnicodeError:
raise ValueError("String input must be ASCII-only; "
"use bytes or a Message instead")
# Whether each message must end in a newline
_append_newline = False
def _dump_message(self, message, target, mangle_from_=False):
# This assumes the target file is open in binary mode.
"""Dump message contents to target file."""
if isinstance(message, email.message.Message):
buffer = io.BytesIO()
gen = email.generator.BytesGenerator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
data = buffer.read()
data = data.replace(b'\n', linesep)
target.write(data)
if self._append_newline and not data.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
elif isinstance(message, (str, bytes, io.StringIO)):
if isinstance(message, io.StringIO):
warnings.warn("Use of StringIO input is deprecated, "
"use BytesIO instead", DeprecationWarning, 3)
message = message.getvalue()
if isinstance(message, str):
message = self._string_to_bytes(message)
if mangle_from_:
message = message.replace(b'\nFrom ', b'\n>From ')
message = message.replace(b'\n', linesep)
target.write(message)
if self._append_newline and not message.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
elif hasattr(message, 'read'):
if hasattr(message, 'buffer'):
warnings.warn("Use of text mode files is deprecated, "
"use a binary mode file instead", DeprecationWarning, 3)
message = message.buffer
lastline = None
while True:
line = message.readline()
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + b'\n'
elif line.endswith(b'\r'):
line = line[:-1] + b'\n'
if not line:
break
if mangle_from_ and line.startswith(b'From '):
line = b'>From ' + line[5:]
line = line.replace(b'\n', linesep)
target.write(line)
lastline = line
if self._append_newline and lastline and not lastline.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
else:
raise TypeError('Invalid message type: %s' % type(message))
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
def __init__(self, dirname, factory=None, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
self._paths = {
'tmp': os.path.join(self._path, 'tmp'),
'new': os.path.join(self._path, 'new'),
'cur': os.path.join(self._path, 'cur'),
}
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0o700)
for path in self._paths.values():
os.mkdir(path, 0o700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
self._toc_mtimes = {'cur': 0, 'new': 0}
self._last_read = 0 # Records last time we read cur/new
self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
except BaseException:
tmp_file.close()
os.remove(tmp_file.name)
raise
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
if isinstance(message, MaildirMessage):
os.utime(tmp_file.name,
(os.path.getatime(tmp_file.name), message.get_date()))
# No file modification should be done after the file is moved to its
# final position in order to prevent race conditions with changes
# from other programs
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError as e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
return uniq
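    # A minimal usage sketch (the path and message text are made up): a
    # message is first written to tmp/ and then linked into new/, and add()
    # returns the unique name that acts as the key.
    #
    #     md = Maildir('~/Maildir')
    #     key = md.add(b'Subject: hi\n\nhello world\n')
    #     msg = md.get_message(key)     # a MaildirMessage, subdir 'new'
    #     md.discard(key)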
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except (KeyError, FileNotFoundError):
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
tmp_path = os.path.join(self._path, temp_subpath)
new_path = os.path.join(self._path, subdir, key + suffix)
if isinstance(message, MaildirMessage):
os.utime(tmp_path,
(os.path.getatime(tmp_path), message.get_date()))
# No file modification should be done after the file is moved to its
# final position in order to prevent race conditions with changes
# from other programs
os.rename(tmp_path, new_path)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
with open(os.path.join(self._path, subpath), 'rb') as f:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_bytes(self, key):
"""Return a bytes representation or raise a KeyError."""
with open(os.path.join(self._path, self._lookup(key)), 'rb') as f:
return f.read().replace(linesep, b'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
# Maildir changes are always written immediately, so there's nothing
# to do.
pass
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
0o666))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except FileNotFoundError:
Maildir._count += 1
try:
return _create_carefully(path)
except FileExistsError:
pass
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
def _refresh(self):
"""Update table of contents mapping."""
# If it has been less than two seconds since the last _refresh() call,
# we have to unconditionally re-read the mailbox just in case it has
# been modified, because os.path.getmtime() has a 2 sec resolution in the
# most common worst case (FAT) and a 1 sec resolution typically. This
# results in a few unnecessary re-reads when _refresh() is called
# multiple times in that interval, but once the clock ticks over, we
# will only re-read as needed. Because the filesystem might be being
# served by an independent system with its own clock, we record and
# compare with the mtimes from the filesystem. Because the other
# system's clock might be skewing relative to our clock, we add an
# extra delta to our wait. The default is one tenth second, but is an
# instance variable and so can be adjusted if dealing with a
# particularly skewed or irregular system.
if time.time() - self._last_read > 2 + self._skewfactor:
refresh = False
for subdir in self._toc_mtimes:
mtime = os.path.getmtime(self._paths[subdir])
if mtime > self._toc_mtimes[subdir]:
refresh = True
self._toc_mtimes[subdir] = mtime
if not refresh:
return
# Refresh toc
self._toc = {}
for subdir in self._toc_mtimes:
path = self._paths[subdir]
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
self._last_read = time.time()
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[next(self._onetime_keys)]
except StopIteration:
return None
except KeyError:
continue
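# --- Illustrative sketch (not part of the original module) ---------------
# A minimal example of how the Maildir class above is typically used: add()
# performs the qmail-safe tmp -> new delivery described in its comments, and
# iterkeys()/get_message() read messages back.  The directory name is an
# assumption made purely for illustration.
def _example_maildir_usage(dirname='/tmp/example_maildir'):
    """Sketch: deliver one message and list the mailbox contents."""
    md = Maildir(dirname, create=True)          # creates tmp/, new/, cur/
    key = md.add(b'Subject: hello\n\nbody\n')   # returns the unique file name
    for k in md.iterkeys():
        msg = md.get_message(k)                 # a MaildirMessage instance
        print(k, msg.get_subdir(), msg.get_flags())
    return key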
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno in (errno.EACCES, errno.EROFS):
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._pending_sync = False # No need to sync the file
self._locked = False
self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
# _append_message appends the message to the mailbox file. We
# don't need a full rewrite + rename, sync is enough.
self._pending_sync = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
yield from self._toc.keys()
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
if self._pending_sync:
# Messages have only been added, so syncing the file
# is enough.
_sync_flush(self._file)
self._pending_sync = False
return
# In order to be writing anything out at all, self._toc must
# already have been generated (and presumably has been modified
# by adding or deleting an item).
assert self._toc is not None
# Check length of self._file; if it's changed, some other process
# has modified the mailbox since we scanned it.
self._file.seek(0, 2)
cur_len = self._file.tell()
if cur_len != self._file_length:
raise ExternalClashError('Size of mailbox file changed '
'(expected %i, found %i)' %
(self._file_length, cur_len))
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if not buffer:
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
self._file_length = new_file.tell()
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
# Make sure the new file's mode is the same as the old file's
mode = os.stat(self._path).st_mode
os.chmod(new_file.name, mode)
try:
os.rename(new_file.name, self._path)
except FileExistsError:
os.remove(self._path)
os.rename(new_file.name, self._path)
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
self._pending_sync = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
try:
self.flush()
finally:
try:
if self._locked:
self.unlock()
finally:
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
if len(self._toc) == 0 and not self._pending:
# This is the first message, and the _pre_mailbox_hook
# hasn't yet been called. If self._pending is True,
# messages have been removed, so _pre_mailbox_hook must
# have been called already.
self._pre_mailbox_hook(self._file)
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
except BaseException:
self._file.truncate(before)
raise
self._file.flush()
self._file_length = self._file.tell() # Record current length of mailbox
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(linesep, b'')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(linesep, b'\n'))
msg.set_from(from_line[5:].decode('ascii'))
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
return email.message_from_bytes(
self.get_bytes(key)).as_string(unixfrom=from_)
def get_bytes(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(linesep, b'\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str):
message = self._string_to_bytes(message)
if isinstance(message, bytes) and message.startswith(b'From '):
newline = message.find(b'\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = b''
elif isinstance(message, _mboxMMDFMessage):
author = message.get_from().encode('ascii')
from_line = b'From ' + author
elif isinstance(message, email.message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is not None:
from_line = from_line.encode('ascii')
if from_line is None:
from_line = b'From MAILER-DAEMON ' + time.asctime(time.gmtime()).encode()
start = self._file.tell()
self._file.write(from_line + linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
# All messages must end in a newline character, and
# _post_message_hook outputs an empty line between messages.
_append_newline = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
last_was_empty = False
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith(b'From '):
if len(stops) < len(starts):
if last_was_empty:
stops.append(line_pos - len(linesep))
else:
# The last line before the "From " line wasn't
# blank, but we consider it a start of a
# message anyway.
stops.append(line_pos)
starts.append(line_pos)
last_was_empty = False
elif not line:
if last_was_empty:
stops.append(line_pos - len(linesep))
else:
stops.append(line_pos)
break
elif line == linesep:
last_was_empty = True
else:
last_was_empty = False
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write(b'\001\001\001\001' + linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep + b'\001\001\001\001' + linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith(b'\001\001\001\001' + linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == b'\001\001\001\001' + linesep:
stops.append(line_pos - len(linesep))
break
elif not line:
stops.append(line_pos)
break
elif not line:
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
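# --- Illustrative sketch (not part of the original module) ---------------
# How the two single-file formats above frame messages on disk: mbox prefixes
# each message with a "From " line, while MMDF brackets it with
# \001\001\001\001 lines (see the _pre/_post_message_hook methods).  The path
# and message text are assumptions made purely for illustration.
def _example_mbox_roundtrip(path='/tmp/example.mbox'):
    """Sketch: add one message to an mbox mailbox and read it back."""
    mb = mbox(path, create=True)
    mb.lock()                                    # lockf + dot locking
    try:
        key = mb.add('Subject: hi\n\nmbox body\n')
        raw = mb.get_bytes(key)                  # message without the "From " line
    finally:
        mb.unlock()
        mb.close()
    return raw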
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0o700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
closed = False
try:
if self._locked:
_lock_file(f)
try:
try:
self._dump_message(message, f)
except BaseException:
# Unlock and close so it can be deleted on Windows
if self._locked:
_unlock_file(f)
_sync_close(f)
closed = True
os.remove(new_path)
raise
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
if not closed:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
else:
f.close()
os.remove(path)
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'rb+')
else:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
with f:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
for name, key_list in self.get_sequences().items():
if key in key_list:
msg.add_sequence(name)
return msg
def get_bytes(self, key):
"""Return a bytes representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'rb+')
else:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
with f:
if self._locked:
_lock_file(f)
try:
return f.read().replace(linesep, b'\n')
finally:
if self._locked:
_unlock_file(f)
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
raise NotEmptyError('Folder not empty: %s' % self._path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
with open(os.path.join(self._path, '.mh_sequences'), 'r', encoding='ASCII') as f:
all_keys = set(self.keys())
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
results[name] = [key for key in sorted(keys) \
if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
return results
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+', encoding='ASCII')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.items():
if len(keys) == 0:
continue
f.write(name + ':')
prev = None
completing = False
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.items():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
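# --- Illustrative sketch (not part of the original module) ---------------
# MH keeps per-message state in the .mh_sequences file rather than in the
# messages themselves; get_sequences()/set_sequences() above read and rewrite
# that file.  The directory name is an assumption made purely for illustration.
def _example_mh_sequences(dirname='/tmp/example_mh'):
    """Sketch: add a message to an MH folder and tag it via a sequence."""
    mh = MH(dirname, create=True)
    key = mh.add(b'Subject: x\n\nbody\n')        # MH keys are small integers
    seqs = mh.get_sequences()                    # name -> sorted list of keys
    seqs.setdefault('flagged', []).append(key)
    mh.set_sequences(seqs)                       # rewrites .mh_sequences
    return mh.get_sequences()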
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset({'unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'})
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip b'1,' line specifying labels.
original_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == b'*** EOOH ***' + linesep or not line:
break
original_headers.write(line.replace(linesep, b'\n'))
visible_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == linesep or not line:
break
visible_headers.write(line.replace(linesep, b'\n'))
# Read up to the stop, or to the end
n = stop - self._file.tell()
assert n >= 0
body = self._file.read(n)
body = body.replace(linesep, b'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_bytes(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip b'1,' line specifying labels.
original_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == b'*** EOOH ***' + linesep or not line:
break
original_headers.write(line.replace(linesep, b'\n'))
while True:
line = self._file.readline()
if line == linesep or not line:
break
headers = original_headers.getvalue()
n = stop - self._file.tell()
assert n >= 0
data = self._file.read(n)
data = data.replace(linesep, b'\n')
return headers + data
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return io.BytesIO(self.get_bytes(key).replace(b'\n', linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == b'\037\014' + linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(b',')
if label.strip()]
label_lists.append(labels)
elif line == b'\037' or line == b'\037' + linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(linesep))
elif not line:
stops.append(line_pos - len(linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
babyl = b'BABYL OPTIONS:' + linesep
babyl += b'Version: 5' + linesep
labels = self.get_labels()
labels = (label.encode() for label in labels)
babyl += b'Labels:' + b','.join(labels) + linesep
babyl += b'\037'
f.write(babyl)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write(b'\014' + linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep + b'\037')
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write(b'1')
for label in special_labels:
self._file.write(b', ' + label.encode())
self._file.write(b',,')
for label in labels:
self._file.write(b' ' + label.encode() + b',')
self._file.write(linesep)
else:
self._file.write(b'1,,' + linesep)
if isinstance(message, email.message.Message):
orig_buffer = io.BytesIO()
orig_generator = email.generator.BytesGenerator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
self._file.write(b'*** EOOH ***' + linesep)
if isinstance(message, BabylMessage):
vis_buffer = io.BytesIO()
vis_generator = email.generator.BytesGenerator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if not buffer:
break
self._file.write(buffer.replace(b'\n', linesep))
elif isinstance(message, (bytes, str, io.StringIO)):
if isinstance(message, io.StringIO):
warnings.warn("Use of StringIO input is deprecated, "
"use BytesIO instead", DeprecationWarning, 3)
message = message.getvalue()
if isinstance(message, str):
message = self._string_to_bytes(message)
body_start = message.find(b'\n\n') + 2
if body_start - 2 != -1:
self._file.write(message[:body_start].replace(b'\n', linesep))
self._file.write(b'*** EOOH ***' + linesep)
self._file.write(message[:body_start].replace(b'\n', linesep))
self._file.write(message[body_start:].replace(b'\n', linesep))
else:
self._file.write(b'*** EOOH ***' + linesep + linesep)
self._file.write(message.replace(b'\n', linesep))
elif hasattr(message, 'readline'):
if hasattr(message, 'buffer'):
warnings.warn("Use of text mode files is deprecated, "
"use a binary mode file instead", DeprecationWarning, 3)
message = message.buffer
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + b'\n'
elif line.endswith(b'\r'):
line = line[:-1] + b'\n'
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
if first_pass:
first_pass = False
self._file.write(b'*** EOOH ***' + linesep)
message.seek(original_pos)
else:
break
while True:
line = message.readline()
if not line:
break
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + linesep
elif line.endswith(b'\r'):
line = line[:-1] + linesep
elif line.endswith(b'\n'):
line = line[:-1] + linesep
self._file.write(line)
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
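# --- Illustrative sketch (not part of the original module) ---------------
# The Babyl class above keeps per-message labels outside the message body
# (in the b'1,' line written by _install_message).  A minimal round trip,
# with a throwaway path chosen only for illustration.
def _example_babyl_labels(path='/tmp/example.babyl'):
    """Sketch: store a labelled message in a Babyl mailbox and read it back."""
    bab = Babyl(path, create=True)
    msg = BabylMessage(b'Subject: x\n\nbody\n')  # BabylMessage is defined below
    msg.add_label('important')                   # user-defined (non-special) label
    key = bab.add(msg)
    bab.flush()
    return bab.get_message(key).get_labels()     # ['important']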
class Message(email.message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, bytes):
self._become_message(email.message_from_bytes(message))
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif isinstance(message, io.TextIOWrapper):
self._become_message(email.message_from_file(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_binary_file(message))
elif message is None:
email.message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
type_specific = getattr(message, '_type_specific_attributes', [])
for name in message.__dict__:
if name not in type_specific:
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
_type_specific_attributes = ['_subdir', '_info', '_date']
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags():
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
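# --- Illustrative sketch (not part of the original module) ---------------
# How the _explain_to() mapping above behaves in practice: constructing an
# mboxMessage from a MaildirMessage translates the Maildir flags into mbox
# Status/X-Status flags (S -> R, R -> A, F -> F, T -> D, cur -> O).
def _example_maildir_to_mbox_flags():
    """Sketch: convert Maildir flags into mbox Status/X-Status flags."""
    src = MaildirMessage(b'Subject: x\n\nbody\n')
    src.set_subdir('cur')            # the message has been seen by the reader
    src.set_flags('FRS')             # flagged, replied, seen
    dst = mboxMessage(src)           # conversion goes through _explain_to()
    return dst.get_flags()           # contains 'R', 'O', 'F' and 'A'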
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
_type_specific_attributes = ['_from']
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
_type_specific_attributes = ['_sequences']
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
if not sequence in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence type must be str: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
_type_specific_attributes = ['_labels', '_visible']
def __init__(self, message=None):
"""Initialize a BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def read1(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read1)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
while True:
line = self.readline()
if not line:
return
yield line
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
if hasattr(self, '_file'):
try:
if hasattr(self._file, 'close'):
self._file.close()
finally:
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
def __enter__(self):
"""Context management protocol support."""
return self
def __exit__(self, *exc):
self.close()
def readable(self):
return self._file.readable()
def writable(self):
return self._file.writable()
def seekable(self):
return self._file.seekable()
def flush(self):
return self._file.flush()
@property
def closed(self):
if not hasattr(self, '_file'):
return True
if not hasattr(self._file, 'closed'):
return False
return self._file.closed
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return b''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
def close(self):
# do *not* close the underlying file object for partial files,
# since it's global to the mailbox object
if hasattr(self, '_file'):
del self._file
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except OSError as e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except FileExistsError:
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o666)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
| mpl-2.0 | 8,511,278,432,863,483,000 | 35.609711 | 89 | 0.528399 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_1_0/collection_schedule_broker.py | 17 | 28107 | from ..broker import Broker
class CollectionScheduleBroker(Broker):
controller = "collection_schedules"
def index(self, **kwargs):
"""Lists the available collection schedules. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.2
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_group_id, device_id, schedule, schedule_type.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each CollectionSchedule. Valid values are id, device_group_id, device_id, schedule, schedule_type. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedules: An array of the CollectionSchedule objects that match the specified input criteria.
:rtype collection_schedules: Array of CollectionSchedule
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
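# Illustrative usage (not part of the generated client).  Assuming `broker`
# is an instance of this class obtained from an authenticated NetMRI client,
# a paged, sorted listing could look like the following; the device group id
# and the attributes printed are assumptions for illustration only.
#
#     schedules = broker.index(device_group_id=[1], limit=100,
#                              sort=['schedule_type'], dir=['asc'])
#     for sched in schedules:
#         print(sched.id, sched.schedule_type, sched.schedule)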
def show(self, **kwargs):
"""Shows the details for the specified collection schedule.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedule: The collection schedule identified by the specified id.
:rtype collection_schedule: CollectionSchedule
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
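# Illustrative usage (not part of the generated client).  Fetching a single
# schedule by its internal identifier; the id value below is made up.
#
#     sched = broker.show(id=42)
#     print(sched.schedule, sched.schedule_type)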
def search(self, **kwargs):
"""Lists the available collection schedules matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param device_id: The internal NetMRI identifier for device.
:type device_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_id: The internal NetMRI identifier for device.
:type device_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param schedule: Schedule name.
:type schedule: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule: Schedule name.
:type schedule: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param schedule_type: The type of schedule.
:type schedule_type: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_type: The type of schedule.
:type schedule_type: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_group_id, device_id, schedule, schedule_type.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each CollectionSchedule. Valid values are id, device_group_id, device_id, schedule, schedule_type. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against collection schedules, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: device_group_id, device_id, id, schedule, schedule_type.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Keep in mind that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedules: An array of the CollectionSchedule objects that match the specified input criteria.
:rtype collection_schedules: Array of CollectionSchedule
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
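# Illustrative usage (not part of the generated client).  The search method
# accepts the same paging options as index plus a free-form query; the field
# values below are made up for illustration.
#
#     weekly = broker.search(schedule_type=['weekly'], limit=50)
#     matches = broker.search(query='backup')   # containment/regex match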
def find(self, **kwargs):
"""Lists the available collection schedules matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: device_group_id, device_id, id, schedule, schedule_type.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_group_id: The operator to apply to the field device_group_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_group_id: The internal NetMRI identifier for device group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_group_id: If op_device_group_id is specified, the field named in this input will be compared to the value in device_group_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_group_id must be specified if op_device_group_id is specified.
:type val_f_device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_group_id: If op_device_group_id is specified, this value will be compared to the value in device_group_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_group_id must be specified if op_device_group_id is specified.
:type val_c_device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_id: The operator to apply to the field device_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_id: The internal NetMRI identifier for device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_id: If op_device_id is specified, the field named in this input will be compared to the value in device_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_id must be specified if op_device_id is specified.
:type val_f_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_id: If op_device_id is specified, this value will be compared to the value in device_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_id must be specified if op_device_id is specified.
:type val_c_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for schedule. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_schedule: The operator to apply to the field schedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. schedule: Schedule name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_schedule: If op_schedule is specified, the field named in this input will be compared to the value in schedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_schedule must be specified if op_schedule is specified.
:type val_f_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_schedule: If op_schedule is specified, this value will be compared to the value in schedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_schedule must be specified if op_schedule is specified.
:type val_c_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_schedule_type: The operator to apply to the field schedule_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. schedule_type: The type of schedule. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_schedule_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_schedule_type: If op_schedule_type is specified, the field named in this input will be compared to the value in schedule_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_schedule_type must be specified if op_schedule_type is specified.
:type val_f_schedule_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_schedule_type: If op_schedule_type is specified, this value will be compared to the value in schedule_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_schedule_type must be specified if op_schedule_type is specified.
:type val_c_schedule_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_group_id, device_id, schedule, schedule_type.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each CollectionSchedule. Valid values are id, device_group_id, device_id, schedule, schedule_type. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedules: An array of the CollectionSchedule objects that match the specified input criteria.
:rtype collection_schedules: Array of CollectionSchedule
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
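# Usage sketch (hedged): illustrates the op_* / val_c_* triplets described above;
# the broker handle is hypothetical, as in the search() sketch.
#
#     # schedules whose device_group_id lies between 10 and 20
#     schedules = broker.find(op_device_group_id='between',
#                             val_c_device_group_id='10,20',
#                             sort=['id'], dir=['asc'])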
def destroy(self, **kwargs):
"""Deletes the specified collection schedule from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def update(self, **kwargs):
"""Updates a device group specific polling schedule.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: Id of the polling schedule to update.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_cron: Either the cron entry (cron schedule string), or a number that denotes the frequency (in minutes) polling should run.
:type schedule_cron: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_json: NetMRI internal parameters generated by 'cronscheduler' form transmitted in json format for setting cron schedule string.
:type schedule_json: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def create(self, **kwargs):
"""Creates device group specific polling schedule.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_group_id: Device group Id polling schedule belongs to.
:type device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_id: Device Id polling schedule belongs to.
:type device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_cron: Either the cron entry (cron schedule string), or a number that denotes the frequency (in minutes) polling should run.
:type schedule_cron: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_json: NetMRI internal parameters generated by 'cronscheduler' form transmitted in json format for setting cron schedule string.
:type schedule_json: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
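# Lifecycle sketch (hedged, hypothetical broker handle and id value): shows how the
# create(), update() and destroy() calls documented above fit together. Per the
# parameter descriptions, schedule_cron accepts either a cron string or a frequency
# in minutes.
#
#     broker.create(device_group_id='3', schedule_cron='0 2 * * *')
#     broker.update(id=42, schedule_cron='30')   # switch to polling every 30 minutes
#     broker.destroy(id=42)                      # id is the internal NetMRI schedule id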
def spm_schedules(self, **kwargs):
"""Lists the Switch Port Management polling schedule entries for a device group.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_group_id: Device group ID for the requested polling schedules.
:type device_group_id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("spm_schedules"), kwargs)
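# e.g. (hedged, hypothetical broker handle):
#     broker.spm_schedules(device_group_id=3)   # SPM polling schedules for one device group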
| apache-2.0 | 5,527,004,766,665,639,000 | 44.85155 | 474 | 0.575479 | false |
pk-sam/crosswalk-test-suite | wrt/tct-rt02-wrt-tests/inst.apk.py | 903 | 3180 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# No need to handle a timeout in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
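# Example invocations (sketch; the device serial is hypothetical, the option names
# come from the OptionParser setup above):
#
#     python inst.apk.py -s 0123456789ABCDEF -i   # install every .apk under SCRIPT_DIR
#     python inst.apk.py -s 0123456789ABCDEF -u   # uninstall the matching org.xwalk.* packages
#     python inst.apk.py -i                       # auto-detect the single attached device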
| bsd-3-clause | 1,567,336,760,014,492,700 | 28.719626 | 92 | 0.543396 | false |
Radagast-red/golem | golem/interface/client/environments.py | 1 | 1641 | from golem.interface.command import group, CommandHelper, Argument, command, CommandResult
@group(name="envs", help="Manage environments")
class Environments(object):
name = Argument('name', help="Environment name")
table_headers = ['name', 'supported', 'active', 'performance', 'description']
sort = Argument(
'--sort',
choices=table_headers,
optional=True,
default=None,
help="Sort environments"
)
@command(argument=sort, help="Show environments")
def show(self, sort):
deferred = Environments.client.get_environments_perf()
result = CommandHelper.wait_for(deferred) or []
values = []
for env in result:
values.append([
env['id'],
str(env['supported']),
str(env['active']),
str(env['performance']),
env['description']
])
return CommandResult.to_tabular(Environments.table_headers, values, sort=sort)
@command(argument=name, help="Enable environment")
def enable(self, name):
deferred = Environments.client.enable_environment(name)
return CommandHelper.wait_for(deferred)
@command(argument=name, help="Disable environment")
def disable(self, name):
deferred = Environments.client.disable_environment(name)
return CommandHelper.wait_for(deferred)
@command(argument=name, help="Recount performance for an environment")
def recount(self, name):
deferred = Environments.client.run_benchmark(name)
return CommandHelper.wait_for(deferred, timeout=1800)
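# Usage sketch (hedged): the exact CLI entry point depends on how golem wires up its
# interface groups; with the @group/@command registration above the subcommands map
# roughly to:
#
#     envs show --sort performance
#     envs enable <name>
#     envs disable <name>
#     envs recount <name>    # re-runs the benchmark; waits up to 30 minutes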
| gpl-3.0 | -652,449,734,486,537,700 | 31.176471 | 90 | 0.632541 | false |
pymedusa/SickRage | ext/tornado/platform/auto.py | 4 | 1877 | #
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of platform-specific functionality.
For each function or class described in `tornado.platform.interface`,
the appropriate platform-specific implementation exists in this module.
Most code that needs access to this functionality should do e.g.::
from tornado.platform.auto import set_close_exec
"""
from __future__ import absolute_import, division, print_function
import os
if 'APPENGINE_RUNTIME' in os.environ:
from tornado.platform.common import Waker
def set_close_exec(fd):
pass
elif os.name == 'nt':
from tornado.platform.common import Waker
from tornado.platform.windows import set_close_exec
else:
from tornado.platform.posix import set_close_exec, Waker
try:
# monotime monkey-patches the time module to have a monotonic function
# in versions of python before 3.3.
import monotime
# Silence pyflakes warning about this unused import
monotime
except ImportError:
pass
try:
# monotonic can provide a monotonic function in versions of python before
# 3.3, too.
from monotonic import monotonic as monotonic_time
except ImportError:
try:
from time import monotonic as monotonic_time
except ImportError:
monotonic_time = None
__all__ = ['Waker', 'set_close_exec', 'monotonic_time']
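# Usage sketch (the import line is taken from the module docstring above; the socket
# object is a hypothetical example):
#
#     from tornado.platform.auto import set_close_exec, monotonic_time
#     set_close_exec(sock.fileno())    # mark the fd close-on-exec
#     if monotonic_time is not None:   # None when no monotonic clock is available
#         start = monotonic_time()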
| gpl-3.0 | 137,944,749,172,538,960 | 31.362069 | 77 | 0.741609 | false |
google/llvm-propeller | llvm/bindings/python/llvm/tests/test_object.py | 36 | 2201 | from numbers import Integral
from .base import TestBase
from ..object import ObjectFile
from ..object import Relocation
from ..object import Section
from ..object import Symbol
class TestObjectFile(TestBase):
def get_object_file(self):
source = self.get_test_binary()
return ObjectFile(filename=source)
def test_create_from_file(self):
self.get_object_file()
def test_get_sections(self):
o = self.get_object_file()
count = 0
for section in o.get_sections():
count += 1
assert isinstance(section, Section)
assert isinstance(section.name, str)
assert isinstance(section.size, Integral)
assert isinstance(section.contents, str)
assert isinstance(section.address, Integral)
assert len(section.contents) == section.size
self.assertGreater(count, 0)
for section in o.get_sections():
section.cache()
def test_get_symbols(self):
o = self.get_object_file()
count = 0
for symbol in o.get_symbols():
count += 1
assert isinstance(symbol, Symbol)
assert isinstance(symbol.name, str)
assert isinstance(symbol.address, Integral)
assert isinstance(symbol.size, Integral)
self.assertGreater(count, 0)
for symbol in o.get_symbols():
symbol.cache()
def test_symbol_section_accessor(self):
o = self.get_object_file()
for symbol in o.get_symbols():
section = symbol.section
assert isinstance(section, Section)
break
def test_get_relocations(self):
o = self.get_object_file()
for section in o.get_sections():
for relocation in section.get_relocations():
assert isinstance(relocation, Relocation)
assert isinstance(relocation.address, Integral)
assert isinstance(relocation.offset, Integral)
assert isinstance(relocation.type_number, Integral)
assert isinstance(relocation.type_name, str)
assert isinstance(relocation.value_string, str)
| apache-2.0 | 717,577,544,563,070,100 | 30.898551 | 67 | 0.612449 | false |
wavewave/madgraph-auto-model | modelrepo/ADMXQLD333/particles.py | 8 | 24125 | # This file was automatically created by FeynRules $Revision: 510 $
# Mathematica version: 7.0 for Linux x86 (64-bit) (February 18, 2009)
# Date: Wed 2 Mar 2011 15:09:55
from __future__ import division
from object_library import all_particles, Particle
import parameters as Param
a = Particle(pdg_code = 22,
name = 'a',
antiname = 'a',
spin = 3,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'a',
antitexname = 'a',
charge = 0,
Y = 0,
GhostNumber = 0)
Z = Particle(pdg_code = 23,
name = 'Z',
antiname = 'Z',
spin = 3,
color = 1,
mass = Param.MZ,
width = Param.WZ,
texname = 'Z',
antitexname = 'Z',
charge = 0,
Y = 0,
GhostNumber = 0)
W__plus__ = Particle(pdg_code = 24,
name = 'W+',
antiname = 'W-',
spin = 3,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'W+',
antitexname = 'W+',
charge = 1,
Y = 0,
GhostNumber = 0)
W__minus__ = W__plus__.anti()
g = Particle(pdg_code = 21,
name = 'g',
antiname = 'g',
spin = 3,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'g',
antitexname = 'g',
charge = 0,
Y = 0,
GhostNumber = 0)
n1 = Particle(pdg_code = 1000022,
name = 'n1',
antiname = 'n1',
spin = 2,
color = 1,
mass = Param.Mneu1,
width = Param.Wneu1,
texname = 'n1',
antitexname = 'n1',
charge = 0,
Y = 0,
GhostNumber = 0)
n2 = Particle(pdg_code = 1000023,
name = 'n2',
antiname = 'n2',
spin = 2,
color = 1,
mass = Param.Mneu2,
width = Param.Wneu2,
texname = 'n2',
antitexname = 'n2',
charge = 0,
Y = 0,
GhostNumber = 0)
n3 = Particle(pdg_code = 1000025,
name = 'n3',
antiname = 'n3',
spin = 2,
color = 1,
mass = Param.Mneu3,
width = Param.Wneu3,
texname = 'n3',
antitexname = 'n3',
charge = 0,
Y = 0,
GhostNumber = 0)
n4 = Particle(pdg_code = 1000035,
name = 'n4',
antiname = 'n4',
spin = 2,
color = 1,
mass = Param.Mneu4,
width = Param.Wneu4,
texname = 'n4',
antitexname = 'n4',
charge = 0,
Y = 0,
GhostNumber = 0)
x1__plus__ = Particle(pdg_code = 1000024,
name = 'x1+',
antiname = 'x1-',
spin = 2,
color = 1,
mass = Param.Mch1,
width = Param.Wch1,
texname = 'x1+',
antitexname = 'x1+',
charge = 1,
Y = 0,
GhostNumber = 0)
x1__minus__ = x1__plus__.anti()
x2__plus__ = Particle(pdg_code = 1000037,
name = 'x2+',
antiname = 'x2-',
spin = 2,
color = 1,
mass = Param.Mch2,
width = Param.Wch2,
texname = 'x2+',
antitexname = 'x2+',
charge = 1,
Y = 0,
GhostNumber = 0)
x2__minus__ = x2__plus__.anti()
go = Particle(pdg_code = 1000021,
name = 'go',
antiname = 'go',
spin = 2,
color = 8,
mass = Param.Mgo,
width = Param.Wgo,
texname = 'go',
antitexname = 'go',
charge = 0,
Y = 0,
GhostNumber = 0)
h01 = Particle(pdg_code = 25,
name = 'h1',
antiname = 'h1',
spin = 1,
color = 1,
mass = Param.MH01,
width = Param.WH01,
texname = 'h1',
antitexname = 'h1',
charge = 0,
Y = 0,
GhostNumber = 0)
h02 = Particle(pdg_code = 35,
name = 'h02',
antiname = 'h02',
spin = 1,
color = 1,
mass = Param.MH02,
width = Param.WH02,
texname = 'h02',
antitexname = 'h02',
charge = 0,
Y = 0,
GhostNumber = 0)
A0 = Particle(pdg_code = 36,
name = 'A0',
antiname = 'A0',
spin = 1,
color = 1,
mass = Param.MA0,
width = Param.WA0,
texname = 'A0',
antitexname = 'A0',
charge = 0,
Y = 0,
GhostNumber = 0)
H__plus__ = Particle(pdg_code = 37,
name = 'H+',
antiname = 'H-',
spin = 1,
color = 1,
mass = Param.MH,
width = Param.WH,
texname = 'H+',
antitexname = 'H+',
charge = 1,
Y = 0,
GhostNumber = 0)
H__minus__ = H__plus__.anti()
G0 = Particle(pdg_code = 250,
name = 'G0',
antiname = 'G0',
spin = 1,
color = 1,
mass = Param.MZ,
width = Param.ZERO,
texname = 'G0',
antitexname = 'G0',
GoldstoneBoson = True,
charge = 0,
Y = 0,
GhostNumber = 0)
G__plus__ = Particle(pdg_code = 251,
name = 'G+',
antiname = 'G-',
spin = 1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = 'G+',
antitexname = 'G+',
GoldstoneBoson = True,
charge = 1,
Y = 0,
GhostNumber = 0)
G__minus__ = G__plus__.anti()
ve = Particle(pdg_code = 12,
name = 've',
antiname = 've~',
spin = 2,
color = 1,
mass = Param.Mve,
width = Param.ZERO,
texname = 've',
antitexname = 've',
charge = 0,
Y = 0,
GhostNumber = 0)
ve__tilde__ = ve.anti()
vm = Particle(pdg_code = 14,
name = 'vm',
antiname = 'vm~',
spin = 2,
color = 1,
mass = Param.Mvm,
width = Param.ZERO,
texname = 'vm',
antitexname = 'vm',
charge = 0,
Y = 0,
GhostNumber = 0)
vm__tilde__ = vm.anti()
vt = Particle(pdg_code = 16,
name = 'vt',
antiname = 'vt~',
spin = 2,
color = 1,
mass = Param.Mvt,
width = Param.ZERO,
texname = 'vt',
antitexname = 'vt',
charge = 0,
Y = 0,
GhostNumber = 0)
vt__tilde__ = vt.anti()
e__minus__ = Particle(pdg_code = 11,
name = 'e-',
antiname = 'e+',
spin = 2,
color = 1,
mass = Param.Me,
width = Param.ZERO,
texname = 'e-',
antitexname = 'e-',
charge = -1,
Y = 0,
GhostNumber = 0)
e__plus__ = e__minus__.anti()
mu__minus__ = Particle(pdg_code = 13,
name = 'mu-',
antiname = 'mu+',
spin = 2,
color = 1,
mass = Param.Mm,
width = Param.ZERO,
texname = 'mu-',
antitexname = 'mu-',
charge = -1,
Y = 0,
GhostNumber = 0)
mu__plus__ = mu__minus__.anti()
tau__minus__ = Particle(pdg_code = 15,
name = 'tau-',
antiname = 'tau+',
spin = 2,
color = 1,
mass = Param.Mta,
width = Param.ZERO,
texname = 'tau-',
antitexname = 'tau-',
charge = -1,
Y = 0,
GhostNumber = 0)
tau__plus__ = tau__minus__.anti()
u = Particle(pdg_code = 2,
name = 'u',
antiname = 'u~',
spin = 2,
color = 3,
mass = Param.MU,
width = Param.ZERO,
texname = 'u',
antitexname = 'u',
charge = 2/3,
Y = 0,
GhostNumber = 0)
u__tilde__ = u.anti()
c = Particle(pdg_code = 4,
name = 'c',
antiname = 'c~',
spin = 2,
color = 3,
mass = Param.MC,
width = Param.ZERO,
texname = 'c',
antitexname = 'c',
charge = 2/3,
Y = 0,
GhostNumber = 0)
c__tilde__ = c.anti()
t = Particle(pdg_code = 6,
name = 't',
antiname = 't~',
spin = 2,
color = 3,
mass = Param.MT,
width = Param.WT,
texname = 't',
antitexname = 't',
charge = 2/3,
Y = 0,
GhostNumber = 0)
t__tilde__ = t.anti()
d = Particle(pdg_code = 1,
name = 'd',
antiname = 'd~',
spin = 2,
color = 3,
mass = Param.MD,
width = Param.ZERO,
texname = 'd',
antitexname = 'd',
charge = -1/3,
Y = 0,
GhostNumber = 0)
d__tilde__ = d.anti()
s = Particle(pdg_code = 3,
name = 's',
antiname = 's~',
spin = 2,
color = 3,
mass = Param.MS,
width = Param.ZERO,
texname = 's',
antitexname = 's',
charge = -1/3,
Y = 0,
GhostNumber = 0)
s__tilde__ = s.anti()
b = Particle(pdg_code = 5,
name = 'b',
antiname = 'b~',
spin = 2,
color = 3,
mass = Param.MB,
width = Param.ZERO,
texname = 'b',
antitexname = 'b',
charge = -1/3,
Y = 0,
GhostNumber = 0)
b__tilde__ = b.anti()
sv1 = Particle(pdg_code = 1000012,
name = 'sv1',
antiname = 'sv1~',
spin = 1,
color = 1,
mass = Param.Msn1,
width = Param.Wsn1,
texname = 'sv1',
antitexname = 'sv1',
charge = 0,
Y = 0,
GhostNumber = 0)
sv1__tilde__ = sv1.anti()
sv2 = Particle(pdg_code = 1000014,
name = 'sv2',
antiname = 'sv2~',
spin = 1,
color = 1,
mass = Param.Msn2,
width = Param.Wsn2,
texname = 'sv2',
antitexname = 'sv2',
charge = 0,
Y = 0,
GhostNumber = 0)
sv2__tilde__ = sv2.anti()
sv3 = Particle(pdg_code = 1000016,
name = 'sv3',
antiname = 'sv3~',
spin = 1,
color = 1,
mass = Param.Msn3,
width = Param.Wsn3,
texname = 'sv3',
antitexname = 'sv3',
charge = 0,
Y = 0,
GhostNumber = 0)
sv3__tilde__ = sv3.anti()
sl1__minus__ = Particle(pdg_code = 1000011,
name = 'sl1-',
antiname = 'sl1+',
spin = 1,
color = 1,
mass = Param.Msl1,
width = Param.Wsl1,
texname = 'sl1-',
antitexname = 'sl1-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl1__plus__ = sl1__minus__.anti()
sl2__minus__ = Particle(pdg_code = 1000013,
name = 'sl2-',
antiname = 'sl2+',
spin = 1,
color = 1,
mass = Param.Msl2,
width = Param.Wsl2,
texname = 'sl2-',
antitexname = 'sl2-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl2__plus__ = sl2__minus__.anti()
sl3__minus__ = Particle(pdg_code = 1000015,
name = 'sl3-',
antiname = 'sl3+',
spin = 1,
color = 1,
mass = Param.Msl3,
width = Param.Wsl3,
texname = 'sl3-',
antitexname = 'sl3-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl3__plus__ = sl3__minus__.anti()
sl4__minus__ = Particle(pdg_code = 2000011,
name = 'sl4-',
antiname = 'sl4+',
spin = 1,
color = 1,
mass = Param.Msl4,
width = Param.Wsl4,
texname = 'sl4-',
antitexname = 'sl4-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl4__plus__ = sl4__minus__.anti()
sl5__minus__ = Particle(pdg_code = 2000013,
name = 'sl5-',
antiname = 'sl5+',
spin = 1,
color = 1,
mass = Param.Msl5,
width = Param.Wsl5,
texname = 'sl5-',
antitexname = 'sl5-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl5__plus__ = sl5__minus__.anti()
sl6__minus__ = Particle(pdg_code = 2000015,
name = 'sl6-',
antiname = 'sl6+',
spin = 1,
color = 1,
mass = Param.Msl6,
width = Param.Wsl6,
texname = 'sl6-',
antitexname = 'sl6-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl6__plus__ = sl6__minus__.anti()
su1 = Particle(pdg_code = 1000002,
name = 'su1',
antiname = 'su1~',
spin = 1,
color = 3,
mass = Param.Msu1,
width = Param.Wsu1,
texname = 'su1',
antitexname = 'su1',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su1__tilde__ = su1.anti()
su2 = Particle(pdg_code = 1000004,
name = 'su2',
antiname = 'su2~',
spin = 1,
color = 3,
mass = Param.Msu2,
width = Param.Wsu2,
texname = 'su2',
antitexname = 'su2',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su2__tilde__ = su2.anti()
su3 = Particle(pdg_code = 1000006,
name = 'su3',
antiname = 'su3~',
spin = 1,
color = 3,
mass = Param.Msu3,
width = Param.Wsu3,
texname = 'su3',
antitexname = 'su3',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su3__tilde__ = su3.anti()
su4 = Particle(pdg_code = 2000002,
name = 'su4',
antiname = 'su4~',
spin = 1,
color = 3,
mass = Param.Msu4,
width = Param.Wsu4,
texname = 'su4',
antitexname = 'su4',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su4__tilde__ = su4.anti()
su5 = Particle(pdg_code = 2000004,
name = 'su5',
antiname = 'su5~',
spin = 1,
color = 3,
mass = Param.Msu5,
width = Param.Wsu5,
texname = 'su5',
antitexname = 'su5',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su5__tilde__ = su5.anti()
su6 = Particle(pdg_code = 2000006,
name = 'su6',
antiname = 'su6~',
spin = 1,
color = 3,
mass = Param.Msu6,
width = Param.Wsu6,
texname = 'su6',
antitexname = 'su6',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su6__tilde__ = su6.anti()
sd1 = Particle(pdg_code = 1000001,
name = 'sd1',
antiname = 'sd1~',
spin = 1,
color = 3,
mass = Param.Msd1,
width = Param.Wsd1,
texname = 'sd1',
antitexname = 'sd1',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd1__tilde__ = sd1.anti()
sd2 = Particle(pdg_code = 1000003,
name = 'sd2',
antiname = 'sd2~',
spin = 1,
color = 3,
mass = Param.Msd2,
width = Param.Wsd2,
texname = 'sd2',
antitexname = 'sd2',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd2__tilde__ = sd2.anti()
sd3 = Particle(pdg_code = 1000005,
name = 'sd3',
antiname = 'sd3~',
spin = 1,
color = 3,
mass = Param.Msd3,
width = Param.Wsd3,
texname = 'sd3',
antitexname = 'sd3',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd3__tilde__ = sd3.anti()
sd4 = Particle(pdg_code = 2000001,
name = 'sd4',
antiname = 'sd4~',
spin = 1,
color = 3,
mass = Param.Msd4,
width = Param.Wsd4,
texname = 'sd4',
antitexname = 'sd4',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd4__tilde__ = sd4.anti()
sd5 = Particle(pdg_code = 2000003,
name = 'sd5',
antiname = 'sd5~',
spin = 1,
color = 3,
mass = Param.Msd5,
width = Param.Wsd5,
texname = 'sd5',
antitexname = 'sd5',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd5__tilde__ = sd5.anti()
sd6 = Particle(pdg_code = 2000005,
name = 'sd6',
antiname = 'sd6~',
spin = 1,
color = 3,
mass = Param.Msd6,
width = Param.Wsd6,
texname = 'sd6',
antitexname = 'sd6',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd6__tilde__ = sd6.anti()
ghG = Particle(pdg_code = 9000001,
name = 'ghG',
antiname = 'ghG~',
spin = -1,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghG',
antitexname = 'ghG',
charge = 0,
Y = 0,
GhostNumber = 1)
ghG__tilde__ = ghG.anti()
ghA = Particle(pdg_code = 9000002,
name = 'ghA',
antiname = 'ghA~',
spin = -1,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghA',
antitexname = 'ghA',
charge = 0,
Y = 0,
GhostNumber = 1)
ghA__tilde__ = ghA.anti()
ghZ = Particle(pdg_code = 9000003,
name = 'ghZ',
antiname = 'ghZ~',
spin = -1,
color = 1,
mass = Param.MZ,
width = Param.WZ,
texname = 'ghZ',
antitexname = 'ghZ',
charge = 0,
Y = 0,
GhostNumber = 1)
ghZ__tilde__ = ghZ.anti()
ghWp = Particle(pdg_code = 9000004,
name = 'ghWp',
antiname = 'ghWp~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'ghWp',
antitexname = 'ghWp',
charge = 1,
Y = 0,
GhostNumber = 1)
ghWp__tilde__ = ghWp.anti()
ghWm = Particle(pdg_code = 9000005,
name = 'ghWm',
antiname = 'ghWm~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'ghWm',
antitexname = 'ghWm',
charge = -1,
Y = 0,
GhostNumber = 1)
ghWm__tilde__ = ghWm.anti()
#
# added for ADM
#
# by Deshpreet Singh Bedi and Ian-Woo Kim
# date: Apr 6, 2012
# updated: May 16, 2012
#
# fermion
xx = Particle( pdg_code = 9000101,
name = 'xx',
antiname = 'xx~',
spin = 2,
color = 1,
mass = Param.mxx, # we should change
width = Param.ZERO, # Param.wxx, # we should change
texname = 'xx',
antitexname = 'xx',
charge = 0,
Y = 0,
GhostNumber = 0)
xx__tilde__ = xx.anti()
# scalar
sxxp = Particle(pdg_code = 9000201,
name = 'sxxp',
antiname = 'sxxp~',
spin = 1,
color = 1,
mass = Param.msxxp, # we should change
width = Param.ZERO, # we should change
texname = 'sxp',
antitexname = 's\\tilde{xp}',
charge = 0,
Y = 0,
GhostNumber = 0)
sxxp__tilde__ = sxxp.anti()
sxxn = Particle(pdg_code = 9000202,
name = 'sxxn',
antiname = 'sxxn~',
spin = 1,
color = 1,
mass = Param.msxxn, # we should change
width = Param.ZERO, # we should change
texname = 'sxn',
antitexname = 's\\tilde{xn}',
charge = 0,
Y = 0,
GhostNumber = 0)
sxxn__tilde__ = sxxn.anti()
| bsd-2-clause | -3,648,602,252,764,644,400 | 26.571429 | 69 | 0.339067 | false |