text_prompt (string, 100–17.7k chars, ⌀ = null) | code_prompt (string, 7–9.86k chars, ⌀ = null)
---|---|
<SYSTEM_TASK:>
Iterate over all members and check if any has joined the replica set
<END_TASK>
<USER_TASK:>
Description:
def is_replicaset_initialized(self):
"""
Iterate over all members and check if any has joined the replica set
""" |
# it's possible isMaster returns an "incomplete" result if we
# query a replica set member while it's loading the replica set config
# https://jira.mongodb.org/browse/SERVER-13458
# let's try to detect this state before proceeding
# seems like if the "secondary" field is present, but "setName" isn't,
# it's a good indicator that we just need to wait a bit
# add an uptime check in for good measure
for member in self.get_members():
server = member.get_server()
if server.has_joined_replica():
return True
return False |
<SYSTEM_TASK:>
Attempts to find an id for member_conf from the current member confs
<END_TASK>
<USER_TASK:>
Description:
def match_member_id(self, member_conf, current_member_confs):
"""
Attempts to find an id for member_conf from the current member confs:
there must exist an element whose host matches.
Returns the id of an element of current confs
WHERE member_conf.host and element.host are EQUAL or map to same host
""" |
if current_member_confs is None:
return None
for curr_mem_conf in current_member_confs:
if is_same_address(member_conf['host'], curr_mem_conf['host']):
return curr_mem_conf['_id']
return None |
<SYSTEM_TASK:>
Returns the distribution info
<END_TASK>
<USER_TASK:>
Description:
def get_os_dist_info():
"""
Returns the distribution info
""" |
distribution = platform.dist()
dist_name = distribution[0].lower()
dist_version_str = distribution[1]
if dist_name and dist_version_str:
return dist_name, dist_version_str
else:
return None, None |
<SYSTEM_TASK:>
Gets mongo version of the server if it is running. Otherwise return
<END_TASK>
<USER_TASK:>
Description:
def get_mongo_version(self):
"""
Gets mongo version of the server if it is running. Otherwise return
version configured in mongoVersion property
""" |
if self._mongo_version:
return self._mongo_version
mongo_version = self.read_current_mongo_version()
if not mongo_version:
mongo_version = self.get_configured_mongo_version()
self._mongo_version = mongo_version
return self._mongo_version |
<SYSTEM_TASK:>
issues a buildinfo command
<END_TASK>
<USER_TASK:>
Description:
def get_server_build_info(self):
"""
issues a buildinfo command
""" |
if self.is_online():
try:
return self.get_mongo_client().server_info()
except OperationFailure as ofe:
log_exception(ofe)
if "there are no users authenticated" in str(ofe):
# this is a pymongo 3.6.1 regression where the buildinfo command fails on non authenticated client
# fall-back to an authenticated client
admin_db = self.get_db("admin", no_auth=False)
return admin_db.command("buildinfo")
except Exception as e:
log_exception(e)
return None |
<SYSTEM_TASK:>
Returns True if we manage to auth to the given db, else False.
<END_TASK>
<USER_TASK:>
Description:
def authenticate_db(self, db, dbname, retry=True):
"""
Returns True if we manage to auth to the given db, else False.
""" |
log_verbose("Server '%s' attempting to authenticate to db '%s'" % (self.id, dbname))
login_user = self.get_login_user(dbname)
username = None
password = None
auth_success = False
if login_user:
username = login_user["username"]
if "password" in login_user:
password = login_user["password"]
# have three attempts to authenticate
no_tries = 0
while not auth_success and no_tries < 3:
if not username:
username = read_username(dbname)
if not password:
password = self.lookup_password(dbname, username)
if not password:
password = read_password("Enter password for user '%s\%s'"%
(dbname, username))
# if auth success then exit loop and memoize login
try:
auth_success = db.authenticate(username, password)
log_verbose("Authentication attempt #%s to db '%s' result: %s" % (no_tries, dbname, auth_success))
except OperationFailure as ofe:
if "auth fails" in str(ofe):
auth_success = False
if auth_success or not retry:
break
else:
log_error("Invalid login!")
username = None
password = None
no_tries += 1
if auth_success:
self.set_login_user(dbname, username, password)
log_verbose("Authentication Succeeded!")
else:
log_verbose("Authentication failed")
return auth_success |
<SYSTEM_TASK:>
We need a repl key if you are auth + a cluster member +
<END_TASK>
<USER_TASK:>
Description:
def needs_repl_key(self):
"""
We need a repl key if you are auth + a cluster member +
version is None or >= 2.0.0
""" |
cluster = self.get_cluster()
return (self.supports_repl_key() and
cluster is not None and cluster.get_repl_key() is not None) |
<SYSTEM_TASK:>
IF there is an exact match then use it
<END_TASK>
<USER_TASK:>
Description:
def exact_or_minor_exe_version_match(executable_name,
exe_version_tuples,
version):
"""
IF there is an exact match then use it
OTHERWISE try to find a minor version match
""" |
exe = exact_exe_version_match(executable_name,
exe_version_tuples,
version)
if not exe:
exe = minor_exe_version_match(executable_name,
exe_version_tuples,
version)
return exe |
<SYSTEM_TASK:>
Pause for this many seconds
<END_TASK>
<USER_TASK:>
Description:
def seconds(num):
"""
Pause for this many seconds
""" |
now = pytime.time()
end = now + num
until(end) |
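The until() helper is not shown in this section; a minimal sketch of what it presumably does (sleep in short slices until the target timestamp passes), under that assumption:
import time as pytime

def until(end):
    # Hypothetical sketch: sleep in short increments until the wall
    # clock passes the `end` timestamp.
    remaining = end - pytime.time()
    while remaining > 0:
        pytime.sleep(min(1.0, remaining))
        remaining = end - pytime.time()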
<SYSTEM_TASK:>
Does necessary work before starting a server
<END_TASK>
<USER_TASK:>
Description:
def _pre_mongod_server_start(server, options_override=None):
"""
Does necessary work before starting a server
1- An efficiency step for arbiters running with --no-journal
* there is a lock file ==>
* server must not have exited cleanly from last run, and does not know
how to auto-recover (as a journalled server would)
* however: this is an arbiter, therefore
* there is no need to repair data files in any way ==>
* i can rm this lockfile and start my server
""" |
lock_file_path = server.get_lock_file_path()
no_journal = (server.get_cmd_option("nojournal") or
(options_override and "nojournal" in options_override))
if (os.path.exists(lock_file_path) and
server.is_arbiter_server() and
no_journal):
log_warning("WARNING: Detected a lock file ('%s') for your server '%s'"
" ; since this server is an arbiter, there is no need for"
" repair or other action. Deleting mongod.lock and"
" proceeding..." % (lock_file_path, server.id))
try:
os.remove(lock_file_path)
except Exception as e:
log_exception(e)
raise MongoctlException("Error while trying to delete '%s'. "
"Cause: %s" % (lock_file_path, e)) |
<SYSTEM_TASK:>
Contains post start server operations
<END_TASK>
<USER_TASK:>
Description:
def prepare_mongod_server(server):
"""
Contains post start server operations
""" |
log_info("Preparing server '%s' for use as configured..." %
server.id)
cluster = server.get_cluster()
# setup the local users if server supports that
if server.supports_local_users():
users.setup_server_local_users(server)
if not server.is_cluster_member() or server.is_standalone_config_server():
users.setup_server_users(server)
if cluster and server.is_primary():
users.setup_cluster_users(cluster, server) |
<SYSTEM_TASK:>
Returns the more stringent rlimit value. -1 means no limit.
<END_TASK>
<USER_TASK:>
Description:
def _rlimit_min(one_val, nother_val):
"""Returns the more stringent rlimit value. -1 means no limit.""" |
if one_val < 0 or nother_val < 0:
return max(one_val, nother_val)
else:
return min(one_val, nother_val) |
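A few illustrative cases (not from the original source) showing why the sign check is needed:
# -1 means "no limit", so it only wins when both values are unlimited.
assert _rlimit_min(-1, 1024) == 1024   # a concrete limit beats "no limit"
assert _rlimit_min(-1, -1) == -1       # both unlimited stays unlimited
assert _rlimit_min(256, 1024) == 256   # the smaller limit is more stringent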
<SYSTEM_TASK:>
Converts a NetJSON 'NetworkGraph' object
<END_TASK>
<USER_TASK:>
Description:
def parse(self, data):
"""
Converts a NetJSON 'NetworkGraph' object
to a NetworkX Graph object, which is then returned.
Additionally checks for protocol version, revision and metric.
""" |
graph = self._init_graph()
# ensure this is a NetJSON NetworkGraph object
if 'type' not in data or data['type'] != 'NetworkGraph':
raise ParserError('Parse error, not a NetworkGraph object')
# ensure required keys are present
required_keys = ['protocol', 'version', 'metric', 'nodes', 'links']
for key in required_keys:
if key not in data:
raise ParserError('Parse error, "{0}" key not found'.format(key))
# store metadata
self.protocol = data['protocol']
self.version = data['version']
self.revision = data.get('revision') # optional
self.metric = data['metric']
# create graph
for node in data['nodes']:
graph.add_node(node['id'],
label=node['label'] if 'label' in node else None,
local_addresses=node.get('local_addresses', []),
**node.get('properties', {}))
for link in data['links']:
try:
source = link["source"]
dest = link["target"]
cost = link["cost"]
except KeyError as e:
raise ParserError('Parse error, "%s" key not found' % e)
properties = link.get('properties', {})
graph.add_edge(source, dest, weight=cost, **properties)
return graph |
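A hypothetical round trip with a minimal NetworkGraph document; `parser` stands in for an instance of the surrounding class:
data = {
    "type": "NetworkGraph",
    "protocol": "OLSR",
    "version": "0.8",
    "metric": "ETX",
    "nodes": [{"id": "10.0.0.1"}, {"id": "10.0.0.2"}],
    "links": [{"source": "10.0.0.1", "target": "10.0.0.2", "cost": 1.0}],
}
graph = parser.parse(data)  # networkx Graph with one edge of weight 1.0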
<SYSTEM_TASK:>
Converts an OpenVPN JSON to a NetworkX Graph object
<END_TASK>
<USER_TASK:>
Description:
def parse(self, data):
"""
Converts an OpenVPN JSON to a NetworkX Graph object
which is then returned.
""" |
# initialize graph
graph = self._init_graph()
server = self._server_common_name
# add server (central node) to graph
graph.add_node(server)
# data may be empty
if data is None:
clients = []
links = []
else:
clients = data.client_list.values()
links = data.routing_table.values()
# add clients in graph as nodes
for client in clients:
if client.common_name == 'UNDEF':
continue
client_properties = {
'label': client.common_name,
'real_address': str(client.real_address.host),
'port': int(client.real_address.port),
'connected_since': client.connected_since.strftime('%Y-%m-%dT%H:%M:%SZ'),
'bytes_received': int(client.bytes_received),
'bytes_sent': int(client.bytes_sent)
}
local_addresses = [
str(route.virtual_address)
for route in data.routing_table.values()
if route.real_address == client.real_address
]
if local_addresses:
client_properties['local_addresses'] = local_addresses
graph.add_node(str(client.real_address.host), **client_properties)
# add links in routing table to graph
for link in links:
if link.common_name == 'UNDEF':
continue
graph.add_edge(server, str(link.real_address.host), weight=1)
return graph |
<SYSTEM_TASK:>
Uses the _get_aggregated_node_list structure to find
<END_TASK>
<USER_TASK:>
Description:
def _get_primary_address(self, mac_address, node_list):
"""
Uses the _get_aggregated_node_list structure to find
the primary mac address associated with a secondary one;
if none is found, returns the address itself.
""" |
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address |
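Illustrative behavior with hypothetical MAC addresses; each inner list of node_list starts with the primary address followed by its secondary aliases:
# node_list = [['aa:bb:cc:00:00:01', 'aa:bb:cc:00:00:02']]
# self._get_primary_address('aa:bb:cc:00:00:02', node_list) -> 'aa:bb:cc:00:00:01'
# self._get_primary_address('ff:ff:ff:00:00:09', node_list) -> 'ff:ff:ff:00:00:09'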
<SYSTEM_TASK:>
Returns list of main and secondary mac addresses.
<END_TASK>
<USER_TASK:>
Description:
def _get_aggregated_node_list(self, data):
"""
Returns list of main and secondary mac addresses.
""" |
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list |
<SYSTEM_TASK:>
Converts an alfred-vis JSON object
<END_TASK>
<USER_TASK:>
Description:
def _parse_alfred_vis(self, data):
"""
Converts an alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_version" to determine the batman-adv version.
""" |
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph |
<SYSTEM_TASK:>
Returns differences of two network topologies old and new
<END_TASK>
<USER_TASK:>
Description:
def diff(old, new):
"""
Returns differences of two network topologies old and new
in NetJSON NetworkGraph compatible format
""" |
protocol = new.protocol
version = new.version
revision = new.revision
metric = new.metric
# calculate differences
in_both = _find_unchanged(old.graph, new.graph)
added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both)
removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both)
changed_edges = _find_changed(old.graph, new.graph, in_both)
# create netjson objects
# or assign None if no changes
if added_nodes.nodes() or added_edges.edges():
added = _netjson_networkgraph(protocol, version, revision, metric,
added_nodes.nodes(data=True),
added_edges.edges(data=True),
dict=True)
else:
added = None
if removed_nodes.nodes() or removed_edges.edges():
removed = _netjson_networkgraph(protocol, version, revision, metric,
removed_nodes.nodes(data=True),
removed_edges.edges(data=True),
dict=True)
else:
removed = None
if changed_edges:
changed = _netjson_networkgraph(protocol, version, revision, metric,
[],
changed_edges,
dict=True)
else:
changed = None
return OrderedDict((
('added', added),
('removed', removed),
('changed', changed)
)) |
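A sketch of how the return value is shaped; old_parser and new_parser are hypothetical parser instances exposing .graph, .protocol, .version, .revision and .metric:
changes = diff(old_parser, new_parser)
# changes['added']   -> NetworkGraph dict of new nodes/links, or None
# changes['removed'] -> NetworkGraph dict of vanished nodes/links, or None
# changes['changed'] -> NetworkGraph dict of links whose cost changed, or None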
<SYSTEM_TASK:>
calculates differences between topologies 'old' and 'new'
<END_TASK>
<USER_TASK:>
Description:
def _make_diff(old, new, both):
"""
calculates differences between topologies 'old' and 'new'
returns a tuple with two network graph objects
the first graph contains the added nodes, the second contains the added links
""" |
# make a copy of old topology to avoid tampering with it
diff_edges = new.copy()
not_different = [tuple(edge) for edge in both]
diff_edges.remove_edges_from(not_different)
# repeat operation with nodes
diff_nodes = new.copy()
not_different = []
for new_node in new.nodes():
if new_node in old.nodes():
not_different.append(new_node)
diff_nodes.remove_nodes_from(not_different)
# return tuple with modified graphs
# one for nodes and one for links
return diff_nodes, diff_edges |
<SYSTEM_TASK:>
returns edges that are in both old and new
<END_TASK>
<USER_TASK:>
Description:
def _find_unchanged(old, new):
"""
returns edges that are in both old and new
""" |
edges = []
old_edges = [set(edge) for edge in old.edges()]
new_edges = [set(edge) for edge in new.edges()]
for old_edge in old_edges:
if old_edge in new_edges:
edges.append(set(old_edge))
return edges |
<SYSTEM_TASK:>
returns links that have changed cost
<END_TASK>
<USER_TASK:>
Description:
def _find_changed(old, new, both):
"""
returns links that have changed cost
""" |
# create two list of sets of old and new edges including cost
old_edges = []
for edge in old.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
old_edges.append(set((edge[0], edge[1], cost)))
new_edges = []
for edge in new.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
new_edges.append(set((edge[0], edge[1], cost)))
# find out which edge changed
changed = []
for new_edge in new_edges:
if new_edge not in old_edges:
# new_edge is a set, convert it to list
new_edge = list(new_edge)
for item in new_edge:
if isinstance(item, tuple):
# unwrap cost from tuple and put it in a dict
cost = {'weight': item[0]}
new_edge.remove(item)
changed.append((new_edge[0], new_edge[1], cost))
return changed |
<SYSTEM_TASK:>
Converts a BMX6 b6m JSON to a NetworkX Graph object
<END_TASK>
<USER_TASK:>
Description:
def parse(self, data):
"""
Converts a BMX6 b6m JSON to a NetworkX Graph object
which is then returned.
""" |
# initialize graph
graph = self._init_graph()
if len(data) != 0:
if "links" not in data[0]:
raise ParserError('Parse error, "links" key not found')
# loop over topology section and create networkx graph
# the link cost is derived from the rates as the average of txRate and rxRate
for node in data:
for link in node['links']:
cost = (link['txRate'] + link['rxRate']) / 2.0
graph.add_edge(node['name'],
link['name'],
weight=cost,
tx_rate=link['txRate'],
rx_rate=link['rxRate'])
return graph |
<SYSTEM_TASK:>
Converts a CNML structure to a NetworkX Graph object
<END_TASK>
<USER_TASK:>
Description:
def parse(self, data):
"""
Converts a CNML structure to a NetworkX Graph object
which is then returned.
""" |
graph = self._init_graph()
# loop over links and create networkx graph
# Add only working nodes with working links
for link in data.get_inner_links():
if link.status != libcnml.libcnml.Status.WORKING:
continue
interface_a, interface_b = link.getLinkedInterfaces()
source = interface_a.ipv4
dest = interface_b.ipv4
# add link to Graph
graph.add_edge(source, dest, weight=1)
return graph |
<SYSTEM_TASK:>
Converts a dict representing an OLSR 0.6.x topology
<END_TASK>
<USER_TASK:>
Description:
def parse(self, data):
"""
Converts a dict representing an OLSR 0.6.x topology
to a NetworkX Graph object, which is then returned.
Additionally checks for "config" data in order to determine version and revision.
""" |
graph = self._init_graph()
if 'topology' not in data:
raise ParserError('Parse error, "topology" key not found')
elif 'mid' not in data:
raise ParserError('Parse error, "mid" key not found')
# determine version and revision
if 'config' in data:
version_info = data['config']['olsrdVersion'].replace(' ', '').split('-')
self.version = version_info[1]
# try to get only the git hash
if 'hash_' in version_info[-1]:
version_info[-1] = version_info[-1].split('hash_')[-1]
self.revision = version_info[-1]
# process alias list
alias_dict = {}
for node in data['mid']:
local_addresses = [alias['ipAddress'] for alias in node['aliases']]
alias_dict[node['ipAddress']] = local_addresses
# loop over topology section and create networkx graph
for link in data['topology']:
try:
source = link['lastHopIP']
target = link['destinationIP']
cost = link['tcEdgeCost']
properties = {
'link_quality': link['linkQuality'],
'neighbor_link_quality': link['neighborLinkQuality']
}
except KeyError as e:
raise ParserError('Parse error, "%s" key not found' % e)
# add nodes with their local_addresses
for node in [source, target]:
if node not in alias_dict:
continue
graph.add_node(node, local_addresses=alias_dict[node])
# skip links with infinite cost
if cost == float('inf'):
continue
# original olsrd cost (jsoninfo multiplies by 1024)
cost = float(cost) / 1024.0
# add link to Graph
graph.add_edge(source, target, weight=cost, **properties)
return graph |
<SYSTEM_TASK:>
Check if the project is hosted on launchpad.
<END_TASK>
<USER_TASK:>
Description:
def check_for_launchpad(old_vendor, name, urls):
"""Check if the project is hosted on launchpad.
:param old_vendor: str, name of the current vendor (e.g. "pypi").
:param name: str, name of the project
:param urls: set, urls to check.
:return: the name of the project on launchpad, or an empty string.
""" |
if old_vendor != "pypi":
# XXX This might work for other starting vendors
# XXX but I didn't check. For now only allow
# XXX pypi -> launchpad.
return ''
for url in urls:
try:
return re.match(r"https?://launchpad.net/([\w.\-]+)",
url).groups()[0]
except AttributeError:
continue
return '' |
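Illustrative calls with a hypothetical project and URL:
assert check_for_launchpad('pypi', 'bzr', {'https://launchpad.net/bzr'}) == 'bzr'
assert check_for_launchpad('github', 'bzr', {'https://launchpad.net/bzr'}) == ''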
<SYSTEM_TASK:>
Check if the project should switch vendors. E.g
<END_TASK>
<USER_TASK:>
Description:
def check_switch_vendor(old_vendor, name, urls, _depth=0):
"""Check if the project should switch vendors. E.g
project pushed on pypi, but changelog on launchpad.
:param name: str, name of the project
:param urls: set, urls to check.
:return: tuple, (str(new vendor name), str(new project name))
""" |
if _depth > 3:
# Protect against recursive vendor redirects.
return "", ""
new_name = check_for_launchpad(old_vendor, name, urls)
if new_name:
return "launchpad", new_name
return "", "" |
<SYSTEM_TASK:>
Mixes two colors together.
<END_TASK>
<USER_TASK:>
Description:
def _mix(color1, color2, weight=0.5, **kwargs):
""" Mixes two colors together.
""" |
weight = float(weight)
c1 = color1.value
c2 = color2.value
p = 0.0 if weight < 0 else 1.0 if weight > 1 else weight
w = p * 2 - 1
a = c1[3] - c2[3]
w1 = ((w if (w * a == -1) else (w + a) / (1 + w * a)) + 1) / 2.0
w2 = 1 - w1
q = [w1, w1, w1, p]
r = [w2, w2, w2, 1 - p]
return ColorValue([c1[i] * q[i] + c2[i] * r[i] for i in range(4)]) |
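A worked instance of the weighting math (not from the original source): mixing two opaque colors with the default weight collapses to a plain 50/50 channel average.
# c1 = [255, 0, 0, 1] (red), c2 = [0, 0, 255, 1] (blue), weight = 0.5
# p = 0.5, w = 2*0.5 - 1 = 0, a = 1 - 1 = 0
# w1 = ((0 + 0)/(1 + 0) + 1)/2 = 0.5 = w2
# result: [255*0.5, 0, 255*0.5, 0.5 + 0.5] = [127.5, 0, 127.5, 1.0]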
<SYSTEM_TASK:>
HSL with alpha channel color value.
<END_TASK>
<USER_TASK:>
Description:
def _hsla(h, s, l, a, **kwargs):
""" HSL with alpha channel color value.
""" |
res = colorsys.hls_to_rgb(float(h), float(l), float(s))
return ColorValue([x * 255.0 for x in res] + [float(a)]) |
<SYSTEM_TASK:>
Get lightness value of HSL color.
<END_TASK>
<USER_TASK:>
Description:
def _lightness(color, **kwargs):
""" Get lightness value of HSL color.
""" |
l = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[1]
return NumberValue((l * 100, '%')) |
<SYSTEM_TASK:>
Parse nested rulesets
<END_TASK>
<USER_TASK:>
Description:
def parse(self, target):
""" Parse nested rulesets
and save it in cache.
""" |
if isinstance(target, ContentNode):
if target.name:
self.parent = target
self.name.parse(self)
self.name += target.name
target.ruleset.append(self)
self.root.cache['rset'][str(self.name).split()[0]].add(self)
super(Ruleset, self).parse(target) |
<SYSTEM_TASK:>
Parse nested declaration.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, target):
""" Parse nested declaration.
""" |
if not isinstance(target, Node):
parent = ContentNode(None, None, [])
parent.parse(target)
target = parent
super(Declaration, self).parse(target)
self.name = str(self.data[0])
while isinstance(target, Declaration):
self.name = '-'.join((str(target.data[0]), self.name))
target = target.parent
self.expr = ' '.join(
str(n) for n in self.data[2:] if not isinstance(n, Declaration))
if self.expr:
target.declareset.append(self) |
<SYSTEM_TASK:>
Compile scss from file.
<END_TASK>
<USER_TASK:>
Description:
def load(self, f, precache=None):
""" Compile scss from file.
File is a string path or a file object.
""" |
precache = precache or self.get_opt('cache') or False
nodes = None
if isinstance(f, file_):
path = os.path.abspath(f.name)
else:
path = os.path.abspath(f)
f = open(f)
cache_path = os.path.splitext(path)[0] + '.ccss'
if precache and os.path.exists(cache_path):
ptime = os.path.getmtime(cache_path)
ttime = os.path.getmtime(path)
if ptime > ttime:
dump = open(cache_path, 'rb').read()
nodes = pickle.loads(dump)
if not nodes:
src = f.read()
nodes = self.scan(src.strip())
if precache:
f = open(cache_path, 'wb')
pickle.dump(nodes, f, protocol=1)
self.parse(nodes)
return ''.join(map(str, nodes)) |
<SYSTEM_TASK:>
Loads config file
<END_TASK>
<USER_TASK:>
Description:
def load_config(filename, filepath=''):
"""
Loads config file
Parameters
----------
filename: str
Filename of config file (incl. file extension)
filepath: str
Absolute path to directory of desired config file
""" |
FILE = path.join(filepath, filename)
try:
cfg.read(FILE)
global _loaded
_loaded = True
except Exception:
print("config file not found.")
<SYSTEM_TASK:>
Initialize with app configuration
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app, add_context_processor=True):
"""
Initialize with app configuration
""" |
# Check if login manager has been initialized
if not hasattr(app, 'login_manager'):
self.login_manager.init_app(
app,
add_context_processor=add_context_processor)
# Clear flashed messages since we redirect to auth immediately
self.login_manager.login_message = None
self.login_manager.needs_refresh_message = None
# Set default unauthorized callback
self.login_manager.unauthorized_handler(self.unauthorized_callback) |
<SYSTEM_TASK:>
Return login url with params encoded in state
<END_TASK>
<USER_TASK:>
Description:
def login_url(self, params=None, **kwargs):
"""
Return login url with params encoded in state
Available Google auth server params:
response_type: code, token
prompt: none, select_account, consent
approval_prompt: force, auto
access_type: online, offline
scopes: string (separated with commas) or list
redirect_uri: string
login_hint: string
""" |
kwargs.setdefault('response_type', 'code')
kwargs.setdefault('access_type', 'online')
if 'prompt' not in kwargs:
kwargs.setdefault('approval_prompt', 'auto')
scopes = kwargs.pop('scopes', self.scopes.split(','))
if USERINFO_PROFILE_SCOPE not in scopes:
scopes.append(USERINFO_PROFILE_SCOPE)
redirect_uri = kwargs.pop('redirect_uri', self.redirect_uri)
state = self.sign_params(params or {})
return GOOGLE_OAUTH2_AUTH_URL + '?' + urlencode(
dict(client_id=self.client_id,
scope=' '.join(scopes),
redirect_uri=redirect_uri,
state=state,
**kwargs)) |
<SYSTEM_TASK:>
Redirect to login url with next param set as request.url
<END_TASK>
<USER_TASK:>
Description:
def unauthorized_callback(self):
"""
Redirect to login url with next param set as request.url
""" |
return redirect(self.login_url(params=dict(next=request.url))) |
<SYSTEM_TASK:>
Use a refresh token to obtain a new access token
<END_TASK>
<USER_TASK:>
Description:
def get_access_token(self, refresh_token):
"""
Use a refresh token to obtain a new access token
""" |
token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict(
refresh_token=refresh_token,
grant_type='refresh_token',
client_id=self.client_id,
client_secret=self.client_secret,
)).json()
if not token or token.get('error'):
return
return token |
<SYSTEM_TASK:>
Auto complete scss constructions in interactive mode.
<END_TASK>
<USER_TASK:>
Description:
def complete(text, state):
""" Auto complete scss constructions in interactive mode. """ |
for cmd in COMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1 |
<SYSTEM_TASK:>
Returns translation from cache.
<END_TASK>
<USER_TASK:>
Description:
def get_cache(
self,
instance,
translation=None,
language=None,
field_name=None,
field_value=None,
):
"""
Returns translation from cache.
""" |
is_new = bool(instance.pk is None)
try:
cached_obj = instance._linguist_translations[field_name][language]
if not cached_obj.field_name:
cached_obj.field_name = field_name
if not cached_obj.language:
cached_obj.language = language
if not cached_obj.identifier:
cached_obj.identifier = self.instance.linguist_identifier
except KeyError:
cached_obj = None
if not is_new:
if translation is None:
try:
translation = self.decider.objects.get(
identifier=self.instance.linguist_identifier,
object_id=self.instance.pk,
language=language,
field_name=field_name,
)
except self.decider.DoesNotExist:
pass
if cached_obj is None:
if translation is not None:
cached_obj = CachedTranslation.from_object(translation)
else:
cached_obj = CachedTranslation(
instance=instance,
language=language,
field_name=field_name,
field_value=field_value,
)
instance._linguist_translations[cached_obj.field_name][
cached_obj.language
] = cached_obj
return cached_obj |
<SYSTEM_TASK:>
Add a new translation into the cache.
<END_TASK>
<USER_TASK:>
Description:
def set_cache(
self,
instance=None,
translation=None,
language=None,
field_name=None,
field_value=None,
):
"""
Add a new translation into the cache.
""" |
if instance is not None and translation is not None:
cached_obj = CachedTranslation.from_object(translation)
instance._linguist_translations[translation.field_name][
translation.language
] = cached_obj
return cached_obj
if instance is None:
instance = self.instance
cached_obj = self.get_cache(
instance,
translation=translation,
field_value=field_value,
language=language,
field_name=field_name,
)
if field_value is None and cached_obj.field_value:
cached_obj.deleted = True
if field_value != cached_obj.field_value:
cached_obj.has_changed = True
cached_obj.field_value = field_value
return cached_obj |
<SYSTEM_TASK:>
Overrides default behavior to handle linguist fields.
<END_TASK>
<USER_TASK:>
Description:
def _filter_or_exclude(self, negate, *args, **kwargs):
"""
Overrides default behavior to handle linguist fields.
""" |
from .models import Translation
new_args = self.get_cleaned_args(args)
new_kwargs = self.get_cleaned_kwargs(kwargs)
translation_args = self.get_translation_args(args)
translation_kwargs = self.get_translation_kwargs(kwargs)
has_linguist_args = self.has_linguist_args(args)
has_linguist_kwargs = self.has_linguist_kwargs(kwargs)
if translation_args or translation_kwargs:
ids = list(
set(
Translation.objects.filter(
*translation_args, **translation_kwargs
).values_list("object_id", flat=True)
)
)
if ids:
new_kwargs["id__in"] = ids
has_kwargs = has_linguist_kwargs and not (new_kwargs or new_args)
has_args = has_linguist_args and not (new_args or new_kwargs)
# No translations but we looked for translations?
# Returns empty queryset.
if has_kwargs or has_args:
return self._clone().none()
return super(QuerySetMixin, self)._filter_or_exclude(
negate, *new_args, **new_kwargs
) |
<SYSTEM_TASK:>
Parses the given kwargs and returns True if they contain
<END_TASK>
<USER_TASK:>
Description:
def has_linguist_kwargs(self, kwargs):
"""
Parses the given kwargs and returns True if they contain
linguist lookups.
""" |
for k in kwargs:
if self.is_linguist_lookup(k):
return True
return False |
<SYSTEM_TASK:>
Parses the given args and returns True if they contain
<END_TASK>
<USER_TASK:>
Description:
def has_linguist_args(self, args):
"""
Parses the given args and returns True if they contain
linguist lookups.
""" |
linguist_args = []
for arg in args:
condition = self._get_linguist_condition(arg)
if condition:
linguist_args.append(condition)
return bool(linguist_args) |
<SYSTEM_TASK:>
Returns linguist args from model args.
<END_TASK>
<USER_TASK:>
Description:
def get_translation_args(self, args):
"""
Returns linguist args from model args.
""" |
translation_args = []
for arg in args:
condition = self._get_linguist_condition(arg, transform=True)
if condition:
translation_args.append(condition)
return translation_args |
<SYSTEM_TASK:>
Returns true if the given lookup is a valid linguist lookup.
<END_TASK>
<USER_TASK:>
Description:
def is_linguist_lookup(self, lookup):
"""
Returns true if the given lookup is a valid linguist lookup.
""" |
field = utils.get_field_name_from_lookup(lookup)
# To keep default behavior with "FieldError: Cannot resolve keyword".
if (
field not in self.concrete_field_names
and field in self.linguist_field_names
):
return True
return False |
<SYSTEM_TASK:>
Parses Q tree and returns linguist lookups or model lookups
<END_TASK>
<USER_TASK:>
Description:
def _get_linguist_condition(self, condition, reverse=False, transform=False):
"""
Parses Q tree and returns linguist lookups or model lookups
if reverse is True.
""" |
# We deal with a node
if isinstance(condition, Q):
children = []
for child in condition.children:
parsed = self._get_linguist_condition(
condition=child, reverse=reverse, transform=transform
)
if parsed is not None:
if (isinstance(parsed, Q) and parsed.children) or isinstance(
parsed, tuple
):
children.append(parsed)
new_condition = copy.deepcopy(condition)
new_condition.children = children
return new_condition
# We are dealing with a lookup ('field', 'value').
lookup, value = condition
is_linguist = self.is_linguist_lookup(lookup)
if transform and is_linguist:
return Q(
**utils.get_translation_lookup(
self.model._linguist.identifier, lookup, value
)
)
if (reverse and not is_linguist) or (not reverse and is_linguist):
return condition |
<SYSTEM_TASK:>
Returns positional arguments for related model query.
<END_TASK>
<USER_TASK:>
Description:
def get_cleaned_args(self, args):
"""
Returns positional arguments for related model query.
""" |
if not args:
return args
cleaned_args = []
for arg in args:
condition = self._get_linguist_condition(arg, True)
if condition:
cleaned_args.append(condition)
return cleaned_args |
<SYSTEM_TASK:>
Returns concrete field lookups.
<END_TASK>
<USER_TASK:>
Description:
def get_cleaned_kwargs(self, kwargs):
"""
Returns concrete field lookups.
""" |
cleaned_kwargs = kwargs.copy()
if kwargs is not None:
for k in kwargs:
if self.is_linguist_lookup(k):
del cleaned_kwargs[k]
return cleaned_kwargs |
<SYSTEM_TASK:>
Prefetches translations.
<END_TASK>
<USER_TASK:>
Description:
def with_translations(self, **kwargs):
"""
Prefetches translations.
Takes three optional keyword arguments:
* ``field_names``: ``field_name`` values for SELECT IN
* ``languages``: ``language`` values for SELECT IN
* ``chunks_length``: fetches IDs by chunk
""" |
force = kwargs.pop("force", False)
if self._prefetch_translations_done and force is False:
return self
self._prefetched_translations_cache = utils.get_grouped_translations(
self, **kwargs
)
self._prefetch_translations_done = True
return self._clone() |
<SYSTEM_TASK:>
Deletes related translations.
<END_TASK>
<USER_TASK:>
Description:
def delete_translations(self, language=None):
"""
Deletes related translations.
""" |
from .models import Translation
return Translation.objects.delete_translations(obj=self, language=language) |
<SYSTEM_TASK:>
Context manager to override the instance language.
<END_TASK>
<USER_TASK:>
Description:
def override_language(self, language):
"""
Context manager to override the instance language.
""" |
previous_language = self._linguist.language
self._linguist.language = language
yield
self._linguist.language = previous_language |
<SYSTEM_TASK:>
When a value is assigned via the field's own name, the value
<END_TASK>
<USER_TASK:>
Description:
def default_value_setter(field):
"""
When a value is assigned via the field's own name, the value
for the currently active language will be set.
""" |
def default_value_func_setter(self, value):
localized_field = utils.build_localized_field_name(
field, self._linguist.active_language
)
setattr(self, localized_field, value)
return default_value_func_setter |
<SYSTEM_TASK:>
Takes a field base class and wrap it with ``TranslationField`` class.
<END_TASK>
<USER_TASK:>
Description:
def field_factory(base_class):
"""
Takes a field base class and wrap it with ``TranslationField`` class.
""" |
from .fields import TranslationField
class TranslationFieldField(TranslationField, base_class):
pass
TranslationFieldField.__name__ = "Translation%s" % base_class.__name__
return TranslationFieldField |
<SYSTEM_TASK:>
Takes the original field, a given language, a decider model and return a
<END_TASK>
<USER_TASK:>
Description:
def create_translation_field(translated_field, language):
"""
Takes the original field, a given language, a decider model and return a
Field class for model.
""" |
cls_name = translated_field.__class__.__name__
if not isinstance(translated_field, tuple(SUPPORTED_FIELDS.keys())):
raise ImproperlyConfigured("%s is not supported by Linguist." % cls_name)
translation_class = field_factory(translated_field.__class__)
kwargs = get_translation_class_kwargs(translated_field.__class__)
return translation_class(
translated_field=translated_field, language=language, **kwargs
) |
<SYSTEM_TASK:>
Deletes related instance's translations when instance is deleted.
<END_TASK>
<USER_TASK:>
Description:
def delete_translations(sender, instance, **kwargs):
"""
Deletes related instance's translations when instance is deleted.
""" |
if issubclass(sender, (ModelMixin,)):
instance._linguist.decider.objects.filter(
identifier=instance.linguist_identifier, object_id=instance.pk
).delete() |
<SYSTEM_TASK:>
Support asciitree 0.2 API.
<END_TASK>
<USER_TASK:>
Description:
def draw_tree(node,
child_iter=lambda n: n.children,
text_str=str):
"""Support asciitree 0.2 API.
This function solely exist to not break old code (using asciitree 0.2).
Its use is deprecated.""" |
return LeftAligned(traverse=Traversal(get_text=text_str,
get_children=child_iter),
draw=LegacyStyle())(node) |
<SYSTEM_TASK:>
Returns an active language code that is guaranteed to be in
<END_TASK>
<USER_TASK:>
Description:
def get_language():
"""
Returns an active language code that is guaranteed to be in
settings.SUPPORTED_LANGUAGES.
""" |
lang = _get_language()
if not lang:
return get_fallback_language()
langs = [l[0] for l in settings.SUPPORTED_LANGUAGES]
if lang not in langs and "-" in lang:
lang = lang.split("-")[0]
if lang in langs:
return lang
return settings.DEFAULT_LANGUAGE |
<SYSTEM_TASK:>
Activates the given language for the given instances.
<END_TASK>
<USER_TASK:>
Description:
def activate_language(instances, language):
"""
Activates the given language for the given instances.
""" |
language = (
language if language in get_supported_languages() else get_fallback_language()
)
for instance in instances:
instance.activate_language(language) |
<SYSTEM_TASK:>
Loads a class given a class_path. The setting value may be a string or a
<END_TASK>
<USER_TASK:>
Description:
def load_class(class_path, setting_name=None):
"""
Loads a class given a class_path. The setting value may be a string or a
tuple. The setting_name parameter is only there for pretty error output, and
therefore is optional.
""" |
if not isinstance(class_path, six.string_types):
try:
class_path, app_label = class_path
except:
if setting_name:
raise exceptions.ImproperlyConfigured(
CLASS_PATH_ERROR % (setting_name, setting_name)
)
else:
raise exceptions.ImproperlyConfigured(
CLASS_PATH_ERROR % ("this setting", "It")
)
try:
class_module, class_name = class_path.rsplit(".", 1)
except ValueError:
if setting_name:
txt = "%s isn't a valid module. Check your %s setting" % (
class_path,
setting_name,
)
else:
txt = "%s isn't a valid module." % class_path
raise exceptions.ImproperlyConfigured(txt)
try:
mod = import_module(class_module)
except ImportError as e:
if setting_name:
txt = 'Error importing backend %s: "%s". Check your %s setting' % (
class_module,
e,
setting_name,
)
else:
txt = 'Error importing backend %s: "%s".' % (class_module, e)
raise exceptions.ImproperlyConfigured(txt)
try:
clazz = getattr(mod, class_name)
except AttributeError:
if setting_name:
txt = (
'Backend module "%s" does not define a "%s" class. Check'
" your %s setting" % (class_module, class_name, setting_name)
)
else:
txt = 'Backend module "%s" does not define a "%s" class.' % (
class_module,
class_name,
)
raise exceptions.ImproperlyConfigured(txt)
return clazz |
<SYSTEM_TASK:>
Mapper that takes a language field, its value and returns the
<END_TASK>
<USER_TASK:>
Description:
def get_translation_lookup(identifier, field, value):
"""
Mapper that takes a language field, its value and returns the
related lookup for Translation model.
""" |
# Split by transformers
parts = field.split("__")
# Store transformers
transformers = parts[1:] if len(parts) > 1 else None
# default: treat the whole first part as the field name, in the fallback language
field_name = parts[0]
language = get_fallback_language()
name_parts = parts[0].split("_")
if len(name_parts) > 1:
supported_languages = get_supported_languages()
last_part = name_parts[-1]
if last_part in supported_languages:
# title_with_underscore_fr?
field_name = "_".join(name_parts[:-1])
language = last_part
else:
# title_with_underscore?
# Let's use default language
field_name = "_".join(name_parts)
value_lookup = (
"field_value"
if transformers is None
else "field_value__%s" % "__".join(transformers)
)
lookup = {"field_name": field_name, "identifier": identifier, "language": language}
lookup[value_lookup] = value
return lookup |
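A worked example of the mapping, assuming 'fr' is among the supported languages:
lookup = get_translation_lookup('post', 'title_fr__icontains', 'hello')
# lookup == {'field_name': 'title', 'identifier': 'post',
#            'language': 'fr', 'field_value__icontains': 'hello'}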
<SYSTEM_TASK:>
Takes instances and returns grouped translations ready to
<END_TASK>
<USER_TASK:>
Description:
def get_grouped_translations(instances, **kwargs):
"""
Takes instances and returns grouped translations ready to
be set in cache.
""" |
grouped_translations = collections.defaultdict(list)
if not instances:
return grouped_translations
if not isinstance(instances, collections.Iterable):
instances = [instances]
if isinstance(instances, QuerySet):
model = instances.model
else:
model = instances[0]._meta.model
instances_ids = []
for instance in instances:
instances_ids.append(instance.pk)
if instance._meta.model != model:
raise Exception(
"You cannot use different model instances, only one authorized."
)
from .models import Translation
from .mixins import ModelMixin
decider = model._meta.linguist.get("decider", Translation)
identifier = model._meta.linguist.get("identifier", None)
chunks_length = kwargs.get("chunks_length", None)
populate_missing = kwargs.get("populate_missing", True)
if identifier is None:
raise Exception('You must define Linguist "identifier" meta option')
lookup = dict(identifier=identifier)
for kwarg in ("field_names", "languages"):
value = kwargs.get(kwarg, None)
if value is not None:
if not isinstance(value, (list, tuple)):
value = [value]
lookup["%s__in" % kwarg[:-1]] = value
if chunks_length is not None:
translations_qs = []
for ids in utils.chunks(instances_ids, chunks_length):
ids_lookup = copy.copy(lookup)
ids_lookup["object_id__in"] = ids
translations_qs.append(decider.objects.filter(**ids_lookup))
translations = itertools.chain.from_iterable(translations_qs)
else:
lookup["object_id__in"] = instances_ids
translations = decider.objects.filter(**lookup)
for translation in translations:
grouped_translations[translation.object_id].append(translation)
return grouped_translations |
<SYSTEM_TASK:>
Returns available languages for current object.
<END_TASK>
<USER_TASK:>
Description:
def get_available_languages(self, obj):
"""
Returns available languages for current object.
""" |
return obj.available_languages if obj is not None else self.model.objects.none() |
<SYSTEM_TASK:>
Adds languages columns.
<END_TASK>
<USER_TASK:>
Description:
def languages_column(self, obj):
"""
Adds languages columns.
""" |
languages = self.get_available_languages(obj)
return '<span class="available-languages">{0}</span>'.format(
" ".join(languages)
) |
<SYSTEM_TASK:>
Prefetches translations for the given instances.
<END_TASK>
<USER_TASK:>
Description:
def prefetch_translations(instances, **kwargs):
"""
Prefetches translations for the given instances.
Can be useful for a list of instances.
""" |
from .mixins import ModelMixin
if not isinstance(instances, collections.Iterable):
instances = [instances]
populate_missing = kwargs.get("populate_missing", True)
grouped_translations = utils.get_grouped_translations(instances, **kwargs)
# In the case of no translations objects
if not grouped_translations and populate_missing:
for instance in instances:
instance.populate_missing_translations()
for instance in instances:
if (
issubclass(instance.__class__, ModelMixin)
and instance.pk in grouped_translations
):
for translation in grouped_translations[instance.pk]:
instance._linguist.set_cache(instance=instance, translation=translation)
if populate_missing:
instance.populate_missing_translations() |
<SYSTEM_TASK:>
Shortcut method to retrieve translations for a given object.
<END_TASK>
<USER_TASK:>
Description:
def get_translations(self, obj, language=None):
"""
Shortcut method to retrieve translations for a given object.
""" |
lookup = {"identifier": obj.linguist_identifier, "object_id": obj.pk}
if language is not None:
lookup["language"] = language
return self.get_queryset().filter(**lookup) |
<SYSTEM_TASK:>
Encode an integer into a symbol string.
<END_TASK>
<USER_TASK:>
Description:
def encode(number, checksum=False, split=0):
"""Encode an integer into a symbol string.
A ValueError is raised on invalid input.
If checksum is set to True, a check symbol will be
calculated and appended to the string.
If split is specified, the string will be divided into
clusters of that size separated by hyphens.
The encoded string is returned.
""" |
number = int(number)
if number < 0:
raise ValueError("number '%d' is not a positive integer" % number)
split = int(split)
if split < 0:
raise ValueError("split '%d' is not a positive integer" % split)
check_symbol = ''
if checksum:
check_symbol = encode_symbols[number % check_base]
if number == 0:
return '0' + check_symbol
symbol_string = ''
while number > 0:
remainder = number % base
number //= base
symbol_string = encode_symbols[remainder] + symbol_string
symbol_string = symbol_string + check_symbol
if split:
chunks = []
for pos in range(0, len(symbol_string), split):
chunks.append(symbol_string[pos:pos + split])
symbol_string = '-'.join(chunks)
return symbol_string |
<SYSTEM_TASK:>
Decode an encoded symbol string.
<END_TASK>
<USER_TASK:>
Description:
def decode(symbol_string, checksum=False, strict=False):
"""Decode an encoded symbol string.
If checksum is set to True, the string is assumed to have a
trailing check symbol which will be validated. If the
checksum validation fails, a ValueError is raised.
If strict is set to True, a ValueError is raised if the
normalization step requires changes to the string.
The decoded string is returned.
""" |
symbol_string = normalize(symbol_string, strict=strict)
if checksum:
symbol_string, check_symbol = symbol_string[:-1], symbol_string[-1]
number = 0
for symbol in symbol_string:
number = number * base + decode_symbols[symbol]
if checksum:
check_value = decode_symbols[check_symbol]
modulo = number % check_base
if check_value != modulo:
raise ValueError("invalid check symbol '%s' for string '%s'" %
(check_symbol, symbol_string))
return number |
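A hypothetical round trip through both functions; the exact symbol string depends on the module's alphabet, so it is elided here:
s = encode(1234, checksum=True, split=4)
assert decode(s, checksum=True) == 1234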
<SYSTEM_TASK:>
Normalize an encoded symbol string.
<END_TASK>
<USER_TASK:>
Description:
def normalize(symbol_string, strict=False):
"""Normalize an encoded symbol string.
Normalization provides error correction and prepares the
string for decoding. These transformations are applied:
1. Hyphens are removed
2. 'I', 'i', 'L' or 'l' are converted to '1'
3. 'O' or 'o' are converted to '0'
4. All characters are converted to uppercase
A TypeError is raised if an invalid string type is provided.
A ValueError is raised if the normalized string contains
invalid characters.
If the strict parameter is set to True, a ValueError is raised
if any of the above transformations are applied.
The normalized string is returned.
""" |
if isinstance(symbol_string, string_types):
if not PY3:
try:
symbol_string = symbol_string.encode('ascii')
except UnicodeEncodeError:
raise ValueError("string should only contain ASCII characters")
else:
raise TypeError("string is of invalid type %s" %
symbol_string.__class__.__name__)
norm_string = symbol_string.replace('-', '').translate(normalize_symbols).upper()
if not valid_symbols.match(norm_string):
raise ValueError("string '%s' contains invalid characters" % norm_string)
if strict and norm_string != symbol_string:
raise ValueError("string '%s' requires normalization" % symbol_string)
return norm_string |
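Illustrative, assuming the transformations listed in the docstring:
assert normalize('i1-o0') == '1100'   # hyphen dropped, i -> 1, o -> 0, uppercased
# normalize('i1-o0', strict=True) raises ValueError: changes were required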
<SYSTEM_TASK:>
Return required packages
<END_TASK>
<USER_TASK:>
Description:
def setup_requires():
"""
Return required packages
Plus any version tests and warnings
""" |
from pkg_resources import parse_version
required = ['cython>=0.24.0']
numpy_requirement = 'numpy>=1.7.1'
try:
import numpy
except Exception:
required.append(numpy_requirement)
else:
if parse_version(numpy.__version__) < parse_version('1.7.1'):
required.append(numpy_requirement)
return required |
<SYSTEM_TASK:>
Populate the block context with BlockNodes from parent templates.
<END_TASK>
<USER_TASK:>
Description:
def _build_block_context(template, context):
"""Populate the block context with BlockNodes from parent templates.""" |
# Ensure there's a BlockContext before rendering. This allows blocks in
# ExtendsNodes to be found by sub-templates (allowing {{ block.super }} and
# overriding sub-blocks to work).
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
for node in template.nodelist:
if isinstance(node, ExtendsNode):
compiled_parent = node.get_parent(context)
# Add the parent node's blocks to the context. (This ends up being
# similar logic to ExtendsNode.render(), where we're adding the
# parent's blocks to the context so a child can find them.)
block_context.add_blocks(
{n.name: n for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)})
_build_block_context(compiled_parent, context)
return compiled_parent
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
break |
<SYSTEM_TASK:>
Recursively iterate over a node to find the wanted block.
<END_TASK>
<USER_TASK:>
Description:
def _render_template_block_nodelist(nodelist, block_name, context):
"""Recursively iterate over a node to find the wanted block.""" |
# Attempt to find the wanted block in the current template.
for node in nodelist:
# If the wanted block was found, return it.
if isinstance(node, BlockNode):
# No matter what, add this block to the rendering context.
context.render_context[BLOCK_CONTEXT_KEY].push(node.name, node)
# If the name matches, you're all set and we found the block!
if node.name == block_name:
return node.render(context)
# If a node has children, recurse into them. Based on
# django.template.base.Node.get_nodes_by_type.
for attr in node.child_nodelists:
try:
new_nodelist = getattr(node, attr)
except AttributeError:
continue
# Try to find the block recursively.
try:
return _render_template_block_nodelist(new_nodelist, block_name, context)
except BlockNotFound:
continue
# The wanted block_name was not found.
raise BlockNotFound("block with name '%s' does not exist" % block_name) |
<SYSTEM_TASK:>
Loads the given template_name and renders the given block with the given
<END_TASK>
<USER_TASK:>
Description:
def render_block_to_string(template_name, block_name, context=None):
"""
Loads the given template_name and renders the given block with the given
dictionary as context. Returns a string.
template_name
The name of the template to load and render. If it's a list of
template names, Django uses select_template() instead of
get_template() to find the template.
""" |
# Like render_to_string, template_name can be a string or a list/tuple.
if isinstance(template_name, (tuple, list)):
t = loader.select_template(template_name)
else:
t = loader.get_template(template_name)
# Create the context instance.
context = context or {}
# The Django backend.
if isinstance(t, DjangoTemplate):
return django_render_block(t, block_name, context)
elif isinstance(t, Jinja2Template):
from render_block.jinja2 import jinja2_render_block
return jinja2_render_block(t, block_name, context)
else:
raise UnsupportedEngine(
'Can only render blocks from the Django template backend.') |
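Hypothetical usage; the template name, block name and context variable are examples, not part of the source:
html = render_block_to_string('article_detail.html', 'content',
                              {'article': article})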
<SYSTEM_TASK:>
Generates the host path for a container volume. If the given path is a dictionary, uses the entry of the instance
<END_TASK>
<USER_TASK:>
Description:
def get_host_path(root, path, instance=None):
"""
Generates the host path for a container volume. If the given path is a dictionary, uses the entry of the instance
name.
:param root: Root path to prepend, if ``path`` does not already describe an absolute path.
:type root: unicode | str | AbstractLazyObject
:param path: Path string or dictionary of per-instance paths.
:type path: unicode | str | dict | AbstractLazyObject
:param instance: Optional instance name.
:type instance: unicode | str
:return: Path on the host that is mapped to the container volume.
:rtype: unicode | str
""" |
r_val = resolve_value(path)
if isinstance(r_val, dict):
r_instance = instance or 'default'
r_path = resolve_value(r_val.get(r_instance))
if not r_path:
raise ValueError("No path defined for instance {0}.".format(r_instance))
else:
r_path = r_val
r_root = resolve_value(root)
if r_path and r_root and (r_path[0] != posixpath.sep):
return posixpath.join(r_root, r_path)
return r_path |
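Illustrative calls with plain (non-lazy) values, assuming resolve_value passes such values through unchanged:
assert get_host_path('/srv', 'data') == '/srv/data'
assert get_host_path('/srv', {'db': 'mysql'}, instance='db') == '/srv/mysql'
assert get_host_path('/srv', '/abs/path') == '/abs/path'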
<SYSTEM_TASK:>
Runs the given lists of attached actions and instance actions on the client.
<END_TASK>
<USER_TASK:>
Description:
def run_actions(self, actions):
"""
Runs the given lists of attached actions and instance actions on the client.
:param actions: Actions to apply.
:type actions: list[dockermap.map.action.ItemAction]
:return: Where the result is not ``None``, returns the output from the client. Note that this is a generator
and needs to be consumed in order for all actions to be performed.
:rtype: collections.Iterable[dict]
""" |
policy = self._policy
for action in actions:
config_id = action.config_id
config_type = config_id.config_type
client_config = policy.clients[action.client_name]
client = client_config.get_client()
c_map = policy.container_maps[config_id.map_name]
if config_type == ItemType.CONTAINER:
config = c_map.get_existing(config_id.config_name)
item_name = policy.cname(config_id.map_name, config_id.config_name, config_id.instance_name)
elif config_type == ItemType.VOLUME:
a_parent_name = config_id.config_name if c_map.use_attached_parent_name else None
item_name = policy.aname(config_id.map_name, config_id.instance_name, parent_name=a_parent_name)
if client_config.features['volumes']:
config = c_map.get_existing_volume(config_id.config_name)
else:
config = c_map.get_existing(config_id.config_name)
elif config_type == ItemType.NETWORK:
config = c_map.get_existing_network(config_id.config_name)
item_name = policy.nname(config_id.map_name, config_id.config_name)
elif config_type == ItemType.IMAGE:
config = None
item_name = format_image_tag(config_id.config_name, config_id.instance_name)
else:
raise ValueError("Invalid configuration type.", config_id.config_type)
for action_type in action.action_types:
try:
a_method = self.action_methods[(config_type, action_type)]
except KeyError:
raise ActionTypeException(config_id, action_type)
action_config = ActionConfig(action.client_name, action.config_id, client_config, client,
c_map, config)
try:
res = a_method(action_config, item_name, **action.extra_data)
except Exception:
exc_info = sys.exc_info()
raise ActionException(exc_info, action.client_name, config_id, action_type)
if res is not None:
yield ActionOutput(action.client_name, config_id, action_type, res) |
<SYSTEM_TASK:>
Constructs a configuration object from an existing client instance. If the client has already been created with
<END_TASK>
<USER_TASK:>
Description:
def from_client(cls, client):
"""
Constructs a configuration object from an existing client instance. If the client has already been created with
a configuration object, returns that instance.
:param client: Client object to derive the configuration from.
:type client: docker.client.Client
:return: ClientConfiguration
""" |
if hasattr(client, 'client_configuration'):
return client.client_configuration
kwargs = {'client': client}
for attr in cls.init_kwargs:
if hasattr(client, attr):
kwargs[attr] = getattr(client, attr)
if hasattr(client, 'api_version'):
kwargs['version'] = client.api_version
return cls(**kwargs) |
<SYSTEM_TASK:>
Generates keyword arguments for creating a new Docker client instance.
<END_TASK>
<USER_TASK:>
Description:
def get_init_kwargs(self):
"""
Generates keyword arguments for creating a new Docker client instance.
:return: Keyword arguments as defined through this configuration.
:rtype: dict
""" |
init_kwargs = {}
for k in self.init_kwargs:
if k in self.core_property_set:
init_kwargs[k] = getattr(self, k)
elif k in self:
init_kwargs[k] = self[k]
return init_kwargs |
<SYSTEM_TASK:>
Retrieves or creates a client instance from this configuration object. If instantiated from this configuration,
<END_TASK>
<USER_TASK:>
Description:
def get_client(self):
"""
Retrieves or creates a client instance from this configuration object. If instantiated from this configuration,
the resulting object is also cached in the property ``client`` and a reference to this configuration is stored
on the client object.
:return: Client object instance.
:rtype: docker.client.Client
""" |
client = self._client
if not client:
self._client = client = self.client_constructor(**self.get_init_kwargs())
client.client_configuration = self
# Client might update the version number after construction.
updated_version = getattr(client, 'api_version', None)
if updated_version:
self.version = updated_version
return client |
<SYSTEM_TASK:>
Runs the given commands inside a container, one at a time.
<END_TASK>
<USER_TASK:>
Description:
def exec_commands(self, action, c_name, run_cmds, **kwargs):
"""
Runs the given commands inside a container, one at a time.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
""" |
client = action.client
exec_results = []
for run_cmd in run_cmds:
cmd = run_cmd.cmd
cmd_user = run_cmd.user
log.debug("Creating exec command in container %s with user %s: %s.", c_name, cmd_user, cmd)
ec_kwargs = self.get_exec_create_kwargs(action, c_name, cmd, cmd_user)
create_result = client.exec_create(**ec_kwargs)
if create_result:
e_id = create_result['Id']
log.debug("Starting exec command with id %s.", e_id)
es_kwargs = self.get_exec_start_kwargs(action, c_name, e_id)
client.exec_start(**es_kwargs)
exec_results.append(create_result)
else:
log.debug("Exec command was created, but did not return an id. Assuming that it has been started.")
if exec_results:
return exec_results
return None |
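Underneath, this follows docker-py's two-step exec API; a reduced sketch of that flow, with a hypothetical container name and command:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
# Step 1: register the command with the daemon and obtain an exec id.
create_result = client.exec_create(container='web.instance1',  # hypothetical container
                                   cmd=['cat', '/etc/hostname'],
                                   user='root')
if create_result:
    # Step 2: actually run the registered command.
    client.exec_start(exec_id=create_result['Id'])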
<SYSTEM_TASK:>
Runs all configured commands of a container configuration inside the container instance.
<END_TASK>
<USER_TASK:>
Description:
def exec_container_commands(self, action, c_name, **kwargs):
"""
Runs all configured commands of a container configuration inside the container instance.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
""" |
config_cmds = action.config.exec_commands
if not config_cmds:
return None
return self.exec_commands(action, c_name, run_cmds=config_cmds) |
<SYSTEM_TASK:>
Performs `os.path` replacement operations on a path string.
<END_TASK>
<USER_TASK:>
Description:
def prepare_path(path, replace_space, replace_sep, expandvars, expanduser):
"""
Performs `os.path` replacement operations on a path string.
:param path: Path string
:type path: unicode | str
:param replace_space: Mask spaces with a backslash.
:type replace_space: bool
:param replace_sep: Replace potentially different path separators with POSIX path notation (:const:`posixpath.sep`).
:type replace_sep: bool
:param expandvars: Expand environment variables (:func:`~os.path.expandvars`).
:type expandvars: bool
:param expanduser: Expand user variables (:func:`~os.path.expanduser`).
:type expanduser: bool
:return: Path string from `path` with aforementioned replacements.
:rtype: unicode | str
""" |
r_path = path
if expandvars:
r_path = os.path.expandvars(r_path)
if expanduser:
r_path = os.path.expanduser(r_path)
if replace_sep and os.sep != posixpath.sep:
r_path = r_path.replace(os.path.sep, posixpath.sep)
if replace_space:
r_path = r_path.replace(' ', '\\ ')
return r_path |
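A behavior sketch, treating prepare_path as in scope; the home directory in the comment is hypothetical:

r = prepare_path('~/my files/app', replace_space=True, replace_sep=True,
                 expandvars=False, expanduser=True)
# With HOME=/home/user on a POSIX system: '/home/user/my\ files/app'
# (user expansion first, separator replacement is a no-op on POSIX,
# then spaces are masked).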
<SYSTEM_TASK:>
Converts a command line to the notation as used in a Dockerfile ``CMD`` and ``ENTRYPOINT`` command. In shell
<END_TASK>
<USER_TASK:>
Description:
def format_command(cmd, shell=False):
"""
Converts a command line to the notation as used in a Dockerfile ``CMD`` and ``ENTRYPOINT`` command. In shell
notation, this returns a simple string, whereas by default it returns a JSON-list format with the command and
arguments.
:param cmd: Command line as a string or tuple.
:type cmd: unicode | str | tuple | list
:param shell: Use the notation so that Docker runs the command in a shell. Default is ``False``.
:type shell: bool
:return: The command string.
:rtype: unicode | str
""" |
def _split_cmd():
line = None
for part in cmd.split(' '):
line = part if line is None else '{0} {1}'.format(line, part)
if not part.endswith('\\'):  # also safe for empty parts from repeated spaces
yield line
line = None
if line is not None:
yield line
if cmd in ([], ''):
return '[]'
if shell:
if isinstance(cmd, (list, tuple)):
return ' '.join(cmd)
elif isinstance(cmd, six.string_types):
return cmd
else:
if isinstance(cmd, (list, tuple)):
return json.dumps([six.text_type(item) for item in cmd])  # a list, not a map object, for Python 3 compatibility
elif isinstance(cmd, six.string_types):
return json.dumps(list(_split_cmd()))
raise ValueError("Invalid type of command string or sequence: {0}".format(cmd)) |
<SYSTEM_TASK:>
Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple.
<END_TASK>
<USER_TASK:>
Description:
def format_expose(expose):
"""
Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple.
:param expose: Port numbers, can be an integer, string, or a list/tuple of those.
:type expose: int | unicode | str | list | tuple
:return: A tuple, to be separated by spaces before inserting in a Dockerfile.
:rtype: tuple
""" |
if isinstance(expose, six.string_types):
return expose,
elif isinstance(expose, collections.Iterable):
return tuple(six.text_type(e) for e in expose)  # an actual tuple, matching the documented return type
return six.text_type(expose), |
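With the tuple conversion above, usage in a Dockerfile generator might look like this:

'EXPOSE {0}'.format(' '.join(format_expose([80, 443])))  # -> 'EXPOSE 80 443'
format_expose('8080/udp')                                # -> ('8080/udp',)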
<SYSTEM_TASK:>
Adds a file to the Docker build. An ``ADD`` command is inserted, and the path is stored for later packaging of
<END_TASK>
<USER_TASK:>
Description:
def add_file(self, src_path, dst_path=None, ctx_path=None, replace_space=True, expandvars=False, expanduser=False,
remove_final=False):
"""
Adds a file to the Docker build. An ``ADD`` command is inserted, and the path is stored for later packaging of
the context tarball.
:param src_path: Path to the file or directory.
:type src_path: unicode | str
:param dst_path: Destination path during the Docker build. By default uses the last element of `src_path`.
:type dst_path: unicode | str
:param ctx_path: Path inside the context tarball. Can be set in order to avoid name clashes. By default
identical to the destination path.
:type ctx_path: unicode | str
:param replace_space: Mask spaces in path names with a backslash. Default is ``True``.
:type replace_space: bool
:param expandvars: Expand local environment variables. Default is ``False``.
:type expandvars: bool
:param expanduser: Expand local user variables. Default is ``False``.
:type expanduser: bool
:param remove_final: Remove the file after the build operation has completed. Can be useful e.g. for source code
archives, which are no longer needed after building the binaries. Note that this will not reduce the size of
the resulting image (it may actually increase it) unless the image is squashed.
:type remove_final: bool
:return: The path of the file in the Dockerfile context.
:rtype: unicode | str
""" |
if dst_path is None:
head, tail = os.path.split(src_path)
if not tail:
# Handle trailing path separators.
tail = os.path.split(head)[1]
if not tail:
raise ValueError("Could not generate target path from input '{0}'; needs to be specified explicitly.".format(src_path))
target_path = tail
else:
target_path = dst_path
source_path = prepare_path(src_path, False, False, expandvars, expanduser)
target_path = prepare_path(target_path, replace_space, True, expandvars, expanduser)
if ctx_path:
context_path = prepare_path(ctx_path, replace_space, True, expandvars, expanduser)
else:
context_path = target_path
self.prefix('ADD', context_path, target_path)
self._files.append((source_path, context_path))
if remove_final:
self._remove_files.add(target_path)
return context_path |
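A hedged usage sketch; the DockerFile class name, constructor signature, and import path are assumptions derived from the method above:

from dockermap.build.dockerfile import DockerFile  # import path assumed

df = DockerFile('python:2.7')                  # base image argument assumed
ctx_path = df.add_file('dist/app-1.0.tar.gz',  # hypothetical source archive
                       '/tmp/app.tar.gz',
                       remove_final=True)
# An ADD instruction is written, the file is scheduled for the context
# tarball, and '/tmp/app.tar.gz' is marked for removal in the final layer.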
<SYSTEM_TASK:>
Adds content to the Dockerfile.
<END_TASK>
<USER_TASK:>
Description:
def write(self, input_str):
"""
Adds content to the Dockerfile.
:param input_str: Content.
:type input_str: unicode | str
""" |
self.check_not_finalized()
if isinstance(input_str, six.binary_type):
self.fileobj.write(input_str)
else:
self.fileobj.write(input_str.encode('utf-8')) |
<SYSTEM_TASK:>
Merges dependencies of the current configuration with further dependencies. For container configurations this
<END_TASK>
<USER_TASK:>
Description:
def merge_dependency(self, item, resolve_parent, parents):
"""
Merges dependencies of the current configuration with further dependencies. For container configurations this
means that parent dependencies are resolved first, and then the immediate dependencies of the current
configuration are added to the list, without duplicating any entries.
:param item: Configuration item.
:type item: (unicode | str, unicode | str, unicode | str, unicode | str)
:param resolve_parent: Function to resolve parent dependencies.
:type resolve_parent: function
:param parents: Parent dependency items.
:type parents: collections.Iterable[(unicode | str, unicode | str, unicode | str, unicode | str)]
:return: List of recursively resolved dependencies of this container.
:rtype: list[(unicode | str, unicode | str, unicode | str, unicode | str)]
:raise CircularDependency: If the current element depends on one found deeper in the hierarchy.
""" |
dep = []
for parent_key in parents:
if item == parent_key:
raise CircularDependency(item, True)
if parent_key.config_type == ItemType.CONTAINER:
parent_dep = resolve_parent(parent_key)
if item in parent_dep:
raise CircularDependency(item)
merge_list(dep, parent_dep)
merge_list(dep, parents)
return dep |
<SYSTEM_TASK:>
Loads a ContainerMap configuration from a YAML document stream.
<END_TASK>
<USER_TASK:>
Description:
def load_map(stream, name=None, check_integrity=True, check_duplicates=True):
"""
Loads a ContainerMap configuration from a YAML document stream.
:param stream: YAML stream.
:type stream: file
:param name: Name of the ContainerMap. If not provided, it is read from a ``name`` attribute on the
document root level.
:type name: unicode | str
:param check_integrity: Performs a brief integrity check; default is ``True``.
:type check_integrity: bool
:param check_duplicates: Check for duplicate attached volumes during integrity check.
:type check_duplicates: bool
:return: A ContainerMap object.
:rtype: ContainerMap
""" |
map_dict = yaml.safe_load(stream)
if isinstance(map_dict, dict):
map_name = name or map_dict.pop('name', None)
if not map_name:
raise ValueError("No map name provided, and none found in YAML stream.")
return ContainerMap(map_name, map_dict, check_integrity=check_integrity, check_duplicates=check_duplicates)
raise ValueError("Valid map could not be decoded.") |
<SYSTEM_TASK:>
Loads client configurations from a YAML document stream.
<END_TASK>
<USER_TASK:>
Description:
def load_clients(stream, configuration_class=ClientConfiguration):
"""
Loads client configurations from a YAML document stream.
:param stream: YAML stream.
:type stream: file
:param configuration_class: Class of the configuration object to create.
:type configuration_class: class
:return: A dictionary of client configuration objects.
:rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration]
""" |
client_dict = yaml.safe_load(stream)
if isinstance(client_dict, dict):
return {client_name: configuration_class(**client_config)
for client_name, client_config in six.iteritems(client_dict)}
raise ValueError("Valid configuration could not be decoded.") |
<SYSTEM_TASK:>
Loads a ContainerMap configuration from a YAML file.
<END_TASK>
<USER_TASK:>
Description:
def load_map_file(filename, name=None, check_integrity=True):
"""
Loads a ContainerMap configuration from a YAML file.
:param filename: YAML file name.
:type filename: unicode | str
:param name: Name of the ContainerMap. If ``None`` will attempt to find a ``name`` element on the root level of
the document; an empty string names the map according to the file, without extension.
:type name: unicode | str
:param check_integrity: Performs a brief integrity check; default is ``True``.
:type check_integrity: bool
:return: A ContainerMap object.
:rtype: ContainerMap
""" |
if name == '':
base_name = os.path.basename(filename)
map_name = os.path.splitext(base_name)[0]  # also handles file names without an extension
else:
map_name = name
with open(filename, 'r') as f:
return load_map(f, name=map_name, check_integrity=check_integrity) |
<SYSTEM_TASK:>
Loads client configurations from a YAML file.
<END_TASK>
<USER_TASK:>
Description:
def load_clients_file(filename, configuration_class=ClientConfiguration):
"""
Loads client configurations from a YAML file.
:param filename: YAML file name.
:type filename: unicode | str
:param configuration_class: Class of the configuration object to create.
:type configuration_class: class
:return: A dictionary of client configuration objects.
:rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration]
""" |
with open(filename, 'r') as f:
return load_clients(f, configuration_class=configuration_class) |
<SYSTEM_TASK:>
Returns the state generator to be used for the given action.
<END_TASK>
<USER_TASK:>
Description:
def get_state_generator(self, action_name, policy, kwargs):
"""
Returns the state generator to be used for the given action.
:param action_name: Action identifier name.
:type action_name: unicode | str
:param policy: An instance of the current policy class.
:type policy: dockermap.map.policy.base.BasePolicy
:param kwargs: Keyword arguments. Can be modified by the initialization of the state generator.
:type kwargs: dict
:return: State generator object.
:rtype: dockermap.map.state.base.AbstractStateGenerator
""" |
state_generator_cls = self.generators[action_name][0]
state_generator = state_generator_cls(policy, kwargs)
return state_generator |
<SYSTEM_TASK:>
Returns the action generator to be used for the given action.
<END_TASK>
<USER_TASK:>
Description:
def get_action_generator(self, action_name, policy, kwargs):
"""
Returns the action generator to be used for the given action.
:param action_name: Action identifier name.
:type action_name: unicode | str
:param policy: An instance of the current policy class.
:type policy: dockermap.map.policy.base.BasePolicy
:param kwargs: Keyword arguments. Can be modified by the initialization of the action generator.
:type kwargs: dict
:return: Action generator object.
:rtype: dockermap.map.action.base.AbstractActionGenerator
""" |
action_generator_cls = self.generators[action_name][1]
action_generator = action_generator_cls(policy, kwargs)
return action_generator |
<SYSTEM_TASK:>
Returns a generator of states in relation to the indicated action.
<END_TASK>
<USER_TASK:>
Description:
def get_states(self, action_name, config_name, instances=None, map_name=None, **kwargs):
"""
Returns a generator of states in relation to the indicated action.
:param action_name: Action name.
:type action_name: unicode | str
:param config_name: Name(s) of container configuration(s) or MapConfigId tuple(s).
:type config_name: unicode | str | collections.Iterable[unicode | str] | dockermap.map.input.InputConfigId | collections.Iterable[dockermap.map.input.InputConfigId]
:param instances: Optional instance names, where applicable but not included in ``config_name``.
:type instances: unicode | str | collections.Iterable[unicode | str]
:param map_name: Optional map name, where not included in ``config_name``.
:param kwargs: Additional kwargs for state generation, action generation, runner, or the client action.
:return: Resulting states of the configurations.
:rtype: collections.Iterable[dockermap.map.state.ConfigState]
""" |
policy = self.get_policy()
_set_forced_update_ids(kwargs, policy.container_maps, map_name or self._default_map, instances)
state_generator = self.get_state_generator(action_name, policy, kwargs)
log.debug("Remaining kwargs passed to client actions: %s", kwargs)
config_ids = get_map_config_ids(config_name, policy.container_maps, map_name or self._default_map,
instances)
log.debug("Generating states for configurations: %s", config_ids)
return state_generator.get_states(config_ids) |
<SYSTEM_TASK:>
Returns the entire set of actions performed for the indicated action name.
<END_TASK>
<USER_TASK:>
Description:
def get_actions(self, action_name, config_name, instances=None, map_name=None, **kwargs):
"""
Returns the entire set of actions performed for the indicated action name.
:param action_name: Action name.
:type action_name: unicode | str
:param config_name: Name(s) of container configuration(s) or MapConfigId tuple(s).
:type config_name: unicode | str | collections.Iterable[unicode | str] | dockermap.map.input.MapConfigId | collections.Iterable[dockermap.map.input.MapConfigId]
:param instances: Optional instance names, where applicable but not included in ``config_name``.
:type instances: unicode | str | collections.Iterable[unicode | str]
:param map_name: Optional map name, where not included in ``config_name``.
:param kwargs: Additional kwargs for state generation, action generation, runner, or the client action.
:return: Resulting actions of the configurations.
:rtype: collections.Iterable[list[dockermap.map.action.ItemAction]]
""" |
policy = self.get_policy()
action_generator = self.get_action_generator(action_name, policy, kwargs)
for state in self.get_states(action_name, config_name, instances=instances, map_name=map_name, **kwargs):
log.debug("Evaluating state: %s.", state)
actions = action_generator.get_state_actions(state, **kwargs)
if actions:
log.debug("Running actions: %s", actions)
yield actions
else:
log.debug("No actions returned.") |
<SYSTEM_TASK:>
Creates container instances for a container configuration.
<END_TASK>
<USER_TASK:>
Description:
def create(self, container, instances=None, map_name=None, **kwargs):
"""
Creates container instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance name to create. If not specified, will create all instances as specified in the
configuration (or just one default instance).
:type instances: tuple | list
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions result from this, they will only be applied to
the main container creation.
:return: Return values of created containers.
:rtype: list[dockermap.map.runner.ActionOutput]
""" |
return self.run_actions('create', container, instances=instances, map_name=map_name, **kwargs) |
<SYSTEM_TASK:>
Starts instances for a container configuration.
<END_TASK>
<USER_TASK:>
Description:
def start(self, container, instances=None, map_name=None, **kwargs):
"""
Starts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to start. If not specified, will start all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions result from this, they will only be applied to
the main container start.
:return: Return values of started containers.
:rtype: list[dockermap.map.runner.ActionOutput]
""" |
return self.run_actions('start', container, instances=instances, map_name=map_name, **kwargs) |
<SYSTEM_TASK:>
Restarts instances for a container configuration.
<END_TASK>
<USER_TASK:>
Description:
def restart(self, container, instances=None, map_name=None, **kwargs):
"""
Restarts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to restart. If not specified, will restart all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions result from this, they will only be applied to
the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput]
""" |
return self.run_actions('restart', container, instances=instances, map_name=map_name, **kwargs) |
<SYSTEM_TASK:>
Stops instances for a container configuration.
<END_TASK>
<USER_TASK:>
Description:
def stop(self, container, instances=None, map_name=None, **kwargs):
"""
Stops instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will stop all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param raise_on_error: Errors on stop and removal may result from Docker volume problems that do not
affect further actions. Such errors are always logged, but do not raise an exception unless this is set to
``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal.
:type raise_on_error: bool
:param kwargs: Additional kwargs. If multiple actions result from this, they will only be applied to
the main container stop.
:return: Return values of stopped containers.
:rtype: list[dockermap.map.runner.ActionOutput]
""" |
return self.run_actions('stop', container, instances=instances, map_name=map_name, **kwargs) |
<SYSTEM_TASK:>
Remove instances from a container configuration.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, container, instances=None, map_name=None, **kwargs):
"""
Remove instances from a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to remove. If not specified, will remove all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions result from this, they will only be applied to
the main container removal.
:return: Return values of removed containers.
:rtype: list[dockermap.map.runner.ActionOutput]
""" |
return self.run_actions('remove', container, instances=instances, map_name=map_name, **kwargs) |
<SYSTEM_TASK:>
Start up container instances from a container configuration. Typically this means creating and starting
<END_TASK>
<USER_TASK:>
Description:
def startup(self, container, instances=None, map_name=None, **kwargs):
"""
Start up container instances from a container configuration. Typically this means creating and starting
containers and their dependencies. Note that not all policy classes necessarily implement this method.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to start up. If not specified, will start up all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. Only options controlling policy behavior are considered.
:return: Return values of created containers.
:rtype: list[dockermap.map.runner.ActionOutput]
""" |
return self.run_actions('startup', container, instances=instances, map_name=map_name, **kwargs) |
<SYSTEM_TASK:>
Shut down container instances from a container configuration. Typically this means stopping and removing
<END_TASK>
<USER_TASK:>
Description:
def shutdown(self, container, instances=None, map_name=None, **kwargs):
"""
Shut down container instances from a container configuration. Typically this means stopping and removing
containers. Note that not all policy classes necessarily implement this method.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to shut down. If not specified, will shut down all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. Only options controlling policy behavior are considered.
:return: Return values of removed containers.
:rtype: list[dockermap.map.runner.ActionOutput]
""" |
return self.run_actions('shutdown', container, instances=instances, map_name=map_name, **kwargs) |
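The wrappers above compose into a typical lifecycle round trip; ``map_client`` and all names are hypothetical:

map_client.startup('web', map_name='example_map')   # create and start, honoring dependencies
map_client.stop('web', map_name='example_map',
                raise_on_error=False)               # tolerate volume-related errors
map_client.shutdown('web', map_name='example_map')  # stop and remove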