Dataset fields:
_id: string, 2 to 7 characters
title: string, 1 to 88 characters
partition: string, 3 classes
text: string, 75 to 19.8k characters
language: string, 1 class
meta_information: dict
q1900
TableProcessor._build_row
train
def _build_row(self, row, parent, align, border): """ Given a row of text, build table cells. """ tr = etree.SubElement(parent, 'tr') tag = 'td' if parent.tag == 'thead': tag = 'th' cells = self._split_row(row, border) # We use align here rather than cells to ensure every row # contains the same number of columns. for i, a in enumerate(align): c = etree.SubElement(tr, tag) try: c.text = cells[i].strip() except IndexError: c.text = "" if a: c.set('align', a)
python
{ "resource": "" }
q1901
TableProcessor._split_row
train
def _split_row(self, row, border): """ split a row of text into list of cells. """ if border: if row.startswith('|'): row = row[1:] if row.endswith('|'): row = row[:-1] return row.split('|')
python
{ "resource": "" }
q1902
TableExtension.extendMarkdown
train
def extendMarkdown(self, md, md_globals): """ Add an instance of TableProcessor to BlockParser. """ md.parser.blockprocessors.add('table', TableProcessor(md.parser), '<hashheader')
python
{ "resource": "" }
q1903
get_all_static
train
def get_all_static(): """ Get all the static files directories found by ``STATICFILES_FINDERS`` :return: set of paths (top-level folders only) """ static_dirs = set() for finder in settings.STATICFILES_FINDERS: finder = finders.get_finder(finder) if hasattr(finder, 'storages'): for storage in finder.storages.values(): static_dirs.add(storage.location) if hasattr(finder, 'storage'): static_dirs.add(finder.storage.location) return static_dirs
python
{ "resource": "" }
q1904
BaseCompiler.input
train
def input(self, **kwargs): """ Specify temporary input file extension. Browserify requires explicit file extension (".js" or ".json" by default). https://github.com/substack/node-browserify/issues/1469 """ if self.infile is None and "{infile}" in self.command: if self.filename is None: self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext) self.infile.write(self.content.encode(self.default_encoding)) self.infile.flush() self.options += ( ('infile', self.infile.name), ) return super(BaseCompiler, self).input(**kwargs)
python
{ "resource": "" }
q1905
graph_hash
train
def graph_hash(obj): '''this hashes all types to a hash without collisions. Python's hashing algorithms are not cross-type compatible, but hashing tuples with the type as the first element seems to do the trick''' obj_type = type(obj) try: # this works for hashables return hash((obj_type, obj)) except TypeError: # this works for unhashable object containers since graphdb # wants to identify different containers # instead of the sum of their current internals return hash((obj_type, id(obj)))
python
{ "resource": "" }
q1906
VList.where
train
def where(self, relation, filter_fn): ''' use this to filter VLists, simply provide a filter function and what relation to apply it to ''' assert type(relation).__name__ in {'str','unicode'}, 'where needs the first arg to be a string' assert callable(filter_fn), 'filter_fn needs to be callable' return VList(i for i in self if relation in i._relations() and any(filter_fn(_()) for _ in i[relation]))
python
{ "resource": "" }
q1907
VList._where
train
def _where(self, filter_fn): ''' use this to filter VLists, simply provide a filter function to filter the current found objects ''' assert callable(filter_fn), 'filter_fn needs to be callable' return VList(i for i in self if filter_fn(i()))
python
{ "resource": "" }
q1908
VList._where
train
def _where(self, **kwargs): '''use this to filter VLists with kv pairs''' out = self for k,v in kwargs.items(): out = out.where(k, lambda i:i==v) return out
python
{ "resource": "" }
q1909
SQLiteGraphDB.find
train
def find(self, target, relation): ''' returns back all elements the target has a relation to ''' query = 'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?' # src is id not source :/ for i in self._execute(query, (relation, self.serialize(target))): yield self.deserialize(i[0])
python
{ "resource": "" }
q1910
Sock.parse_buf
train
def parse_buf(self, encoding="unicode"): """ Since TCP is a stream-orientated protocol, responses aren't guaranteed to be complete when they arrive. The buffer stores all the data and this function splits the data into replies based on the new line delimiter. """ buf_len = len(self.buf) replies = [] reply = b"" chop = 0 skip = 0 i = 0 buf_len = len(self.buf) for i in range(0, buf_len): ch = self.buf[i:i + 1] if skip: skip -= 1 i += 1 continue nxt = i + 1 if nxt < buf_len: if ch == b"\r" and self.buf[nxt:nxt + 1] == b"\n": # Append new reply. if reply != b"": if encoding == "unicode": replies.append(encode_str(reply, encoding)) else: replies.append(reply) reply = b"" # Truncate the whole buf if chop is out of bounds. chop = nxt + 1 skip = 1 i += 1 continue reply += ch i += 1 # Truncate buf. if chop: self.buf = self.buf[chop:] return replies
python
{ "resource": "" }
q1911
Sock.get_chunks
train
def get_chunks(self, fixed_limit=None, encoding="unicode"): """ This is the function which handles retrieving new data chunks. It's main logic is avoiding a recv call blocking forever and halting the program flow. To do this, it manages errors and keeps an eye on the buffer to avoid overflows and DoS attacks. http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r http://stackoverflow.com/questions/3187565/select-and-ssl-in-python """ # Socket is disconnected. if not self.connected: return # Recv chunks until network buffer is empty. repeat = 1 wait = 0.2 chunk_no = 0 max_buf = self.max_buf max_chunks = self.max_chunks if fixed_limit is not None: max_buf = fixed_limit max_chunks = fixed_limit while repeat: chunk_size = self.chunk_size while True: # Don't exceed buffer size. buf_len = len(self.buf) if buf_len >= max_buf: break remaining = max_buf - buf_len if remaining < chunk_size: chunk_size = remaining # Don't allow non-blocking sockets to be # DoSed by multiple small replies. if chunk_no >= max_chunks and not self.blocking: break try: chunk = self.s.recv(chunk_size) except socket.timeout as e: self.debug_print("Get chunks timed out.") self.debug_print(e) # Timeout on blocking sockets. err = e.args[0] self.debug_print(err) if err == "timed out": repeat = 0 break except ssl.SSLError as e: # Will block on non-blocking SSL sockets. if e.errno == ssl.SSL_ERROR_WANT_READ: self.debug_print("SSL_ERROR_WANT_READ") break else: self.debug_print("Get chunks ssl error") self.close() return except socket.error as e: # Will block on nonblocking non-SSL sockets. err = e.args[0] if err == errno.EAGAIN or err == errno.EWOULDBLOCK: break else: # Connection closed or other problem. self.debug_print("get chunks other closing") self.close() return else: if chunk == b"": self.close() return # Avoid decoding errors. self.buf += chunk # Otherwise the loop will be endless. if self.blocking: break # Used to avoid DoS of small packets. chunk_no += 1 # Repeat is already set -- manual skip. if not repeat: break else: repeat = 0 # Block until there's a full reply or there's a timeout. if self.blocking: if fixed_limit is None: # Partial response. if self.delimiter not in self.buf: repeat = 1 time.sleep(wait)
python
{ "resource": "" }
q1912
Net.validate_node
train
def validate_node(self, node_ip, node_port=None, same_nodes=1): self.debug_print("Validating: " + node_ip) # Is this a valid IP? if not is_ip_valid(node_ip) or node_ip == "0.0.0.0": self.debug_print("Invalid node ip in validate node") return 0 # Is this a valid port? if node_port != 0 and node_port is not None: if not is_valid_port(node_port): self.debug_print("Invalid node port in validate port") return 0 """ Don't accept connections from self to passive server or connections to already connected nodes. """ if not self.enable_duplicate_ip_cons: # Don't connect to ourself. if (node_ip == "127.0.0.1" or node_ip == get_lan_ip(self.interface) or node_ip == self.wan_ip): self.debug_print("Cannot connect to ourself.") return 0 # No, really: don't connect to ourself. if node_ip == self.passive_bind and node_port == self.passive_port: self.debug_print("Error connecting to same listen server.") return 0 # Don't connect to same nodes. if same_nodes: for node in self.outbound + self.inbound: try: addr, port = node["con"].s.getpeername() if node_ip == addr: self.debug_print("Already connected to this node.") return 0 except Exception as e: print(e) return 0 return 1
python
{ "resource": "" }
q1913
Net.bootstrap
train
def bootstrap(self): """ When the software is first started, it needs to retrieve a list of nodes to connect to the network to. This function asks the server for N nodes which consists of at least N passive nodes and N simultaneous nodes. The simultaneous nodes are prioritized if the node_type for the machine running this software is simultaneous, with passive nodes being used as a fallback. Otherwise, the node exclusively uses passive nodes to bootstrap. This algorithm is designed to preserve passive node's inbound connection slots. """ # Disable bootstrap. if not self.enable_bootstrap: return None # Avoid raping the rendezvous server. t = time.time() if self.last_bootstrap is not None: if t - self.last_bootstrap <= rendezvous_interval: self.debug_print("Bootstrapped recently") return None self.last_bootstrap = t self.debug_print("Searching for nodes to connect to.") try: connection_slots = self.max_outbound - (len(self.outbound)) if connection_slots > 0: # Connect to rendezvous server. rendezvous_con = self.rendezvous.server_connect() # Retrieve random nodes to bootstrap with. rendezvous_con.send_line("BOOTSTRAP " + str(self.max_outbound * 2)) choices = rendezvous_con.recv_line(timeout=2) if choices == "NODES EMPTY": rendezvous_con.close() self.debug_print("Node list is empty.") return self else: self.debug_print("Found node list.") # Parse node list. choices = re.findall("(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices) rendezvous_con.s.close() # Attempt to make active simultaneous connections. passive_nodes = [] for node in choices: # Out of connection slots. if not connection_slots: break # Add to list of passive nodes. node_type, node_ip, node_port = node self.debug_print(str(node)) if node_type == "p": passive_nodes.append(node) # Use passive to make up the remaining cons. i = 0 while i < len(passive_nodes) and connection_slots > 0: node_type, node_ip, node_port = passive_nodes[i] con = self.add_node(node_ip, node_port, "passive") if con is not None: connection_slots -= 1 self.debug_print("Con successful.") else: self.debug_print("Con failed.") i += 1 except Exception as e: self.debug_print("Unknown error in bootstrap()") error = parse_exception(e) log_exception(self.error_log_path, error) return self
python
{ "resource": "" }
q1914
Net.advertise
train
def advertise(self): """ This function tells the rendezvous server that our node is ready to accept connections from other nodes on the P2P network that run the bootstrap function. It's only used when net_type == p2p """ # Advertise is disabled. if not self.enable_advertise: self.debug_print("Advertise is disabled!") return None # Direct net server is reserved for direct connections only. if self.net_type == "direct" and self.node_type == "passive": return None # Net isn't started. if not self.is_net_started: raise Exception("Please call start() before you call advertise()") # Avoid hammering the rendezvous server with excessive requests. t = time.time() if self.last_advertise is not None: if t - self.last_advertise <= advertise_interval: return None if len(self.inbound) >= self.min_connected: return None self.last_advertise = t # Tell rendezvous server to list us. try: # We're a passive node. if self.node_type == "passive" and\ self.passive_port is not None and\ self.enable_advertise: self.rendezvous.passive_listen(self.passive_port, self.max_inbound) """ Simultaneous open is only used as a fail-safe for connections to nodes on the direct_net and only direct_net can list itself as simultaneous so it's safe to leave this enabled. """ if self.node_type == "simultaneous": self.rendezvous.simultaneous_listen() except Exception as e: error = parse_exception(e) log_exception(self.error_log_path, error) return self
python
{ "resource": "" }
q1915
Net.determine_node
train
def determine_node(self): """ Determines the type of node based on a combination of forwarding reachability and NAT type. """ # Manually set node_type as simultaneous. if self.node_type == "simultaneous": if self.nat_type != "unknown": return "simultaneous" # Get IP of binding interface. unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"] if self.passive_bind in unspecific_bind: lan_ip = get_lan_ip(self.interface) else: lan_ip = self.passive_bind # Passive node checks. if lan_ip is not None \ and self.passive_port is not None and self.enable_forwarding: self.debug_print("Checking if port is forwarded.") # Check port isn't already forwarded. if is_port_forwarded(lan_ip, self.passive_port, "TCP", self.forwarding_servers): msg = "Port already forwarded. Skipping NAT traversal." self.debug_print(msg) self.forwarding_type = "forwarded" return "passive" else: self.debug_print("Port is not already forwarded.") # Most routers. try: self.debug_print("Trying UPnP") UPnP(self.interface).forward_port("TCP", self.passive_port, lan_ip) if is_port_forwarded(lan_ip, self.passive_port, "TCP", self.forwarding_servers): self.forwarding_type = "UPnP" self.debug_print("Forwarded port with UPnP.") else: self.debug_print("UPnP failed to forward port.") except Exception as e: # Log exception. error = parse_exception(e) log_exception(self.error_log_path, error) self.debug_print("UPnP failed to forward port.") # Apple devices. try: self.debug_print("Trying NATPMP.") NatPMP(self.interface).forward_port("TCP", self.passive_port, lan_ip) if is_port_forwarded(lan_ip, self.passive_port, "TCP", self.forwarding_servers): self.forwarding_type = "NATPMP" self.debug_print("Port forwarded with NATPMP.") else: self.debug_print("Failed to forward port with NATPMP.") self.debug_print("Falling back on TCP hole punching or" " proxying.") except Exception as e: # Log exception error = parse_exception(e) log_exception(self.error_log_path, error) self.debug_print("Failed to forward port with NATPMP.") # Check it worked. if self.forwarding_type != "manual": return "passive" # Fail-safe node types. if self.nat_type != "unknown": return "simultaneous" else: return "active"
python
{ "resource": "" }
q1916
Net.start
train
def start(self): """ This function determines node and NAT type, saves connectivity details, and starts any needed servers to be a part of the network. This is usually the first function called after initialising the Net class. """ self.debug_print("Starting networking.") self.debug_print("Make sure to iterate over replies if you need" " connection alive management!") # Register a cnt + c handler signal.signal(signal.SIGINT, self.stop) # Save WAN IP. self.debug_print("WAN IP = " + str(self.wan_ip)) # Check rendezvous server is up. try: rendezvous_con = self.rendezvous.server_connect() rendezvous_con.close() except: raise Exception("Unable to connect to rendezvous server.") # Started no matter what # since LAN connections are always possible. self.start_passive_server() # Determine NAT type. if self.nat_type == "unknown": self.debug_print("Determining NAT type.") nat_type = self.rendezvous.determine_nat() if nat_type is not None and nat_type != "unknown": self.nat_type = nat_type self.rendezvous.nat_type = nat_type self.debug_print("NAT type = " + nat_type) else: self.debug_print("Unable to determine NAT type.") # Check NAT type if node is simultaneous # is manually specified. if self.node_type == "simultaneous": if self.nat_type not in self.rendezvous.predictable_nats: self.debug_print("Manual setting of simultanous specified but" " ignored since NAT does not support it.") self.node_type = "active" else: # Determine node type. self.debug_print("Determining node type.") # No checks for manually specifying passive # (there probably should be.) if self.node_type == "unknown": self.node_type = self.determine_node() # Prevent P2P nodes from running as simultaneous. if self.net_type == "p2p": """ TCP hole punching is reserved specifically for direct networks (a net object reserved for receiving direct connections -- p2p is for connecting to the main network. The reason for this is you can't do multiple TCP hole punches at the same time so reserved for direct network where it's most needed. """ if self.node_type == "simultaneous": self.debug_print("Simultaneous is not allowed for P2P") self.node_type = "active" self.disable_simultaneous() self.debug_print("Node type = " + self.node_type) # Close stray cons from determine_node() tests. self.close_cons() # Set net started status. self.is_net_started = 1 # Initialise our UNL details. self.unl = UNL( net=self, dht_node=self.dht_node, wan_ip=self.wan_ip ) # Nestled calls. return self
python
{ "resource": "" }
q1917
RendezvousProtocol.send_remote_port
train
def send_remote_port(self): """ Sends the remote port mapped for the connection. This port is surprisingly often the same as the locally bound port for an endpoint because a lot of NAT types preserve the port. """ msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port)) self.send_line(msg)
python
{ "resource": "" }
q1918
RendezvousProtocol.cleanup_candidates
train
def cleanup_candidates(self, node_ip): """ Removes old TCP hole punching candidates for a designated node if a certain amount of time has passed since they last connected. """ if node_ip in self.factory.candidates: old_candidates = [] for candidate in self.factory.candidates[node_ip]: elapsed = int(time.time() - candidate["time"]) if elapsed > self.challege_timeout: old_candidates.append(candidate) for candidate in old_candidates: self.factory.candidates[node_ip].remove(candidate)
python
{ "resource": "" }
q1919
RendezvousProtocol.propogate_candidates
train
def propogate_candidates(self, node_ip): """ Used to propagate new candidates to passive simultaneous nodes. """ if node_ip in self.factory.candidates: old_candidates = [] for candidate in self.factory.candidates[node_ip]: # Not connected. if not candidate["con"].connected: continue # Already sent -- updated when they accept this challenge. if candidate["propogated"]: continue # Notify node of challenge from client. msg = "CHALLENGE %s %s %s" % ( candidate["ip_addr"], " ".join(map(str, candidate["predictions"])), candidate["proto"]) self.factory.nodes["simultaneous"][node_ip]["con"].\ send_line(msg) old_candidates.append(candidate)
python
{ "resource": "" }
q1920
RendezvousProtocol.synchronize_simultaneous
train
def synchronize_simultaneous(self, node_ip): """ Because adjacent mappings for certain NAT types can be stolen by other connections, the purpose of this function is to ensure the last connection by a passive simultaneous node is recent compared to the time for a candidate, to increase the chance that the predicted mappings remain active for the TCP hole punching attempt. """ for candidate in self.factory.candidates[node_ip]: # Only if candidate is connected. if not candidate["con"].connected: continue # Synchronise simultaneous node. if candidate["time"] -\ self.factory.nodes["simultaneous"][node_ip]["time"] >\ self.challege_timeout: msg = "RECONNECT" self.factory.nodes["simultaneous"][node_ip]["con"].\ send_line(msg) return self.cleanup_candidates(node_ip) self.propogate_candidates(node_ip)
python
{ "resource": "" }
q1921
RendezvousProtocol.connectionLost
train
def connectionLost(self, reason): """ Mostly handles clean-up of node + candidate structures. Avoids memory exhaustion for a large number of connections. """ try: self.connected = False if debug: print(self.log_entry("CLOSED =", "none")) # Every five minutes: cleanup t = time.time() if time.time() - self.factory.last_cleanup >= self.cleanup: self.factory.last_cleanup = t # Delete old passive nodes. old_node_ips = [] for node_ip in list(self.factory.nodes["passive"]): passive_node = self.factory.nodes["passive"][node_ip] # Gives enough time for passive nodes to receive clients. if t - passive_node["time"] >= self.node_lifetime: old_node_ips.append(node_ip) for node_ip in old_node_ips: del self.factory.nodes["passive"][node_ip] # Delete old simultaneous nodes. old_node_ips = [] for node_ip in list(self.factory.nodes["simultaneous"]): simultaneous_node =\ self.factory.nodes["simultaneous"][node_ip] # Gives enough time for passive nodes to receive clients. if t - simultaneous_node["time"] >= self.node_lifetime: old_node_ips.append(node_ip) for node_ip in old_node_ips: del self.factory.nodes["simultaneous"][node_ip] # Delete old candidates and candidate structs. old_node_ips = [] for node_ip in list(self.factory.candidates): # Record old candidates. old_candidates = [] for candidate in self.factory.candidates[node_ip]: # Hole punching is ms time sensitive. # Candidates older than this is safe to assume # they're not needed. if node_ip not in self.factory.nodes["simultaneous"] \ and t - candidate["time"] >= self.challenge_timeout * 5: old_candidates.append(candidate) # Remove old candidates. for candidate in old_candidates: self.factory.candidates[node_ip].remove(candidate) # Record old node IPs. if not len(self.factory.candidates[node_ip]) and \ node_ip not in self.factory.nodes["simultaneous"]: old_node_ips.append(node_ip) # Remove old node IPs. for node_ip in old_node_ips: del self.factory.candidates[node_ip] except Exception as e: error = parse_exception(e) log_exception(error_log_path, error) print(self.log_entry("ERROR =", error))
python
{ "resource": "" }
q1922
IPgetter.get_external_ip
train
def get_external_ip(self): """ This function gets your IP from a random server """ random.shuffle(self.server_list) myip = '' for server in self.server_list[:3]: myip = self.fetch(server) if myip != '': return myip else: continue return ''
python
{ "resource": "" }
q1923
IPgetter.fetch
train
def fetch(self, server): """ This function gets your IP from a specific server """ t = None socket_default_timeout = socket.getdefaulttimeout() opener = urllib.build_opener() opener.addheaders = [('User-agent', "Mozilla/5.0 (X11; Linux x86_64; rv:24.0)" " Gecko/20100101 Firefox/24.0")] try: # Close url resource if fetching not finished within timeout. t = Timer(self.timeout, self.handle_timeout, [self.url]) t.start() # Open URL. if version_info[0:2] == (2, 5): # Support for Python 2.5.* using socket hack # (Changes global socket timeout.) socket.setdefaulttimeout(self.timeout) self.url = opener.open(server) else: self.url = opener.open(server, timeout=self.timeout) # Read response. content = self.url.read() # Didn't want to import chardet. Prefered to stick to stdlib if PY3K: try: content = content.decode('UTF-8') except UnicodeDecodeError: content = content.decode('ISO-8859-1') p = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(' p += '25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[' p += '01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)' m = re.search( p, content) myip = m.group(0) if len(myip) > 0: return myip else: return '' except Exception as e: print(e) return '' finally: if self.url is not None: self.url.close() self.url = None if t is not None: t.cancel() # Reset default socket timeout. if socket.getdefaulttimeout() != socket_default_timeout: socket.setdefaulttimeout(socket_default_timeout)
python
{ "resource": "" }
q1924
UNL.connect
train
def connect(self, their_unl, events, force_master=1, hairpin=1, nonce="0" * 64): """ A new thread is spawned because many of the connection techniques rely on sleep to determine connection outcome or to synchronise hole punching techniques. If the sleep is in its own thread it won't block main execution. """ parms = (their_unl, events, force_master, hairpin, nonce) t = Thread(target=self.connect_handler, args=parms) t.start() self.unl_threads.append(t)
python
{ "resource": "" }
q1925
SysClock.calculate_clock_skew
train
def calculate_clock_skew(self): """ Compute the average and standard deviation using all the data points. """ n = self.statx_n(self.data_points) """ Required to be able to compute the standard deviation. """ if n < 1: return Decimal("0") avg = self.statx_avg(self.data_points) sdev = self.statx_sdev(self.data_points) """ Incrementally remove aberration points. """ for k in range(0, self.clean_steps): """ Remove aberration points: keep only the sigma range around the average. """ min_val = avg - sdev max_val = avg + sdev cleaned_data_points = [] for i in range(0, n): v = self.data_points[i] if v < min_val or v > max_val: continue cleaned_data_points.append(v) self.data_points = cleaned_data_points[:] """ Recompute the new average using the "sound" points we kept. """ n = self.statx_n(self.data_points) """ Not enough data to compute standard deviation. """ if n < 2: break avg = self.statx_avg(self.data_points) sdev = self.statx_sdev(self.data_points) if sdev <= self.max_sdev or n < self.min_data: break """ If the standard deviation is still too large, we cannot update our clock. Collect more points. If we don't have a minimum amount of data, don't attempt the update yet, continue collecting. """ if sdev > self.max_sdev or n < self.min_data: return Decimal("0") return avg
python
{ "resource": "" }
q1926
RendezvousClient.simultaneous_listen
train
def simultaneous_listen(self): """ This function is called by passive simultaneous nodes who wish to establish themselves as such. It sets up a connection to the Rendezvous Server to monitor for new hole punching requests. """ # Close socket. if self.server_con is not None: self.server_con.s.close() self.server_con = None # Reset predictions + mappings. self.mappings = None self.predictions = None # Connect to rendezvous server. parts = self.sequential_connect() if parts is None: return 0 con, mappings, predictions = parts con.blocking = 0 con.timeout = 0 con.s.settimeout(0) self.server_con = con self.mappings = mappings self.predictions = predictions # Register simultaneous node with server. msg = "SIMULTANEOUS READY 0 0" ret = self.server_con.send_line(msg) if not ret: return 0 return 1
python
{ "resource": "" }
q1927
RendezvousClient.predict_mappings
train
def predict_mappings(self, mappings): """ This function is used to predict the remote ports that a NAT will map a local connection to. It requires the NAT type to be determined before use. Currently supports preserving and delta type mapping behaviour. """ if self.nat_type not in self.predictable_nats: msg = "Can't predict mappings for non-predictable NAT type." raise Exception(msg) for mapping in mappings: mapping["bound"] = mapping["sock"].getsockname()[1] if self.nat_type == "preserving": mapping["remote"] = mapping["source"] if self.nat_type == "delta": max_port = 65535 mapping["remote"] = int(mapping["source"]) + self.delta # Overflow or underflow = wrap port around. if mapping["remote"] > max_port: mapping["remote"] -= max_port if mapping["remote"] < 0: mapping["remote"] = max_port - -mapping["remote"] # Unknown error. if mapping["remote"] < 1 or mapping["remote"] > max_port: mapping["remote"] = 1 mapping["remote"] = str(mapping["remote"]) return mappings
python
{ "resource": "" }
q1928
RendezvousClient.parse_remote_port
train
def parse_remote_port(self, reply): """ Parses a remote port from a Rendezvous Server's response. """ remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply) if not len(remote_port): remote_port = 0 else: remote_port = int(remote_port[0][1]) if remote_port < 1 or remote_port > 65535: remote_port = 0 return remote_port
python
{ "resource": "" }
q1929
get_unused_port
train
def get_unused_port(port=None): """Return a port number that is not currently in use.""" if port is None or port < 1024 or port > 65535: port = random.randint(1024, 65535) assert(1024 <= port <= 65535) while True: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.bind(('', port)) # Try to open port except socket.error as e: if e.errno in (98, 10048): # 98, 10048 means address already bound return get_unused_port(None) raise e s.close() return port
python
{ "resource": "" }
q1930
get_lan_ip
train
def get_lan_ip(interface="default"): if sys.version_info < (3, 0, 0): if type(interface) == str: interface = unicode(interface) else: if type(interface) == bytes: interface = interface.decode("utf-8") # Get ID of interface that handles WAN stuff. default_gateway = get_default_gateway(interface) gateways = netifaces.gateways() wan_id = None if netifaces.AF_INET in gateways: gw_list = gateways[netifaces.AF_INET] for gw_info in gw_list: if gw_info[0] == default_gateway: wan_id = gw_info[1] break # Find LAN IP of interface for WAN stuff. interfaces = netifaces.interfaces() if wan_id in interfaces: families = netifaces.ifaddresses(wan_id) if netifaces.AF_INET in families: if_info_list = families[netifaces.AF_INET] for if_info in if_info_list: if "addr" in if_info: return if_info["addr"] """ Execution may reach here if the host is using virtual interfaces on Linux and there are no gateways which suggests the host is a VPS or server. In this case """ if platform.system() == "Linux": if ip is not None: return ip.routes["8.8.8.8"]["prefsrc"] return None
python
{ "resource": "" }
q1931
get_wan_ip
train
def get_wan_ip(n=0): """ That IP module sucks. Occasionally it returns an IP address behind cloudflare which probably happens when cloudflare tries to proxy your web request because it thinks you're trying to DoS. It's better if we just run our own infrastructure. """ if n == 2: try: ip = myip() ip = extract_ip(ip) if is_ip_valid(ip): return ip except Exception as e: print(str(e)) return None # Fail-safe: use centralized server for IP lookup. from pyp2p.net import forwarding_servers for forwarding_server in forwarding_servers: url = "http://" + forwarding_server["addr"] + ":" url += str(forwarding_server["port"]) url += forwarding_server["url"] url += "?action=get_wan_ip" try: r = urlopen(url, timeout=5) response = r.read().decode("utf-8") response = extract_ip(response) if is_ip_valid(response): return response except Exception as e: print(str(e)) continue time.sleep(1) return get_wan_ip(n + 1)
python
{ "resource": "" }
q1932
Cron.initialize
train
def initialize(self): """Initialize croniter and related times""" if self.croniter is None: self.time = time.time() self.datetime = datetime.now(self.tz) self.loop_time = self.loop.time() self.croniter = croniter(self.spec, start_time=self.datetime)
python
{ "resource": "" }
q1933
Cron.get_next
train
def get_next(self): """Return next iteration time related to loop time""" return self.loop_time + (self.croniter.get_next(float) - self.time)
python
{ "resource": "" }
q1934
Cron.call_next
train
def call_next(self): """Set next hop in the loop. Call task""" if self.handle is not None: self.handle.cancel() next_time = self.get_next() self.handle = self.loop.call_at(next_time, self.call_next) self.call_func()
python
{ "resource": "" }
q1935
Cron.call_func
train
def call_func(self, *args, **kwargs): """Called. Take care of exceptions using gather""" asyncio.gather( self.cron(*args, **kwargs), loop=self.loop, return_exceptions=True ).add_done_callback(self.set_result)
python
{ "resource": "" }
q1936
cmd
train
def cmd(): '''Handler for command line invocation''' # Try to handle any reasonable thing thrown at this. # Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout # and treat any subsequent arguments as a space separated string to # be titlecased (so it still works if people forget quotes) parser = argparse.ArgumentParser() in_group = parser.add_mutually_exclusive_group() in_group.add_argument('string', nargs='*', default=[], help='String to titlecase') in_group.add_argument('-f', '--input-file', help='File to read from to titlecase') parser.add_argument('-o', '--output-file', help='File to write titlecased output to') args = parser.parse_args() if args.input_file is not None: if args.input_file == '-': ifile = sys.stdin else: ifile = open(args.input_file) else: ifile = sys.stdin if args.output_file is not None: if args.output_file == '-': ofile = sys.stdout else: ofile = open(args.output_file, 'w') else: ofile = sys.stdout if len(args.string) > 0: in_string = ' '.join(args.string) else: with ifile: in_string = ifile.read() with ofile: ofile.write(titlecase(in_string))
python
{ "resource": "" }
q1937
Checker.add_options
train
def add_options(cls, parser: OptionManager) -> None: """ ``flake8`` api method to register new plugin options. See :class:`.Configuration` docs for detailed options reference. Arguments: parser: ``flake8`` option parser instance. """ parser.add_option( '--eradicate-aggressive', default=False, help=( 'Enables aggressive mode for eradicate; ' 'this may result in false positives' ), action='store_true', type=None, )
python
{ "resource": "" }
q1938
Checker.run
train
def run(self) -> Generator[Tuple[int, int, str, type], None, None]: """ Runs the checker. ``fix_file()`` only mutates the buffer object. It is the only way to find out if some error happened. """ if self.filename != STDIN: buffer = StringIO() options = _Options(aggressive=self.options.eradicate_aggressive) fix_file(self.filename, options, buffer) traceback = buffer.getvalue() if traceback: yield 1, 0, self._error(traceback), type(self)
python
{ "resource": "" }
q1939
unique
train
def unique(g): """ Yield values yielded by ``g``, removing any duplicates. Example ------- >>> list(unique(iter([1, 3, 1, 2, 3]))) [1, 3, 2] """ yielded = set() for value in g: if value not in yielded: yield value yielded.add(value)
python
{ "resource": "" }
q1940
static_get_type_attr
train
def static_get_type_attr(t, name): """ Get a type attribute statically, circumventing the descriptor protocol. """ for type_ in t.mro(): try: return vars(type_)[name] except KeyError: pass raise AttributeError(name)
python
{ "resource": "" }
q1941
_conflicting_defaults
train
def _conflicting_defaults(typename, conflicts): """Format an error message for conflicting default implementations. Parameters ---------- typename : str Name of the type for which we're producing an error. conflicts : dict[str -> list[Interface]] Map from strings to interfaces providing a default with that name. Returns ------- message : str User-facing error message. """ message = "\nclass {C} received conflicting default implementations:".format( C=typename, ) for attrname, interfaces in conflicts.items(): message += dedent( """ The following interfaces provided default implementations for {attr!r}: {interfaces}""" ).format( attr=attrname, interfaces=bulleted_list(sorted(map(getname, interfaces))), ) return InvalidImplementation(message)
python
{ "resource": "" }
q1942
InterfaceMeta._diff_signatures
train
def _diff_signatures(self, type_): """ Diff our method signatures against the methods provided by type_. Parameters ---------- type_ : type The type to check. Returns ------- missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature] # noqa ``missing`` is a list of missing interface names. ``mistyped`` is a dict mapping names to incorrect types. ``mismatched`` is a dict mapping names to incorrect signatures. """ missing = [] mistyped = {} mismatched = {} for name, iface_sig in self._signatures.items(): try: # Don't invoke the descriptor protocol here so that we get # staticmethod/classmethod/property objects instead of the # functions they wrap. f = static_get_type_attr(type_, name) except AttributeError: missing.append(name) continue impl_sig = TypedSignature(f) if not issubclass(impl_sig.type, iface_sig.type): mistyped[name] = impl_sig.type if not compatible(impl_sig.signature, iface_sig.signature): mismatched[name] = impl_sig return missing, mistyped, mismatched
python
{ "resource": "" }
q1943
InterfaceMeta.verify
train
def verify(self, type_): """ Check whether a type implements ``self``. Parameters ---------- type_ : type The type to check. Raises ------ TypeError If ``type_`` doesn't conform to our interface. Returns ------- None """ raw_missing, mistyped, mismatched = self._diff_signatures(type_) # See if we have defaults for missing methods. missing = [] defaults_to_use = {} for name in raw_missing: try: defaults_to_use[name] = self._defaults[name].implementation except KeyError: missing.append(name) if not any((missing, mistyped, mismatched)): return defaults_to_use raise self._invalid_implementation(type_, missing, mistyped, mismatched)
python
{ "resource": "" }
q1944
InterfaceMeta._invalid_implementation
train
def _invalid_implementation(self, t, missing, mistyped, mismatched): """ Make a TypeError explaining why ``t`` doesn't implement our interface. """ assert missing or mistyped or mismatched, "Implementation wasn't invalid." message = "\nclass {C} failed to implement interface {I}:".format( C=getname(t), I=getname(self), ) if missing: message += dedent( """ The following methods of {I} were not implemented: {missing_methods}""" ).format( I=getname(self), missing_methods=self._format_missing_methods(missing) ) if mistyped: message += dedent( """ The following methods of {I} were implemented with incorrect types: {mismatched_types}""" ).format( I=getname(self), mismatched_types=self._format_mismatched_types(mistyped), ) if mismatched: message += dedent( """ The following methods of {I} were implemented with invalid signatures: {mismatched_methods}""" ).format( I=getname(self), mismatched_methods=self._format_mismatched_methods(mismatched), ) return InvalidImplementation(message)
python
{ "resource": "" }
q1945
Interface.from_class
train
def from_class(cls, existing_class, subset=None, name=None): """Create an interface from an existing class. Parameters ---------- existing_class : type The type from which to extract an interface. subset : list[str], optional List of methods that should be included in the interface. Default is to use all attributes not defined in an empty class. name : str, optional Name of the generated interface. Default is ``existing_class.__name__ + 'Interface'``. Returns ------- interface : type A new interface class with stubs generated from ``existing_class``. """ if name is None: name = existing_class.__name__ + 'Interface' if subset is None: subset = set(dir(existing_class)) - TRIVIAL_CLASS_ATTRIBUTES return InterfaceMeta( name, (Interface,), {name: static_get_type_attr(existing_class, name) for name in subset}, )
python
{ "resource": "" }
q1946
compatible
train
def compatible(impl_sig, iface_sig): """ Check whether ``impl_sig`` is compatible with ``iface_sig``. Parameters ---------- impl_sig : inspect.Signature The signature of the implementation function. iface_sig : inspect.Signature The signature of the interface function. In general, an implementation is compatible with an interface if any valid way of passing parameters to the interface method is also valid for the implementation. Consequently, the following differences are allowed between the signature of an implementation method and the signature of its interface definition: 1. An implementation may add new arguments to an interface iff: a. All new arguments have default values. b. All new arguments accepted positionally (i.e. all non-keyword-only arguments) occur after any arguments declared by the interface. c. Keyword-only arguments may be reordered by the implementation. 2. For type-annotated interfaces, type annotations may differ as follows: a. Arguments to implementations of an interface may be annotated with a **superclass** of the type specified by the interface. b. The return type of an implementation may be annotated with a **subclass** of the type specified by the interface. """ return all([ positionals_compatible( takewhile(is_positional, impl_sig.parameters.values()), takewhile(is_positional, iface_sig.parameters.values()), ), keywords_compatible( valfilter(complement(is_positional), impl_sig.parameters), valfilter(complement(is_positional), iface_sig.parameters), ), ])
python
{ "resource": "" }
q1947
step_count
train
def step_count(group_idx): """Return the number of index changes within group_idx.""" cmp_pos = 0 steps = 1 if len(group_idx) < 1: return 0 for i in range(len(group_idx)): if group_idx[cmp_pos] != group_idx[i]: cmp_pos = i steps += 1 return steps
python
{ "resource": "" }
q1948
step_indices
train
def step_indices(group_idx): """Return the edges of areas within group_idx, which are filled with the same value.""" ilen = step_count(group_idx) + 1 indices = np.empty(ilen, np.int64) indices[0] = 0 indices[-1] = group_idx.size cmp_pos = 0 ri = 1 for i in range(len(group_idx)): if group_idx[cmp_pos] != group_idx[i]: cmp_pos = i indices[ri] = i ri += 1 return indices
python
{ "resource": "" }
q1949
AggregateOp.callable
train
def callable(cls, nans=False, reverse=False, scalar=False): """ Compile a jitted function doing the hard part of the job """ _valgetter = cls._valgetter_scalar if scalar else cls._valgetter valgetter = nb.njit(_valgetter) outersetter = nb.njit(cls._outersetter) _cls_inner = nb.njit(cls._inner) if nans: def _inner(ri, val, ret, counter, mean): if not np.isnan(val): _cls_inner(ri, val, ret, counter, mean) inner = nb.njit(_inner) else: inner = _cls_inner def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof): # fill_value and ddof need to be present for being exchangeable with loop_2pass size = len(ret) rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx)) for i in rng: ri = group_idx[i] if ri < 0: raise ValueError("negative indices not supported") if ri >= size: raise ValueError("one or more indices in group_idx are too large") val = valgetter(a, i) inner(ri, val, ret, counter, mean) outersetter(outer, i, ret[ri]) return nb.njit(_loop, nogil=True)
python
{ "resource": "" }
q1950
AggregateGeneric.callable
train
def callable(self, nans=False): """Compile a jitted function and loop it over the sorted data.""" jitfunc = nb.njit(self.func, nogil=True) def _loop(sortidx, group_idx, a, ret): size = len(ret) group_idx_srt = group_idx[sortidx] a_srt = a[sortidx] indices = step_indices(group_idx_srt) for i in range(len(indices) - 1): start_idx, stop_idx = indices[i], indices[i + 1] ri = group_idx_srt[start_idx] if ri < 0: raise ValueError("negative indices not supported") if ri >= size: raise ValueError("one or more indices in group_idx are too large") ret[ri] = jitfunc(a_srt[start_idx:stop_idx]) return nb.njit(_loop, nogil=True)
python
{ "resource": "" }
q1951
get_func
train
def get_func(func, aliasing, implementations): """ Return the key of a found implementation or the func itself """ try: func_str = aliasing[func] except KeyError: if callable(func): return func else: if func_str in implementations: return func_str if func_str.startswith('nan') and \ func_str[3:] in funcs_no_separate_nan: raise ValueError("{} does not have a nan-version".format(func_str[3:])) else: raise NotImplementedError("No such function available") raise ValueError("func {} is neither a valid function string nor a " "callable object".format(func))
python
{ "resource": "" }
q1952
minimum_dtype
train
def minimum_dtype(x, dtype=np.bool_): """returns the "most basic" dtype which represents `x` properly, which provides at least the same value range as the specified dtype.""" def check_type(x, dtype): try: converted = dtype.type(x) except (ValueError, OverflowError): return False # False if some overflow has happened return converted == x or np.isnan(x) def type_loop(x, dtype, dtype_dict, default=None): while True: try: dtype = np.dtype(dtype_dict[dtype.name]) if check_type(x, dtype): return np.dtype(dtype) except KeyError: if default is not None: return np.dtype(default) raise ValueError("Can not determine dtype of %r" % x) dtype = np.dtype(dtype) if check_type(x, dtype): return dtype if np.issubdtype(dtype, np.inexact): return type_loop(x, dtype, _next_float_dtype) else: return type_loop(x, dtype, _next_int_dtype, default=np.float32)
python
{ "resource": "" }
q1953
_array
train
def _array(group_idx, a, size, fill_value, dtype=None): """groups a into separate arrays, keeping the order intact.""" if fill_value is not None and not (np.isscalar(fill_value) or len(fill_value) == 0): raise ValueError("fill_value must be None, a scalar or an empty " "sequence") order_group_idx = np.argsort(group_idx, kind='mergesort') counts = np.bincount(group_idx, minlength=size) ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1]) ret = np.asanyarray(ret) if fill_value is None or np.isscalar(fill_value): _fill_untouched(group_idx, ret, fill_value) return ret
python
{ "resource": "" }
q1954
_generic_callable
train
def _generic_callable(group_idx, a, size, fill_value, dtype=None, func=lambda g: g, **kwargs): """groups a by inds, and then applies foo to each group in turn, placing the results in an array.""" groups = _array(group_idx, a, size, ()) ret = np.full(size, fill_value, dtype=dtype or np.float64) for i, grp in enumerate(groups): if np.ndim(grp) == 1 and len(grp) > 0: ret[i] = func(grp) return ret
python
{ "resource": "" }
q1955
_cumsum
train
def _cumsum(group_idx, a, size, fill_value=None, dtype=None): """ N to N aggregate operation of cumsum. Perform cumulative sum for each group. group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1]) a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8]) _cumsum(group_idx, a, np.max(group_idx) + 1) >>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39]) """ sortidx = np.argsort(group_idx, kind='mergesort') invsortidx = np.argsort(sortidx, kind='mergesort') group_idx_srt = group_idx[sortidx] a_srt = a[sortidx] a_srt_cumsum = np.cumsum(a_srt, dtype=dtype) increasing = np.arange(len(a), dtype=int) group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt] a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts] return a_srt_cumsum[invsortidx]
python
{ "resource": "" }
q1956
_fill_untouched
train
def _fill_untouched(idx, ret, fill_value): """any elements of ret not indexed by idx are set to fill_value.""" untouched = np.ones_like(ret, dtype=bool) untouched[idx] = False ret[untouched] = fill_value
python
{ "resource": "" }
q1957
_prod
train
def _prod(group_idx, a, size, fill_value, dtype=None): """Same as aggregate_numpy.py""" dtype = minimum_dtype_scalar(fill_value, dtype, a) ret = np.full(size, fill_value, dtype=dtype) if fill_value != 1: ret[group_idx] = 1 # product should start from 1 np.multiply.at(ret, group_idx, a) return ret
python
{ "resource": "" }
q1958
c_func
train
def c_func(funcname, reverse=False, nans=False, scalar=False): """ Fill c_funcs with constructed code from the templates """ varnames = ['group_idx', 'a', 'ret', 'counter'] codebase = c_base_reverse if reverse else c_base iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname] if scalar: varnames.remove('a') return codebase % dict(init=c_init(varnames), iter=iteration, finish=c_finish.get(funcname, ''), ri_redir=(c_ri_redir if nans else c_ri))
python
{ "resource": "" }
q1959
step_indices
train
def step_indices(group_idx): """ Get the edges of areas within group_idx, which are filled with the same value """ ilen = step_count(group_idx) + 1 indices = np.empty(ilen, int) indices[0] = 0 indices[-1] = group_idx.size inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args) return indices
python
{ "resource": "" }
q1960
RandomProjection.__create_proj_mat
train
def __create_proj_mat(self, size): """Create a random projection matrix [1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins. [2] P. Li, et al. Very sparse random projections. http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection """ # [1] # return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)], size=size, p=[1 / 6, 2 / 3, 1 / 6]) # [2] s = 1 / self.density return np.random.choice([-np.sqrt(s / self.k), 0, np.sqrt(s / self.k)], size=size, p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)])
python
{ "resource": "" }
q1961
load_ratings
train
def load_ratings(data_home, size): """Load all samples in the dataset. """ if size == '100k': with open(os.path.join(data_home, 'u.data'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: list(map(int, l.rstrip().split('\t'))), f.readlines())) elif size == '1m': with open(os.path.join(data_home, 'ratings.dat'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: list(map(int, l.rstrip().split('::'))), f.readlines())) ratings = [] for l in lines: # Since we consider positive-only feedback setting, ratings < 5 will be excluded. if l[2] == 5: ratings.append(l) ratings = np.asarray(ratings) # sorted by timestamp return ratings[np.argsort(ratings[:, 3])]
python
{ "resource": "" }
q1962
n_feature_hash
train
def n_feature_hash(feature, dims, seeds): """N-hot-encoded feature hashing. Args: feature (str): Target feature represented as string. dims (list of int): Number of dimensions for each hash value. seeds (list of float): Seed of each hash function (mmh3). Returns: numpy 1d array: n-hot-encoded feature vector for `s`. """ vec = np.zeros(sum(dims)) offset = 0 for seed, dim in zip(seeds, dims): vec[offset:(offset + dim)] = feature_hash(feature, dim, seed) offset += dim return vec
python
{ "resource": "" }
q1963
feature_hash
train
def feature_hash(feature, dim, seed=123): """Feature hashing. Args: feature (str): Target feature represented as string. dim (int): Number of dimensions for a hash value. seed (float): Seed of a MurmurHash3 hash function. Returns: numpy 1d array: one-hot-encoded feature vector for `s`. """ vec = np.zeros(dim) i = mmh3.hash(feature, seed) % dim vec[i] = 1 return vec
python
{ "resource": "" }
q1964
count_true_positive
train
def count_true_positive(truth, recommend): """Count number of true positives from given sets of samples. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: int: Number of true positives. """ tp = 0 for r in recommend: if r in truth: tp += 1 return tp
python
{ "resource": "" }
q1965
RecommenderMixin.initialize
train
def initialize(self, *args): """Initialize a recommender by resetting stored users and items. """ # number of observed users self.n_user = 0 # store user data self.users = {} # number of observed items self.n_item = 0 # store item data self.items = {}
python
{ "resource": "" }
q1966
RecommenderMixin.register_user
train
def register_user(self, user): """For new users, append their information into the dictionaries. Args: user (User): User. """ self.users[user.index] = {'known_items': set()} self.n_user += 1
python
{ "resource": "" }
q1967
RecommenderMixin.scores2recos
train
def scores2recos(self, scores, candidates, rev=False): """Get recommendation list for a user u_index based on scores. Args: scores (numpy array; (n_target_items,)): Scores for the target items. Smaller score indicates a promising item. candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates. rev (bool): If true, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default. Returns: (numpy array, numpy array) : (Sorted list of items, Sorted scores). """ sorted_indices = np.argsort(scores) if rev: sorted_indices = sorted_indices[::-1] return candidates[sorted_indices], scores[sorted_indices]
python
{ "resource": "" }
q1968
Evaluator.fit
train
def fit(self, train_events, test_events, n_epoch=1): """Train a model using the first 30% positive events to avoid cold-start. Evaluation of this batch training is done by using the next 20% positive events. After the batch SGD training, the models are incrementally updated by using the 20% test events. Args: train_events (list of Event): Positive training events (0-30%). test_events (list of Event): Test events (30-50%). n_epoch (int): Number of epochs for the batch training. """ # make initial status for batch training for e in train_events: self.__validate(e) self.rec.users[e.user.index]['known_items'].add(e.item.index) self.item_buffer.append(e.item.index) # for batch evaluation, temporarily save new users info for e in test_events: self.__validate(e) self.item_buffer.append(e.item.index) self.__batch_update(train_events, test_events, n_epoch) # batch test events are considered as a new observations; # the model is incrementally updated based on them before the incremental evaluation step for e in test_events: self.rec.users[e.user.index]['known_items'].add(e.item.index) self.rec.update(e)
python
{ "resource": "" }
q1969
Evaluator.__batch_update
train
def __batch_update(self, train_events, test_events, n_epoch): """Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training. """ for epoch in range(n_epoch): # SGD requires us to shuffle events in each iteration # * if n_epoch == 1 # => shuffle is not required because it is a deterministic training (i.e. matrix sketching) if n_epoch != 1: np.random.shuffle(train_events) # train for e in train_events: self.rec.update(e, batch_train=True) # test MPR = self.__batch_evaluate(test_events) if self.debug: logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))
python
{ "resource": "" }
q1970
Evaluator.__batch_evaluate
train
def __batch_evaluate(self, test_events): """Evaluate the current model by using the given test events. Args: test_events (list of Event): Current model is evaluated by these events. Returns: float: Mean Percentile Rank for the test set. """ percentiles = np.zeros(len(test_events)) all_items = set(self.item_buffer) for i, e in enumerate(test_events): # check if the data allows users to interact the same items repeatedly unobserved = all_items if not self.repeat: # make recommendation for all unobserved items unobserved -= self.rec.users[e.user.index]['known_items'] # true item itself must be in the recommendation candidates unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) recos, scores = self.__recommend(e, candidates) pos = np.where(recos == e.item.index)[0][0] percentiles[i] = pos / (len(recos) - 1) * 100 return np.mean(percentiles)
python
{ "resource": "" }
q1971
Grapher._scale_x_values
train
def _scale_x_values(self, values, max_width): '''Scale X values to new width''' if type(values) == dict: values = self._scale_x_values_timestamps(values=values, max_width=max_width) adjusted_values = list(values) if len(adjusted_values) > max_width: def get_position(current_pos): return len(adjusted_values) * current_pos // max_width adjusted_values = [statistics.mean(adjusted_values[get_position(i):get_position(i + 1)]) for i in range(max_width)] return adjusted_values
python
{ "resource": "" }
q1972
Grapher._scale_x_values_timestamps
train
def _scale_x_values_timestamps(self, values, max_width): '''Scale X values to new width based on timestamps''' first_timestamp = float(values[0][0]) last_timestamp = float(values[-1][0]) step_size = (last_timestamp - first_timestamp) / max_width values_by_column = [[] for i in range(max_width)] for timestamp, value in values: if value is None: continue timestamp = float(timestamp) column = (timestamp - first_timestamp) // step_size column = int(min(column, max_width - 1)) # Don't go beyond the last column values_by_column[column].append(value) adjusted_values = [statistics.mean(values) if values else 0 for values in values_by_column] # Average each column, 0 if no values return adjusted_values
python
{ "resource": "" }
q1973
Grapher._scale_y_values
train
def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True): ''' Take values and transmute them into a new range ''' # Scale Y values - Create a scaled list of values to use for the visual graph scaled_values = [] y_min_value = min(values) if scale_old_from_zero: y_min_value = 0 y_max_value = max(values) new_min = 0 OldRange = (y_max_value - y_min_value) or 1 # Prevents division by zero if all values are the same NewRange = (new_max - new_min) # max_height is new_max for old_value in values: new_value = (((old_value - y_min_value) * NewRange) / OldRange) + new_min scaled_values.append(new_value) return scaled_values
python
{ "resource": "" }
q1974
Grapher._assign_ascii_character
train
def _assign_ascii_character(self, y_prev, y, y_next): # noqa for complexity '''Assign the character to be placed into the graph''' char = '?' if y_next > y and y_prev > y: char = '-' elif y_next < y and y_prev < y: char = '-' elif y_prev < y and y == y_next: char = '-' elif y_prev == y and y_next < y: char = '-' elif y_next > y: char = '/' elif y_next < y: char = '\\' elif y_prev < y: char = '/' elif y_prev > y: char = '\\' elif y_next == y: char = '-' elif y == y_prev: char = '-' return char
python
{ "resource": "" }
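The branch order above boils down to: local peaks, dips and plateaus get '-', rising slopes '/', falling slopes '\\'; a few traced calls (hypothetical grapher instance):

grapher._assign_ascii_character(y_prev=1, y=2, y_next=3)   # rising  -> '/'
grapher._assign_ascii_character(y_prev=3, y=2, y_next=1)   # falling -> '\\'
grapher._assign_ascii_character(y_prev=2, y=3, y_next=2)   # peak    -> '-'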
q1975
Grapher.asciigraph
train
def asciigraph(self, values=None, max_height=None, max_width=None, label=False):
    '''
    Accepts a list of y values and returns an ascii graph
    Optionally values can also be a dictionary with a key of timestamp, and a value of value.
    InGraphs returns data in this format for example.
    '''
    result = ''
    border_fill_char = '*'
    start_ctime = None
    end_ctime = None
    if not max_width:
        max_width = 180

    # If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values
    if isinstance(values, dict):
        time_series_sorted = sorted(list(values.items()), key=lambda x: x[0])  # Sort timestamp/value dict by the timestamps
        start_timestamp = time_series_sorted[0][0]
        end_timestamp = time_series_sorted[-1][0]
        start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime()
        end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime()
        values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width)
        values = [value for value in values if value is not None]

    if not max_height:
        max_height = min(20, max(values))
    stdev = statistics.stdev(values)
    mean = statistics.mean(values)

    # Do value adjustments
    adjusted_values = list(values)
    adjusted_values = self._scale_x_values(values=values, max_width=max_width)
    upper_value = max(adjusted_values)  # Getting upper/lower after scaling x values so we don't label a spike we can't see
    lower_value = min(adjusted_values)
    adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False)
    adjusted_values = self._round_floats_to_ints(values=adjusted_values)

    # Obtain Ascii Graph String
    field = self._get_ascii_field(adjusted_values)
    graph_string = self._draw_ascii_graph(field=field)

    # Label the graph
    if label:
        top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char)
        result += top_label + '\n'
    result += '{graph_string}\n'.format(graph_string=graph_string)
    if label:
        lower = f'Lower value: {lower_value:.2f} '
        stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******'
        fill_length = max_width - len(lower) - len(stats)
        stat_label = f'{lower}{"*" * fill_length}{stats}\n'
        result += stat_label
    if start_ctime and end_ctime:
        fill_length = max_width - len(start_ctime) - len(end_ctime)
        result += f'{start_ctime}{" " * fill_length}{end_ctime}\n'
    return result
python
{ "resource": "" }
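A minimal usage sketch for the public entry point; this assumes Grapher can be instantiated without arguments, which is not shown in this record:

grapher = Grapher()
values = [3, 5, 9, 4, 6, 8, 2, 7]
print(grapher.asciigraph(values=values, max_height=10, max_width=40, label=True))
# prints an 'Upper value:' header, the /-\ line graph, and a footer with the
# lower value, mean and standard deviation; timestamp footers only appear for dict input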
q1976
replace
train
def replace(expression: Expression, position: Sequence[int], replacement: Replacement) -> Replacement:
    r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`.

    The original `expression` itself is not modified, but a modified copy is returned. If the replacement
    is a list of expressions, it will be expanded into the list of operands of the respective operation:

    >>> print(replace(f(a), (0, ), [b, c]))
    f(b, c)

    Parameters:
        expression:
            An :class:`Expression` where a (sub)expression is to be replaced.
        position:
            A tuple of indices, e.g. the empty tuple refers to the `expression` itself,
            `(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first child of
            the first child etc.
        replacement:
            Either an :class:`Expression` or a list of :class:`Expression`\s to be inserted
            into the `expression` instead of the original expression at that `position`.

    Returns:
        The resulting expression from the replacement.

    Raises:
        IndexError: If the position is invalid or out of range.
    """
    if len(position) == 0:
        return replacement
    if not isinstance(expression, Operation):
        raise IndexError("Invalid position {!r} for expression {!s}".format(position, expression))
    if position[0] >= op_len(expression):
        raise IndexError("Position {!r} out of range for expression {!s}".format(position, expression))
    pos = position[0]
    operands = list(op_iter(expression))
    subexpr = replace(operands[pos], position[1:], replacement)
    if isinstance(subexpr, Sequence):
        new_operands = tuple(operands[:pos]) + tuple(subexpr) + tuple(operands[pos + 1:])
        return create_operation_expression(expression, new_operands)
    operands[pos] = subexpr
    return create_operation_expression(expression, operands)
python
{ "resource": "" }
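Nested positions address operands of operands; a small sketch reusing the f, a, b fixtures from the doctest above:

result = replace(f(a, f(b)), (1, 0), a)
# (1, 0) selects the first operand of the second operand (the inner b),
# so result prints as f(a, f(a)); the original expression is left unchanged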
q1977
BipartiteGraph.find_matching
train
def find_matching(self) -> Dict[TLeft, TRight]:
    """Finds a matching in the bipartite graph.

    This is done using the Hopcroft-Karp algorithm with an implementation from the
    `hopcroftkarp` package.

    Returns:
        A dictionary where each edge of the matching is represented by a key-value pair
        with the key being from the left part of the graph and the value from the right part.
    """
    # The directed graph is represented as a dictionary of edges
    # The key is the tail of all edges which are represented by the value
    # The value is a set of heads for all the edges originating from the tail (key)
    # In addition, the graph stores which part of the bipartite graph a node originated from
    # to avoid problems when a value exists in both halves.

    # Only one direction of the undirected edge is needed for the HopcroftKarp class
    directed_graph = {}  # type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]]

    for (left, right) in self._edges:
        tail = (LEFT, left)
        head = (RIGHT, right)
        if tail not in directed_graph:
            directed_graph[tail] = {head}
        else:
            directed_graph[tail].add(head)

    matching = HopcroftKarp(directed_graph).maximum_matching()

    # Filter out the partitions (LEFT and RIGHT) and only return the matching edges
    # that go from LEFT to RIGHT
    return dict((tail[1], head[1]) for tail, head in matching.items() if tail[0] == LEFT)
python
{ "resource": "" }
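A small usage sketch, assuming BipartiteGraph accepts the same ((left, right), value) pairs that the copy helpers in this class feed back into its constructor:

graph = BipartiteGraph({('u1', 'a'): True, ('u1', 'b'): True, ('u2', 'a'): True}.items())
graph.find_matching()
# 'u2' can only be matched to 'a', so the unique maximum matching is {'u1': 'b', 'u2': 'a'}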
q1978
BipartiteGraph.without_nodes
train
def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed.""" return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1])
python
{ "resource": "" }
q1979
BipartiteGraph.without_edge
train
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns a copy of this bipartite graph with the given edge removed.""" return BipartiteGraph((e2, v) for e2, v in self._edges.items() if edge != e2)
python
{ "resource": "" }
q1980
BipartiteGraph.limited_to
train
def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns the induced subgraph where only the nodes from the given sets are included.""" return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 in left and n2 in right)
python
{ "resource": "" }
q1981
is_constant
train
def is_constant(expression): """Check if the given expression is constant, i.e. it does not contain Wildcards.""" if isinstance(expression, Wildcard): return False if isinstance(expression, Expression): return expression.is_constant if isinstance(expression, Operation): return all(is_constant(o) for o in op_iter(expression)) return True
python
{ "resource": "" }
q1982
get_head
train
def get_head(expression): """Returns the given expression's head.""" if isinstance(expression, Wildcard): if isinstance(expression, SymbolWildcard): return expression.symbol_type return None return type(expression)
python
{ "resource": "" }
q1983
match_head
train
def match_head(subject, pattern): """Checks if the head of subject matches the pattern's head.""" if isinstance(pattern, Pattern): pattern = pattern.expression pattern_head = get_head(pattern) if pattern_head is None: return True if issubclass(pattern_head, OneIdentityOperation): return True subject_head = get_head(subject) assert subject_head is not None return issubclass(subject_head, pattern_head)
python
{ "resource": "" }
q1984
is_anonymous
train
def is_anonymous(expression): """Returns True iff the expression does not contain any variables.""" if hasattr(expression, 'variable_name') and expression.variable_name: return False if isinstance(expression, Operation): return all(is_anonymous(o) for o in op_iter(expression)) return True
python
{ "resource": "" }
q1985
contains_variables_from_set
train
def contains_variables_from_set(expression, variables): """Returns True iff the expression contains any of the variables from the given set.""" if hasattr(expression, 'variable_name') and expression.variable_name in variables: return True if isinstance(expression, Operation): return any(contains_variables_from_set(o, variables) for o in op_iter(expression)) return False
python
{ "resource": "" }
q1986
get_variables
train
def get_variables(expression, variables=None): """Returns the set of variable names in the given expression.""" if variables is None: variables = set() if hasattr(expression, 'variable_name') and expression.variable_name is not None: variables.add(expression.variable_name) if isinstance(expression, Operation): for operand in op_iter(expression): get_variables(operand, variables) return variables
python
{ "resource": "" }
q1987
rename_variables
train
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
    """Rename the variables in the expression according to the given dictionary.

    Args:
        expression:
            The expression in which the variables are renamed.
        renaming:
            The renaming dictionary. Maps old variable names to new ones.
            Variable names not occurring in the dictionary are left unchanged.

    Returns:
        The expression with renamed variables.
    """
    if isinstance(expression, Operation):
        if hasattr(expression, 'variable_name'):
            variable_name = renaming.get(expression.variable_name, expression.variable_name)
            return create_operation_expression(
                expression, [rename_variables(o, renaming) for o in op_iter(expression)],
                variable_name=variable_name
            )
        operands = [rename_variables(o, renaming) for o in op_iter(expression)]
        return create_operation_expression(expression, operands)
    elif isinstance(expression, Expression):
        expression = expression.__copy__()
        expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
    return expression
python
{ "resource": "" }
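A hedged sketch of the renaming, assuming the f and x_ fixtures used in the doctests elsewhere in this module (x_ being a wildcard whose variable_name is 'x'):

renamed = rename_variables(f(x_, a), {'x': 'z'})
# the wildcard formerly named 'x' is now named 'z'; the symbol a and any
# variable names missing from the renaming dict are left untouched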
q1988
fixed_integer_vector_iter
train
def fixed_integer_vector_iter(max_vector: Tuple[int, ...], vector_sum: int) -> Iterator[Tuple[int, ...]]: """ Return an iterator over the integer vectors which - are componentwise less than or equal to *max_vector*, and - are non-negative, and where - the sum of their components is exactly *vector_sum*. The iterator yields the vectors in lexicographical order. Examples: List all vectors that are between ``(0, 0)`` and ``(2, 2)`` componentwise, where the sum of components is 2: >>> vectors = list(fixed_integer_vector_iter([2, 2], 2)) >>> vectors [(0, 2), (1, 1), (2, 0)] >>> list(map(sum, vectors)) [2, 2, 2] Args: max_vector: Maximum vector for the iteration. Every yielded result will be less than or equal to this componentwise. vector_sum: Every iterated vector will have a component sum equal to this value. Yields: All non-negative vectors that have the given sum and are not larger than the given maximum. Raises: ValueError: If *vector_sum* is negative. """ if vector_sum < 0: raise ValueError("Vector sum must not be negative") if len(max_vector) == 0: if vector_sum == 0: yield tuple() return total = sum(max_vector) if vector_sum <= total: start = max(max_vector[0] + vector_sum - total, 0) end = min(max_vector[0], vector_sum) for j in range(start, end + 1): for vec in fixed_integer_vector_iter(max_vector[1:], vector_sum - j): yield (j, ) + vec
python
{ "resource": "" }
q1989
commutative_sequence_variable_partition_iter
train
def commutative_sequence_variable_partition_iter(values: Multiset, variables: List[VariableWithCount]
                                                 ) -> Iterator[Dict[str, Multiset]]:
    """Yield all possible variable substitutions for given values and variables.

    .. note::

        The results are not yielded in any particular order because the algorithm uses dictionaries.
        Dictionaries until Python 3.6 do not keep track of the insertion order.

    Example:

        For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like ``f(x__, y___, y___)`` one can define
        the following input parameters for the partitioning:

        >>> x = VariableWithCount(name='x', count=1, minimum=1, default=None)
        >>> y = VariableWithCount(name='y', count=2, minimum=0, default=None)
        >>> values = Multiset('aaabbc')

        Then the solutions are found (and sorted to get a unique output):

        >>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y])
        >>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions)
        >>> for substitution in sorted(as_strings):
        ...     print(substitution)
        {x ↦ {a, a, a, b, b, c}, y ↦ {}}
        {x ↦ {a, a, a, c}, y ↦ {b}}
        {x ↦ {a, b, b, c}, y ↦ {a}}
        {x ↦ {a, c}, y ↦ {a, b}}

    Args:
        values:
            The multiset of values which are partitioned and distributed among the variables.
        variables:
            A list of the variables to distribute the values among.
            Each variable has a name, a count of how many times it occurs and a minimum number of values it needs.

    Yields:
        Each possible substitution that is a valid partitioning of the values among the variables.
    """
    if len(variables) == 1:
        yield from _commutative_single_variable_partiton_iter(values, variables[0])
        return

    generators = []
    for value, count in values.items():
        generators.append(_make_variable_generator_factory(value, count, variables))

    initial = dict((var.name, Multiset()) for var in variables)  # type: Dict[str, 'Multiset[T]']
    for subst in generator_chain(initial, *generators):
        valid = True
        for var in variables:
            if var.default is not None and len(subst[var.name]) == 0:
                subst[var.name] = var.default
            elif len(subst[var.name]) < var.minimum:
                valid = False
                break
        if valid:
            if None in subst:
                del subst[None]
            yield subst
python
{ "resource": "" }
q1990
generator_chain
train
def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]:
    """Chain multiple generators together by passing results from one to the next.

    This helper function allows to create a chain of generators where each generator is constructed by a
    factory that gets the data yielded by the previous generator. So each generator can generate new data
    dependent on the data yielded by the previous one. For each data item yielded by a generator,
    a new generator is constructed by the next factory.

    Example:

        Let's say for every number from 0 to 4, we want to count up to that number. Then we can do
        something like this using list comprehensions:

        >>> [i for n in range(1, 5) for i in range(1, n + 1)]
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        You can use this function to achieve the same thing:

        >>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1))))
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        The advantage is that this is independent of the number of dependent generators you have.
        Also, this function does not use recursion so it is safe to use even with large generator counts.

    Args:
        initial_data:
            The initial data that is passed to the first generator factory.
        *factories:
            The generator factories. Each of them gets passed its predecessor's data and has to return an
            iterable. The data from this iterable is passed to the next factory.

    Yields:
        Every data item yielded by the generators of the final factory.
    """
    generator_count = len(factories)
    if generator_count == 0:
        yield initial_data
        return
    generators = [None] * generator_count  # type: List[Optional[Iterator[T]]]
    next_data = initial_data
    generator_index = 0
    while True:
        try:
            while generator_index < generator_count:
                if generators[generator_index] is None:
                    generators[generator_index] = factories[generator_index](next_data)
                next_data = next(generators[generator_index])
                generator_index += 1
            yield next_data
            generator_index -= 1
        except StopIteration:
            generators[generator_index] = None
            generator_index -= 1
            if generator_index < 0:
                break
python
{ "resource": "" }
q1991
Substitution.try_add_variable
train
def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:
    """Try to add the variable with its replacement to the substitution.

    This considers an existing replacement and will only succeed if the new replacement
    can be merged with the old replacement. Merging can occur if the two replacements
    are equivalent. Replacements can also be merged if the old replacement for the variable_name was
    unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:

    >>> subst = Substitution({'x': Multiset(['a', 'b'])})
    >>> subst.try_add_variable('x', ('a', 'b'))
    >>> print(subst)
    {x ↦ (a, b)}

    Args:
        variable_name:
            The name of the variable to add.
        replacement:
            The replacement for the variable.

    Raises:
        ValueError:
            if the variable cannot be merged because it conflicts with the existing
            substitution for the variable_name.
    """
    if variable_name not in self:
        self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
    else:
        existing_value = self[variable_name]

        if isinstance(existing_value, tuple):
            if isinstance(replacement, Multiset):
                if Multiset(existing_value) != replacement:
                    raise ValueError
            elif replacement != existing_value:
                raise ValueError
        elif isinstance(existing_value, Multiset):
            if not isinstance(replacement, (tuple, list, Multiset)):
                raise ValueError
            compare_value = Multiset(replacement)
            if existing_value == compare_value:
                if not isinstance(replacement, Multiset):
                    self[variable_name] = replacement
            else:
                raise ValueError
        elif replacement != existing_value:
            raise ValueError
python
{ "resource": "" }
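The conflicting case from the Raises section, sketched with the a and b symbols used in the other doctests:

subst = Substitution({'x': a})
try:
    subst.try_add_variable('x', b)   # clashes with the existing binding x -> a
except ValueError:
    pass                             # the original binding for x is left intact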
q1992
Substitution.union_with_variable
train
def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution': """Try to create a new substitution with the given variable added. See :meth:`try_add_variable` for a version of this method that modifies the substitution in place. Args: variable_name: The name of the variable to add. replacement: The substitution for the variable. Returns: The new substitution with the variable_name added or merged. Raises: ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable. """ new_subst = Substitution(self) new_subst.try_add_variable(variable, replacement) return new_subst
python
{ "resource": "" }
q1993
Substitution.extract_substitution
train
def extract_substitution(self, subject: 'expressions.Expression', pattern: 'expressions.Expression') -> bool: """Extract the variable substitution for the given pattern and subject. This assumes that subject and pattern already match when being considered as linear. Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here. All that this method does is checking whether all the substitutions for the variables can be unified. So, in case it returns ``False``, the substitution is invalid for the match. ..warning:: This method mutates the substitution and will even do so in case the extraction fails. Create a copy before using this method if you need to preserve the original substitution. Example: With an empty initial substitution and a linear pattern, the extraction will always succeed: >>> subst = Substitution() >>> subst.extract_substitution(f(a, b), f(x_, y_)) True >>> print(subst) {x ↦ a, y ↦ b} Clashing values for existing variables will fail: >>> subst.extract_substitution(b, x_) False For non-linear patterns, the extraction can also fail with an empty substitution: >>> subst = Substitution() >>> subst.extract_substitution(f(a, b), f(x_, x_)) False >>> print(subst) {x ↦ a} Note that the initial substitution got mutated even though the extraction failed! Args: subject: A :term:`syntactic` subject that matches the pattern. pattern: A :term:`syntactic` pattern that matches the subject. Returns: ``True`` iff the substitution could be extracted successfully. """ if getattr(pattern, 'variable_name', False): try: self.try_add_variable(pattern.variable_name, subject) except ValueError: return False return True elif isinstance(pattern, expressions.Operation): assert isinstance(subject, type(pattern)) assert op_len(subject) == op_len(pattern) op_expression = cast(expressions.Operation, subject) for subj, patt in zip(op_iter(op_expression), op_iter(pattern)): if not self.extract_substitution(subj, patt): return False return True
python
{ "resource": "" }
q1994
Substitution.union
train
def union(self, *others: 'Substitution') -> 'Substitution': """Try to merge the substitutions. If a variable occurs in multiple substitutions, try to merge the replacements. See :meth:`union_with_variable` to see how replacements are merged. Does not modify any of the original substitutions. Example: >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a}) >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )}) >>> print(subst1.union(subst2)) {x ↦ (a, b), y ↦ (c), z ↦ a} Args: others: The other substitutions to merge with this one. Returns: The new substitution with the other substitutions merged. Raises: ValueError: if a variable occurs in multiple substitutions but cannot be merged because the substitutions conflict. """ new_subst = Substitution(self) for other in others: for variable_name, replacement in other.items(): new_subst.try_add_variable(variable_name, replacement) return new_subst
python
{ "resource": "" }
q1995
Substitution.rename
train
def rename(self, renaming: Dict[str, str]) -> 'Substitution': """Return a copy of the substitution with renamed variables. Example: Rename the variable *x* to *y*: >>> subst = Substitution({'x': a}) >>> subst.rename({'x': 'y'}) {'y': Symbol('a')} Args: renaming: A dictionary mapping old variable names to new ones. Returns: A copy of the substitution where variable names have been replaced according to the given renaming dictionary. Names that are not contained in the dictionary are left unchanged. """ return Substitution((renaming.get(name, name), value) for name, value in self.items())
python
{ "resource": "" }
q1996
_get_symbol_wildcard_label
train
def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]:
    """Return the transition target for the given symbol type from the given state or None if it does not exist."""
    return next((t for t in state.keys() if is_symbol_wildcard(t) and isinstance(symbol, t)), None)
python
{ "resource": "" }
q1997
_term_str
train
def _term_str(term: TermAtom) -> str:  # pragma: no cover
    """Return a string representation of a term atom."""
    if is_operation(term):
        return term.name + '('
    elif is_symbol_wildcard(term):
        return '*{!s}'.format(term.__name__)
    elif isinstance(term, Wildcard):
        return '*{!s}{!s}'.format(term.min_count, (not term.fixed_size) and '+' or '')
    elif term == Wildcard:
        return '*'
    else:
        return str(term)
python
{ "resource": "" }
q1998
FlatTerm.merged
train
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm': """Concatenate the given flatterms to a single flatterm. Args: *flatterms: The flatterms which are concatenated. Returns: The concatenated flatterms. """ return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))
python
{ "resource": "" }
q1999
FlatTerm._flatterm_iter
train
def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]: """Generator that yields the atoms of the expressions in prefix notation with operation end markers.""" if isinstance(expression, Operation): yield type(expression) for operand in op_iter(expression): yield from cls._flatterm_iter(operand) yield OPERATION_END elif isinstance(expression, SymbolWildcard): yield expression.symbol_type elif isinstance(expression, (Symbol, Wildcard)): yield expression else: assert False, "Unreachable unless a new unsupported expression type is added."
python
{ "resource": "" }
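A sketch of the traversal order, assuming _flatterm_iter is the classmethod its signature suggests and f, a, b are the usual Operation/Symbol fixtures:

atoms = list(FlatTerm._flatterm_iter(f(a, f(b))))
# prefix order with an end marker closing every operation:
#   [f, a, f, b, OPERATION_END, OPERATION_END]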