Dataset schema (column name, feature type, value range):

  _id               stringlengths   2 to 7
  title             stringlengths   1 to 88
  partition         stringclasses   3 values
  text              stringlengths   31 to 13.1k
  language          stringclasses   1 value
  meta_information  dict
q1900
TableProcessor._build_row
train
def _build_row(self, row, parent, align, border):
    """ Given a row of text, build table cells. """
    tr = etree.SubElement(parent, 'tr')
    tag = 'td'
    if parent.tag == 'thead':
        tag = 'th'
    cells = self._split_row(row, border)
python
{ "resource": "" }
q1901
TableProcessor._split_row
train
def _split_row(self, row, border):
    """ Split a row of text into a list of cells. """
    if border:
        if row.startswith('|'):
            row = row[1:]
python
{ "resource": "" }
q1902
TableExtension.extendMarkdown
train
def extendMarkdown(self, md, md_globals):
    """ Add an instance of TableProcessor to BlockParser. """
    md.parser.blockprocessors.add('table',
python
{ "resource": "" }
q1903
get_all_static
train
def get_all_static():
    """
    Get all the static files directories found by ``STATICFILES_FINDERS``

    :return: set of paths (top-level folders only)
    """
    static_dirs = set()
    for finder in settings.STATICFILES_FINDERS:
        finder = finders.get_finder(finder)
        if hasattr(finder, 'storages'):
            for
python
{ "resource": "" }
q1904
BaseCompiler.input
train
def input(self, **kwargs):
    """
    Specify temporary input file extension.

    Browserify requires an explicit file extension (".js" or ".json" by default).
    https://github.com/substack/node-browserify/issues/1469
    """
    if self.infile is None and "{infile}" in self.command:
        if self.filename is None:
            self.infile
python
{ "resource": "" }
q1905
graph_hash
train
def graph_hash(obj):
    '''This hashes all types to a hash without collisions. Python's hashing
    algorithms are not cross-type compatible, but hashing tuples with the
    type as the first element seems to do the trick.'''
    obj_type = type(obj)
    try:
        # this works for hashables
        return hash((obj_type, obj))
python
{ "resource": "" }
q1906
VList.where
train
def where(self, relation, filter_fn):
    ''' use this to filter VLists, simply provide a filter function
    and what relation to apply it to '''
    assert type(relation).__name__ in {'str', 'unicode'}, \
        'where needs the first arg to be a string'
python
{ "resource": "" }
q1907
VList._where
train
def _where(self, filter_fn):
    ''' use this to filter VLists, simply provide a filter function to filter the
python
{ "resource": "" }
q1908
VList._where
train
def _where(self, **kwargs):
    '''use this to filter VLists with kv pairs'''
    out = self
python
{ "resource": "" }
q1909
SQLiteGraphDB.find
train
def find(self, target, relation):
    ''' returns back all elements the target has a relation to '''
    query = 'select ob1.code from objects as ob1, objects as
python
{ "resource": "" }
q1910
Sock.parse_buf
train
def parse_buf(self, encoding="unicode"):
    """ Since TCP is a stream-oriented protocol, responses aren't
    guaranteed to be complete when they arrive. The buffer stores all the
    data and this function splits the data into replies based on the
    newline delimiter. """
    buf_len = len(self.buf)
    replies = []
    reply = b""
    chop = 0
    skip = 0
    i = 0
    for i in range(0, buf_len):
        ch = self.buf[i:i + 1]
        if skip:
            skip -= 1
            continue
        nxt = i + 1
        if nxt < buf_len:
            if ch == b"\r" and self.buf[nxt:nxt + 1] == b"\n":
                # Append new reply.
python
{ "resource": "" }
q1911
Sock.get_chunks
train
def get_chunks(self, fixed_limit=None, encoding="unicode"):
    """
    This is the function which handles retrieving new data chunks. Its
    main logic is avoiding a recv call blocking forever and halting the
    program flow. To do this, it manages errors and keeps an eye on the
    buffer to avoid overflows and DoS attacks.

    http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r
    http://stackoverflow.com/questions/3187565/select-and-ssl-in-python
    """
    # Socket is disconnected.
    if not self.connected:
        return

    # Recv chunks until network buffer is empty.
    repeat = 1
    wait = 0.2
    chunk_no = 0
    max_buf = self.max_buf
    max_chunks = self.max_chunks
    if fixed_limit is not None:
        max_buf = fixed_limit
        max_chunks = fixed_limit

    while repeat:
        chunk_size = self.chunk_size
        while True:
            # Don't exceed buffer size.
            buf_len = len(self.buf)
            if buf_len >= max_buf:
                break
            remaining = max_buf - buf_len
            if remaining < chunk_size:
                chunk_size = remaining

            # Don't allow non-blocking sockets to be
            # DoSed by multiple small replies.
            if chunk_no >= max_chunks and not self.blocking:
                break

            try:
                chunk = self.s.recv(chunk_size)
            except socket.timeout as e:
                self.debug_print("Get chunks timed out.")
                self.debug_print(e)
                # Timeout on blocking sockets.
                err = e.args[0]
                self.debug_print(err)
                if err == "timed out":
                    repeat = 0
                    break
            except ssl.SSLError as e:
                # Will block on non-blocking SSL sockets.
                if e.errno == ssl.SSL_ERROR_WANT_READ:
                    self.debug_print("SSL_ERROR_WANT_READ")
                    break
                else:
                    self.debug_print("Get chunks ssl error")
                    self.close()
                    return
            except socket.error as e:
                # Will block on non-blocking non-SSL sockets.
                err = e.args[0]
                if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
python
{ "resource": "" }
q1912
Net.validate_node
train
def validate_node(self, node_ip, node_port=None, same_nodes=1):
    self.debug_print("Validating: " + node_ip)

    # Is this a valid IP?
    if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
        self.debug_print("Invalid node ip in validate node")
        return 0

    # Is this a valid port?
    if node_port != 0 and node_port is not None:
        if not is_valid_port(node_port):
            self.debug_print("Invalid node port in validate port")
            return 0

    """
    Don't accept connections from self to passive server
    or connections to already connected nodes.
    """
    if not self.enable_duplicate_ip_cons:
        # Don't connect to ourself.
        if (node_ip == "127.0.0.1" or
                node_ip == get_lan_ip(self.interface) or
                node_ip == self.wan_ip):
            self.debug_print("Cannot connect to ourself.")
            return 0

        # No, really: don't connect to ourself.
python
{ "resource": "" }
q1913
Net.bootstrap
train
def bootstrap(self):
    """
    When the software is first started, it needs to retrieve a list of
    nodes to connect to the network. This function asks the server for
    N nodes, which consists of at least N passive nodes and N
    simultaneous nodes. The simultaneous nodes are prioritized if the
    node_type for the machine running this software is simultaneous,
    with passive nodes being used as a fallback. Otherwise, the node
    exclusively uses passive nodes to bootstrap. This algorithm is
    designed to preserve passive nodes' inbound connection slots.
    """
    # Disable bootstrap.
    if not self.enable_bootstrap:
        return None

    # Avoid hammering the rendezvous server.
    t = time.time()
    if self.last_bootstrap is not None:
        if t - self.last_bootstrap <= rendezvous_interval:
            self.debug_print("Bootstrapped recently")
            return None
    self.last_bootstrap = t
    self.debug_print("Searching for nodes to connect to.")

    try:
        connection_slots = self.max_outbound - (len(self.outbound))
        if connection_slots > 0:
            # Connect to rendezvous server.
            rendezvous_con = self.rendezvous.server_connect()

            # Retrieve random nodes to bootstrap with.
            rendezvous_con.send_line("BOOTSTRAP " +
                                     str(self.max_outbound * 2))
            choices = rendezvous_con.recv_line(timeout=2)
            if choices == "NODES EMPTY":
                rendezvous_con.close()
                self.debug_print("Node list is empty.")
                return self
            else:
                self.debug_print("Found node list.")

            # Parse node list.
            choices = re.findall("(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices)
            rendezvous_con.s.close()
python
{ "resource": "" }
q1914
Net.advertise
train
def advertise(self):
    """
    This function tells the rendezvous server that our node is ready to
    accept connections from other nodes on the P2P network that run the
    bootstrap function. It's only used when net_type == p2p
    """
    # Advertise is disabled.
    if not self.enable_advertise:
        self.debug_print("Advertise is disabled!")
        return None

    # Direct net server is reserved for direct connections only.
    if self.net_type == "direct" and self.node_type == "passive":
        return None

    # Net isn't started!
    if not self.is_net_started:
        raise Exception("Please call start() before you call advertise()")

    # Avoid hammering the rendezvous server with excessive requests.
    t = time.time()
    if self.last_advertise is not None:
        if t - self.last_advertise <= advertise_interval:
            return None

        if len(self.inbound) >= self.min_connected:
            return None

    self.last_advertise = t

    # Tell rendezvous server to list us.
    try:
        #
python
{ "resource": "" }
q1915
Net.determine_node
train
def determine_node(self):
    """
    Determines the type of node based on a combination of forwarding
    reachability and NAT type.
    """
    # Manually set node_type as simultaneous.
    if self.node_type == "simultaneous":
        if self.nat_type != "unknown":
            return "simultaneous"

    # Get IP of binding interface.
    unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
    if self.passive_bind in unspecific_bind:
        lan_ip = get_lan_ip(self.interface)
    else:
        lan_ip = self.passive_bind

    # Passive node checks.
    if lan_ip is not None \
            and self.passive_port is not None and self.enable_forwarding:
        self.debug_print("Checking if port is forwarded.")

        # Check port isn't already forwarded.
        if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                             self.forwarding_servers):
            msg = "Port already forwarded. Skipping NAT traversal."
            self.debug_print(msg)
            self.forwarding_type = "forwarded"
            return "passive"
        else:
            self.debug_print("Port is not already forwarded.")

        # Most routers.
        try:
            self.debug_print("Trying UPnP")
            UPnP(self.interface).forward_port("TCP", self.passive_port,
                                              lan_ip)
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                self.forwarding_type = "UPnP"
                self.debug_print("Forwarded port with UPnP.")
            else:
                self.debug_print("UPnP failed to forward port.")
        except Exception as e:
            # Log exception.
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
python
{ "resource": "" }
q1916
Net.start
train
def start(self):
    """
    This function determines node and NAT type, saves connectivity
    details, and starts any needed servers to be a part of the network.
    This is usually the first function called after initialising the
    Net class.
    """
    self.debug_print("Starting networking.")
    self.debug_print("Make sure to iterate over replies if you need"
                     " connection alive management!")

    # Register a ctrl + c handler.
    signal.signal(signal.SIGINT, self.stop)

    # Save WAN IP.
    self.debug_print("WAN IP = " + str(self.wan_ip))

    # Check rendezvous server is up.
    try:
        rendezvous_con = self.rendezvous.server_connect()
        rendezvous_con.close()
    except:
        raise Exception("Unable to connect to rendezvous server.")

    # Started no matter what
    # since LAN connections are always possible.
    self.start_passive_server()

    # Determine NAT type.
    if self.nat_type == "unknown":
        self.debug_print("Determining NAT type.")
        nat_type = self.rendezvous.determine_nat()
        if nat_type is not None and nat_type != "unknown":
            self.nat_type = nat_type
            self.rendezvous.nat_type = nat_type
            self.debug_print("NAT type = " + nat_type)
        else:
            self.debug_print("Unable to determine NAT type.")

    # Check NAT type if node type simultaneous is manually specified.
    if self.node_type == "simultaneous":
        if self.nat_type not in self.rendezvous.predictable_nats:
            self.debug_print("Manual setting of simultaneous specified"
                             " but ignored since NAT does not support it.")
            self.node_type = "active"
    else:
        # Determine node type.
        self.debug_print("Determining node type.")

        # No checks for manually specifying passive
        # (there probably should be.)
        if self.node_type == "unknown":
python
{ "resource": "" }
q1917
RendezvousProtocol.send_remote_port
train
def send_remote_port(self):
    """
    Sends the remote port mapped for the connection.
    This port is surprisingly often the same as the
python
{ "resource": "" }
q1918
RendezvousProtocol.cleanup_candidates
train
def cleanup_candidates(self, node_ip):
    """
    Removes old TCP hole punching candidates for a designated node if
    a certain amount of time has passed since they last connected.
    """
    if node_ip in self.factory.candidates:
python
{ "resource": "" }
q1919
RendezvousProtocol.propogate_candidates
train
def propogate_candidates(self, node_ip):
    """
    Used to propagate new candidates to passive simultaneous nodes.
    """
    if node_ip in self.factory.candidates:
        old_candidates = []
        for candidate in self.factory.candidates[node_ip]:
            # Not connected.
            if not candidate["con"].connected:
python
{ "resource": "" }
q1920
RendezvousProtocol.synchronize_simultaneous
train
def synchronize_simultaneous(self, node_ip):
    """
    Because adjacent mappings for certain NAT types can be stolen by
    other connections, the purpose of this function is to ensure the
    last connection by a passive simultaneous node is recent compared
    to the time for a candidate, to increase the chance that the
    predicted mappings remain active for the TCP hole punching attempt.
    """
    for candidate in self.factory.candidates[node_ip]:
        # Only if candidate is connected.
        if not candidate["con"].connected:
            continue
python
{ "resource": "" }
q1921
RendezvousProtocol.connectionLost
train
def connectionLost(self, reason):
    """
    Mostly handles clean-up of node + candidate structures.
    Avoids memory exhaustion for a large number of connections.
    """
    try:
        self.connected = False
        if debug:
            print(self.log_entry("CLOSED =", "none"))

        # Every five minutes: cleanup.
        t = time.time()
        if time.time() - self.factory.last_cleanup >= self.cleanup:
            self.factory.last_cleanup = t

            # Delete old passive nodes.
            old_node_ips = []
            for node_ip in list(self.factory.nodes["passive"]):
                passive_node = self.factory.nodes["passive"][node_ip]
                # Gives enough time for passive nodes to receive clients.
                if t - passive_node["time"] >= self.node_lifetime:
                    old_node_ips.append(node_ip)
            for node_ip in old_node_ips:
                del self.factory.nodes["passive"][node_ip]

            # Delete old simultaneous nodes.
            old_node_ips = []
            for node_ip in list(self.factory.nodes["simultaneous"]):
                simultaneous_node = \
                    self.factory.nodes["simultaneous"][node_ip]
                # Gives enough time for passive nodes to receive clients.
                if t - simultaneous_node["time"] >= self.node_lifetime:
                    old_node_ips.append(node_ip)
            for node_ip in old_node_ips:
                del self.factory.nodes["simultaneous"][node_ip]

            # Delete old candidates and candidate structs.
            old_node_ips = []
            for node_ip in list(self.factory.candidates):
                # Record old candidates.
                old_candidates = []
                for candidate in self.factory.candidates[node_ip]:
python
{ "resource": "" }
q1922
IPgetter.get_external_ip
train
def get_external_ip(self):
    """
    This function gets your IP from a random server.
    """
    random.shuffle(self.server_list)
    myip = ''
    for server in self.server_list[:3]:
        myip
python
{ "resource": "" }
q1923
IPgetter.fetch
train
def fetch(self, server):
    """
    This function gets your IP from a specific server.
    """
    t = None
    socket_default_timeout = socket.getdefaulttimeout()
    opener = urllib.build_opener()
    opener.addheaders = [('User-agent',
                          "Mozilla/5.0 (X11; Linux x86_64; rv:24.0)"
                          " Gecko/20100101 Firefox/24.0")]

    try:
        # Close url resource if fetching not finished within timeout.
        t = Timer(self.timeout, self.handle_timeout, [self.url])
        t.start()

        # Open URL.
        if version_info[0:2] == (2, 5):
            # Support for Python 2.5.* using socket hack
            # (Changes global socket timeout.)
            socket.setdefaulttimeout(self.timeout)
            self.url = opener.open(server)
        else:
            self.url = opener.open(server, timeout=self.timeout)

        # Read response.
        content = self.url.read()

        # Didn't want to import chardet. Preferred to stick to stdlib.
        if PY3K:
            try:
                content = content.decode('UTF-8')
python
{ "resource": "" }
q1924
UNL.connect
train
def connect(self, their_unl, events, force_master=1, hairpin=1,
            nonce="0" * 64):
    """
    A new thread is spawned because many of the connection techniques
    rely on sleep to determine connection outcome or to synchronise
    hole punching techniques. If the sleep is in its own thread it
    won't block main execution.
python
{ "resource": "" }
q1925
SysClock.calculate_clock_skew
train
def calculate_clock_skew(self):
    """
    Compute average and standard deviation using all the data points.
    """
    n = self.statx_n(self.data_points)

    """
    Required to be able to compute the standard deviation.
    """
    if n < 1:
        return Decimal("0")

    avg = self.statx_avg(self.data_points)
    sdev = self.statx_sdev(self.data_points)

    """
    Incrementally remove aberration points.
    """
    for k in range(0, self.clean_steps):
        """
        Remove aberration points: keep only the sigma range
        around the average.
        """
        min_val = avg - sdev
        max_val = avg + sdev
        cleaned_data_points = []
        for
python
{ "resource": "" }
q1926
RendezvousClient.simultaneous_listen
train
def simultaneous_listen(self):
    """
    This function is called by passive simultaneous nodes who wish to
    establish themselves as such. It sets up a connection to the
    Rendezvous Server to monitor for new hole punching requests.
    """
    # Close socket.
    if self.server_con is not None:
        self.server_con.s.close()
        self.server_con = None

    # Reset predictions + mappings.
    self.mappings = None
    self.predictions = None

    # Connect to rendezvous server.
    parts = self.sequential_connect()
    if parts is None:
python
{ "resource": "" }
q1927
RendezvousClient.predict_mappings
train
def predict_mappings(self, mappings):
    """
    This function is used to predict the remote ports that a NAT will
    map a local connection to. It requires the NAT type to be
    determined before use. Currently supports preserving and delta
    type mapping behaviour.
    """
    if self.nat_type not in self.predictable_nats:
        msg = "Can't predict mappings for non-predictable NAT type."
        raise Exception(msg)

    for mapping in mappings:
        mapping["bound"] = mapping["sock"].getsockname()[1]
python
{ "resource": "" }
q1928
RendezvousClient.parse_remote_port
train
def parse_remote_port(self, reply):
    """
    Parses a remote port from a Rendezvous Server's response.
    """
    remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
    if not len(remote_port):
        remote_port = 0
python
{ "resource": "" }
q1929
get_unused_port
train
def get_unused_port(port=None):
    """Checks if port is already in use."""
    if port is None or port < 1024 or port > 65535:
        port = random.randint(1024, 65535)
    assert 1024 <= port <= 65535
    while True:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind(('', port))  # Try to open port
python
{ "resource": "" }
q1930
get_lan_ip
train
def get_lan_ip(interface="default"):
    if sys.version_info < (3, 0, 0):
        if type(interface) == str:
            interface = unicode(interface)
    else:
        if type(interface) == bytes:
            interface = interface.decode("utf-8")

    # Get ID of interface that handles WAN stuff.
    default_gateway = get_default_gateway(interface)
    gateways = netifaces.gateways()
    wan_id = None
    if netifaces.AF_INET in gateways:
        gw_list = gateways[netifaces.AF_INET]
        for gw_info in gw_list:
            if gw_info[0] == default_gateway:
python
{ "resource": "" }
q1931
get_wan_ip
train
def get_wan_ip(n=0):
    """
    That IP module sucks. Occasionally it returns an IP address behind
    cloudflare which probably happens when cloudflare tries to proxy your
    web request because it thinks you're trying to DoS. It's better if we
    just run our own infrastructure.
    """
    if n == 2:
        try:
            ip = myip()
            ip = extract_ip(ip)
            if is_ip_valid(ip):
                return ip
        except Exception as e:
            print(str(e))
            return None

    # Fail-safe: use centralized server for IP lookup.
    from pyp2p.net import forwarding_servers
    for forwarding_server in forwarding_servers:
        url = "http://" + forwarding_server["addr"] + ":"
        url +=
python
{ "resource": "" }
q1932
Cron.initialize
train
def initialize(self):
    """Initialize croniter and related times"""
    if self.croniter is None:
        self.time = time.time()
        self.datetime = datetime.now(self.tz)
python
{ "resource": "" }
q1933
Cron.get_next
train
def get_next(self):
    """Return next iteration time related to loop time"""
python
{ "resource": "" }
q1934
Cron.call_next
train
def call_next(self):
    """Set next hop in the loop. Call task"""
    if self.handle is not None:
python
{ "resource": "" }
q1935
Cron.call_func
train
def call_func(self, *args, **kwargs):
    """Called. Take care of exceptions using gather"""
    asyncio.gather(
        self.cron(*args, **kwargs),
python
{ "resource": "" }
q1936
cmd
train
def cmd():
    '''Handler for command line invocation'''

    # Try to handle any reasonable thing thrown at this.
    # Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout
    # and treat any subsequent arguments as a space separated string to
    # be titlecased (so it still works if people forget quotes)
    parser = argparse.ArgumentParser()
    in_group = parser.add_mutually_exclusive_group()
    in_group.add_argument('string', nargs='*', default=[],
                          help='String to titlecase')
    in_group.add_argument('-f', '--input-file',
python
{ "resource": "" }
q1937
Checker.add_options
train
def add_options(cls, parser: OptionManager) -> None:
    """
    ``flake8`` api method to register new plugin options.

    See :class:`.Configuration` docs for detailed options reference.

    Arguments:
        parser: ``flake8`` option parser instance.

    """
    parser.add_option(
        '--eradicate-aggressive',
        default=False,
        help=(
python
{ "resource": "" }
q1938
Checker.run
train
def run(self) -> Generator[Tuple[int, int, str, type], None, None]:
    """
    Runs the checker.

    ``fix_file()`` only mutates the buffer object.
    It is the only way to find out if some error happened.
    """
    if self.filename != STDIN:
        buffer = StringIO()
python
{ "resource": "" }
q1939
unique
train
def unique(g):
    """
    Yield values yielded by ``g``, removing any duplicates.

    Example
    -------
    >>> list(unique(iter([1, 3, 1, 2, 3])))
    [1, 3, 2]
    """
python
{ "resource": "" }
q1940
static_get_type_attr
train
def static_get_type_attr(t, name):
    """
    Get a type attribute statically, circumventing the descriptor protocol.
python
{ "resource": "" }
q1941
_conflicting_defaults
train
def _conflicting_defaults(typename, conflicts):
    """Format an error message for conflicting default implementations.

    Parameters
    ----------
    typename : str
        Name of the type for which we're producing an error.
    conflicts : dict[str -> list[Interface]]
        Map from strings to interfaces providing a default with that name.

    Returns
    -------
    message : str
        User-facing error message.
    """
    message = "\nclass {C} received conflicting default implementations:".format(
        C=typename,
    )
    for attrname, interfaces in conflicts.items():
        message += dedent(
python
{ "resource": "" }
q1942
InterfaceMeta._diff_signatures
train
def _diff_signatures(self, type_):
    """
    Diff our method signatures against the methods provided by type_.

    Parameters
    ----------
    type_ : type
        The type to check.

    Returns
    -------
    missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature]  # noqa
        ``missing`` is a list of missing interface names.
        ``mistyped`` is a dict mapping names to incorrect types.
        ``mismatched`` is a dict mapping names to incorrect signatures.
    """
    missing = []
    mistyped = {}
    mismatched = {}
    for name, iface_sig in self._signatures.items():
        try:
            # Don't invoke the descriptor protocol here so that we get
            # staticmethod/classmethod/property objects instead of the
            # functions they wrap.
python
{ "resource": "" }
q1943
InterfaceMeta.verify
train
def verify(self, type_):
    """
    Check whether a type implements ``self``.

    Parameters
    ----------
    type_ : type
        The type to check.

    Raises
    ------
    TypeError
        If ``type_`` doesn't conform to our interface.

    Returns
    -------
    None
    """
    raw_missing, mistyped, mismatched = self._diff_signatures(type_)

    # See if we have defaults for missing methods.
    missing = []
    defaults_to_use = {}
    for name in raw_missing:
        try:
python
{ "resource": "" }
q1944
InterfaceMeta._invalid_implementation
train
def _invalid_implementation(self, t, missing, mistyped, mismatched):
    """
    Make a TypeError explaining why ``t`` doesn't implement our interface.
    """
    assert missing or mistyped or mismatched, "Implementation wasn't invalid."

    message = "\nclass {C} failed to implement interface {I}:".format(
        C=getname(t),
        I=getname(self),
    )
    if missing:
        message += dedent(
            """

            The following methods of {I} were not implemented:
            {missing_methods}"""
        ).format(
            I=getname(self),
            missing_methods=self._format_missing_methods(missing)
        )

    if mistyped:
        message += dedent(
            """

            The following methods of {I} were implemented with incorrect types:
            {mismatched_types}"""
python
{ "resource": "" }
q1945
Interface.from_class
train
def from_class(cls, existing_class, subset=None, name=None):
    """Create an interface from an existing class.

    Parameters
    ----------
    existing_class : type
        The type from which to extract an interface.
    subset : list[str], optional
        List of methods that should be included in the interface.
        Default is to use all attributes not defined in an empty class.
    name : str, optional
        Name of the generated interface.
        Default is ``existing_class.__name__ + 'Interface'``.

    Returns
    -------
    interface :
python
{ "resource": "" }
q1946
compatible
train
def compatible(impl_sig, iface_sig):
    """
    Check whether ``impl_sig`` is compatible with ``iface_sig``.

    Parameters
    ----------
    impl_sig : inspect.Signature
        The signature of the implementation function.
    iface_sig : inspect.Signature
        The signature of the interface function.

    In general, an implementation is compatible with an interface if any
    valid way of passing parameters to the interface method is also valid
    for the implementation.

    Consequently, the following differences are allowed between the
    signature of an implementation method and the signature of its
    interface definition:

    1. An implementation may add new arguments to an interface iff:
python
{ "resource": "" }
q1947
step_count
train
def step_count(group_idx):
    """Return the number of index changes within group_idx."""
    cmp_pos = 0
    steps = 1
    if len(group_idx) < 1:
        return 0
    for
python
{ "resource": "" }
q1948
step_indices
train
def step_indices(group_idx):
    """Return the edges of areas within group_idx, which are filled
    with the same value."""
    ilen = step_count(group_idx) + 1
    indices = np.empty(ilen, np.int64)
    indices[0] = 0
    indices[-1] = group_idx.size
    cmp_pos = 0
    ri = 1
    for i in
python
{ "resource": "" }
q1949
AggregateOp.callable
train
def callable(cls, nans=False, reverse=False, scalar=False):
    """ Compile a jitted function doing the hard part of the job """
    _valgetter = cls._valgetter_scalar if scalar else cls._valgetter
    valgetter = nb.njit(_valgetter)
    outersetter = nb.njit(cls._outersetter)

    _cls_inner = nb.njit(cls._inner)
    if nans:
        def _inner(ri, val, ret, counter, mean):
            if not np.isnan(val):
                _cls_inner(ri, val, ret, counter, mean)
        inner = nb.njit(_inner)
python
{ "resource": "" }
q1950
AggregateGeneric.callable
train
def callable(self, nans=False):
    """Compile a jitted function and loop it over the sorted data."""
    jitfunc = nb.njit(self.func, nogil=True)

    def _loop(sortidx, group_idx, a, ret):
        size = len(ret)
        group_idx_srt = group_idx[sortidx]
        a_srt = a[sortidx]

        indices = step_indices(group_idx_srt)
        for i in range(len(indices) - 1):
            start_idx, stop_idx = indices[i], indices[i + 1]
            ri = group_idx_srt[start_idx]
python
{ "resource": "" }
q1951
get_func
train
def get_func(func, aliasing, implementations):
    """ Return the key of a found implementation or the func itself """
    try:
        func_str = aliasing[func]
    except KeyError:
        if callable(func):
            return func
    else:
        if func_str in implementations:
python
{ "resource": "" }
q1952
minimum_dtype
train
def minimum_dtype(x, dtype=np.bool_):
    """returns the "most basic" dtype which represents `x` properly, which
    provides at least the same value range as the specified dtype."""

    def check_type(x, dtype):
        try:
            converted = dtype.type(x)
        except (ValueError, OverflowError):
            return False  # False if some overflow has happened
        return converted == x or np.isnan(x)

    def type_loop(x, dtype, dtype_dict, default=None):
python
{ "resource": "" }
q1953
_array
train
def _array(group_idx, a, size, fill_value, dtype=None):
    """groups a into separate arrays, keeping the order intact."""
    if fill_value is not None and not (np.isscalar(fill_value) or
                                       len(fill_value) == 0):
        raise ValueError("fill_value must be None, a scalar or an empty "
                         "sequence")
python
{ "resource": "" }
q1954
_generic_callable
train
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
                      func=lambda g: g, **kwargs):
    """groups a by inds, and then applies ``func`` to each group in turn,
    placing the results in an array."""
    groups = _array(group_idx, a,
python
{ "resource": "" }
q1955
_cumsum
train
def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
    """
    N to N aggregate operation of cumsum. Perform cumulative sum for each group.

    group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
    a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
    _cumsum(group_idx, a, np.max(group_idx) + 1)
    >>> array([ 3,  4,
python
{ "resource": "" }
q1956
_fill_untouched
train
def _fill_untouched(idx, ret, fill_value):
    """any elements of ret not indexed by idx are set to fill_value."""
    untouched
python
{ "resource": "" }
q1957
_prod
train
def _prod(group_idx, a, size, fill_value, dtype=None):
    """Same as aggregate_numpy.py"""
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value !=
python
{ "resource": "" }
q1958
c_func
train
def c_func(funcname, reverse=False, nans=False, scalar=False):
    """ Fill c_funcs with constructed code from the templates """
    varnames = ['group_idx', 'a', 'ret', 'counter']
    codebase = c_base_reverse if reverse else c_base
python
{ "resource": "" }
q1959
step_indices
train
def step_indices(group_idx):
    """ Get the edges of areas within group_idx, which are filled
    with the same value """
    ilen = step_count(group_idx) + 1
    indices = np.empty(ilen, int)
    indices[0] = 0
python
{ "resource": "" }
q1960
RandomProjection.__create_proj_mat
train
def __create_proj_mat(self, size):
    """Create a random projection matrix.

    [1] D. Achlioptas. Database-friendly random projections:
        Johnson-Lindenstrauss with binary coins.
    [2] P. Li, et al. Very sparse random projections.

    http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
python
{ "resource": "" }
q1961
load_ratings
train
def load_ratings(data_home, size):
    """Load all samples in the dataset."""
    if size == '100k':
        with open(os.path.join(data_home, 'u.data'),
                  encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: list(map(int, l.rstrip().split('\t'))),
                             f.readlines()))
    elif size == '1m':
        with open(os.path.join(data_home, 'ratings.dat'),
                  encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: list(map(int, l.rstrip().split('::'))),
                             f.readlines()))

    ratings =
python
{ "resource": "" }
q1962
n_feature_hash
train
def n_feature_hash(feature, dims, seeds):
    """N-hot-encoded feature hashing.

    Args:
        feature (str): Target feature represented as string.
        dims (list of int): Number of dimensions for each hash value.
        seeds (list of float): Seed of each hash function (mmh3).

    Returns:
        numpy 1d array: n-hot-encoded feature vector for `s`.
python
{ "resource": "" }
q1963
feature_hash
train
def feature_hash(feature, dim, seed=123):
    """Feature hashing.

    Args:
        feature (str): Target feature represented as string.
        dim (int): Number of dimensions for a hash value.
        seed (float): Seed of a MurmurHash3 hash function.

    Returns:
        numpy 1d
python
{ "resource": "" }
q1964
count_true_positive
train
def count_true_positive(truth, recommend):
    """Count number of true positives from given sets of samples.

    Args:
        truth (numpy 1d array): Set of truth samples.
python
{ "resource": "" }
q1965
RecommenderMixin.initialize
train
def initialize(self, *args):
    """Initialize a recommender by resetting stored users and items."""
    # number of observed users
    self.n_user = 0

    # store user data
    self.users =
python
{ "resource": "" }
q1966
RecommenderMixin.register_user
train
def register_user(self, user):
    """For new users, append their information into the dictionaries.

    Args:
        user (User): User.
    """
python
{ "resource": "" }
q1967
RecommenderMixin.scores2recos
train
def scores2recos(self, scores, candidates, rev=False):
    """Get recommendation list for a user u_index based on scores.

    Args:
        scores (numpy array; (n_target_items,)):
            Scores for the target items. Smaller score indicates a
            promising item.
        candidates (numpy array; (# target items, )):
            Target items' indices. Only these
python
{ "resource": "" }
q1968
Evaluator.fit
train
def fit(self, train_events, test_events, n_epoch=1):
    """Train a model using the first 30% positive events to avoid cold-start.

    Evaluation of this batch training is done by using the next 20% positive
    events. After the batch SGD training, the models are incrementally
    updated by using the 20% test events.

    Args:
        train_events (list of Event): Positive training events (0-30%).
        test_events (list of Event): Test events (30-50%).
        n_epoch (int): Number of epochs for the batch training.
python
{ "resource": "" }
q1969
Evaluator.__batch_update
train
def __batch_update(self, train_events, test_events, n_epoch):
    """Batch update called by the fitting method.

    Args:
        train_events (list of Event): Positive training events.
        test_events (list of Event): Test events.
        n_epoch (int): Number of epochs for the batch training.
    """
    for epoch in range(n_epoch):
        # SGD requires us to shuffle events in each iteration
        # * if n_epoch == 1
        #   => shuffle is not required because it is a deterministic
        #      training (i.e. matrix sketching)
python
{ "resource": "" }
q1970
Evaluator.__batch_evaluate
train
def __batch_evaluate(self, test_events):
    """Evaluate the current model by using the given test events.

    Args:
        test_events (list of Event): Current model is evaluated by these events.

    Returns:
        float: Mean Percentile Rank for the test set.
    """
    percentiles = np.zeros(len(test_events))

    all_items = set(self.item_buffer)
    for i, e in enumerate(test_events):
        # check if the data allows users to interact the same items repeatedly
        unobserved = all_items
        if not self.repeat:
            # make recommendation for all unobserved items
python
{ "resource": "" }
q1971
Grapher._scale_x_values
train
def _scale_x_values(self, values, max_width):
    '''Scale X values to new width'''
    if type(values) == dict:
        values = self._scale_x_values_timestamps(values=values,
                                                 max_width=max_width)
    adjusted_values = list(values)

    if len(adjusted_values) > max_width:
        def get_position(current_pos):
python
{ "resource": "" }
q1972
Grapher._scale_x_values_timestamps
train
def _scale_x_values_timestamps(self, values, max_width):
    '''Scale X values to new width based on timestamps'''
    first_timestamp = float(values[0][0])
    last_timestamp = float(values[-1][0])
    step_size = (last_timestamp - first_timestamp) / max_width

    values_by_column = [[] for i in range(max_width)]
    for timestamp, value in values:
        if value is None:
            continue
        timestamp = float(timestamp)
        column =
python
{ "resource": "" }
q1973
Grapher._scale_y_values
train
def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True):
    ''' Take values and transmute them into a new range '''
    # Scale Y values - Create a scaled list of values to use for the
    # visual graph
    scaled_values = []
    y_min_value = min(values)
    if scale_old_from_zero:
        y_min_value = 0
    y_max_value = max(values)
    new_min = 0
    # Prevents division by zero if all values are the same
    OldRange = (y_max_value - y_min_value) or 1
python
{ "resource": "" }
q1974
Grapher._assign_ascii_character
train
def _assign_ascii_character(self, y_prev, y, y_next):  # noqa for complexity
    '''Assign the character to be placed into the graph'''
    char = '?'
    if y_next > y and y_prev > y:
        char = '-'
    elif y_next < y and y_prev < y:
        char = '-'
    elif y_prev < y and y == y_next:
python
{ "resource": "" }
q1975
Grapher.asciigraph
train
def asciigraph(self, values=None, max_height=None, max_width=None,
               label=False):
    '''
    Accepts a list of y values and returns an ascii graph.
    Optionally values can also be a dictionary with a key of timestamp,
    and a value of value. InGraphs returns data in this format for example.
    '''
    result = ''
    border_fill_char = '*'
    start_ctime = None
    end_ctime = None

    if not max_width:
        max_width = 180

    # If this is a dict of timestamp -> value, sort the data, store the
    # start/end time, and convert values to a list of values.
    if isinstance(values, dict):
        # Sort timestamp/value dict by the timestamps.
        time_series_sorted = sorted(list(values.items()),
                                    key=lambda x: x[0])
        start_timestamp = time_series_sorted[0][0]
        end_timestamp = time_series_sorted[-1][0]
        start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime()
        end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime()
        values = self._scale_x_values_timestamps(values=time_series_sorted,
                                                 max_width=max_width)

    values = [value for value in values if value is not None]

    if not max_height:
        max_height = min(20, max(values))

    stdev = statistics.stdev(values)
    mean = statistics.mean(values)

    # Do value adjustments.
    adjusted_values = list(values)
    adjusted_values = self._scale_x_values(values=values,
                                           max_width=max_width)

    # Getting upper/lower after scaling x values so we don't label a
    # spike we can't see.
    upper_value = max(adjusted_values)
    lower_value = min(adjusted_values)
python
{ "resource": "" }
q1976
replace
train
def replace(expression: Expression, position: Sequence[int],
            replacement: Replacement) -> Replacement:
    r"""Replaces the subexpression of `expression` at the given `position`
    with the given `replacement`.

    The original `expression` itself is not modified, but a modified copy
    is returned. If the replacement is a list of expressions, it will be
    expanded into the list of operands of the respective operation:

    >>> print(replace(f(a), (0, ), [b, c]))
    f(b, c)

    Parameters:
        expression:
            An :class:`Expression` where a (sub)expression is to be replaced.
        position:
            A tuple of indices, e.g. the empty tuple refers to the
            `expression` itself, `(0, )` refers to the first child (operand)
            of the `expression`, `(0, 0)` to the first child of the first
            child etc.
        replacement:
            Either an :class:`Expression` or a list of
            :class:`Expression`\s to be inserted into the `expression`
            instead of the original expression at that `position`.

    Returns:
        The resulting expression from the replacement.

    Raises:
        IndexError: If the position is invalid or out of range.
    """
    if len(position) == 0:
        return replacement
python
{ "resource": "" }
q1977
BipartiteGraph.find_matching
train
def find_matching(self) -> Dict[TLeft, TRight]:
    """Finds a matching in the bipartite graph.

    This is done using the Hopcroft-Karp algorithm with an implementation
    from the `hopcroftkarp` package.

    Returns:
        A dictionary where each edge of the matching is represented by a
        key-value pair, with the key being from the left part of the graph
        and the value from the right part.
    """
    # The directed graph is represented as a dictionary of edges.
    # The key is the tail of all edges which are represented by the value.
    # The value is a set of heads for all the edges originating from the tail.
python
{ "resource": "" }
q1978
BipartiteGraph.without_nodes
train
def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
    """Returns a copy of this bipartite graph with the given edge and its
    adjacent nodes removed."""
python
{ "resource": "" }
q1979
BipartiteGraph.without_edge
train
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
    """Returns a copy of this bipartite graph with the given edge removed."""
python
{ "resource": "" }
q1980
BipartiteGraph.limited_to
train
def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
    """Returns the induced subgraph where only the nodes from the given
    sets are included."""
python
{ "resource": "" }
q1981
is_constant
train
def is_constant(expression):
    """Check if the given expression is constant, i.e. it does not
    contain Wildcards."""
    if isinstance(expression, Wildcard):
python
{ "resource": "" }
q1982
get_head
train
def get_head(expression):
    """Returns the given expression's head."""
    if isinstance(expression, Wildcard):
        if isinstance(expression, SymbolWildcard):
python
{ "resource": "" }
q1983
match_head
train
def match_head(subject, pattern):
    """Checks if the head of subject matches the pattern's head."""
    if isinstance(pattern, Pattern):
        pattern = pattern.expression
    pattern_head = get_head(pattern)
python
{ "resource": "" }
q1984
is_anonymous
train
def is_anonymous(expression):
    """Returns True iff the expression does not contain any variables."""
    if hasattr(expression, 'variable_name') and expression.variable_name:
        return False
    if
python
{ "resource": "" }
q1985
contains_variables_from_set
train
def contains_variables_from_set(expression, variables):
    """Returns True iff the expression contains any of the variables
    from the given set."""
    if hasattr(expression, 'variable_name') and expression.variable_name in variables:
        return True
python
{ "resource": "" }
q1986
get_variables
train
def get_variables(expression, variables=None):
    """Returns the set of variable names in the given expression."""
    if variables is None:
        variables = set()
    if hasattr(expression, 'variable_name') and expression.variable_name is not None:
        variables.add(expression.variable_name)
python
{ "resource": "" }
q1987
rename_variables
train
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
    """Rename the variables in the expression according to the given dictionary.

    Args:
        expression:
            The expression in which the variables are renamed.
        renaming:
            The renaming dictionary. Maps old variable names to new ones.
            Variable names not occurring in the dictionary are left unchanged.

    Returns:
        The expression with renamed variables.
    """
    if isinstance(expression, Operation):
        if hasattr(expression, 'variable_name'):
            variable_name = renaming.get(expression.variable_name,
                                         expression.variable_name)
            return create_operation_expression(
                expression, [rename_variables(o, renaming) for
python
{ "resource": "" }
q1988
fixed_integer_vector_iter
train
def fixed_integer_vector_iter(max_vector: Tuple[int, ...],
                              vector_sum: int) -> Iterator[Tuple[int, ...]]:
    """
    Return an iterator over the integer vectors which

    - are componentwise less than or equal to *max_vector*, and
    - are non-negative, and where
    - the sum of their components is exactly *vector_sum*.

    The iterator yields the vectors in lexicographical order.

    Examples:

        List all vectors that are between ``(0, 0)`` and ``(2, 2)``
        componentwise, where the sum of components is 2:

        >>> vectors = list(fixed_integer_vector_iter([2, 2], 2))
        >>> vectors
        [(0, 2), (1, 1), (2, 0)]
        >>> list(map(sum, vectors))
        [2, 2, 2]

    Args:
        max_vector:
            Maximum vector for the iteration. Every yielded result will be
            less than or equal to this componentwise.
        vector_sum:
            Every iterated vector will have a component sum equal to this
            value.

    Yields:
        All non-negative vectors that have the given sum and are not larger
        than the given maximum.

    Raises:
        ValueError:
python
{ "resource": "" }
q1989
commutative_sequence_variable_partition_iter
train
def commutative_sequence_variable_partition_iter(
        values: Multiset, variables: List[VariableWithCount]
) -> Iterator[Dict[str, Multiset]]:
    """Yield all possible variable substitutions for given values and variables.

    .. note::

        The results are not yielded in any particular order because the
        algorithm uses dictionaries. Dictionaries until Python 3.6 do not
        keep track of the insertion order.

    Example:

        For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like
        ``f(x__, y___, y___)`` one can define the following input
        parameters for the partitioning:

        >>> x = VariableWithCount(name='x', count=1, minimum=1, default=None)
        >>> y = VariableWithCount(name='y', count=2, minimum=0, default=None)
        >>> values = Multiset('aaabbc')

        Then the solutions are found (and sorted to get a unique output):

        >>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y])
        >>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions)
        >>> for substitution in sorted(as_strings):
        ...     print(substitution)
        {x ↦ {a, a, a, b, b, c}, y ↦ {}}
        {x ↦ {a, a, a, c}, y ↦ {b}}
        {x ↦ {a, b, b, c}, y ↦ {a}}
        {x ↦ {a, c}, y ↦ {a, b}}

    Args:
        values:
            The multiset of values which are partitioned and distributed
            among the variables.
python
{ "resource": "" }
q1990
generator_chain
train
def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]:
    """Chain multiple generators together by passing results from one to the next.

    This helper function makes it possible to create a chain of generators
    where each generator is constructed by a factory that gets the data
    yielded by the previous generator. So each generator can generate new
    data dependent on the data yielded by the previous one. For each data
    item yielded by a generator, a new generator is constructed by the
    next factory.

    Example:

        Let's say for every number from 0 to 4, we want to count up to that
        number. Then we can do something like this using list comprehensions:

        >>> [i for n in range(1, 5) for i in range(1, n + 1)]
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        You can use this function to achieve the same thing:

        >>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1))))
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        The advantage is that this is independent of the number of dependent
        generators you have. Also, this function does not use recursion, so
        it is safe to use even with large generator counts.

    Args:
        initial_data:
            The initial data that is passed to the first generator factory.
        *factories:
            The generator factories. Each of them gets passed its
            predecessor's data and has to return an iterable.
python
{ "resource": "" }
q1991
Substitution.try_add_variable
train
def try_add_variable(self, variable_name: str,
                     replacement: VariableReplacement) -> None:
    """Try to add the variable with its replacement to the substitution.

    This considers an existing replacement and will only succeed if the
    new replacement can be merged with the old replacement. Merging can
    occur if the two replacements are equivalent. Replacements can also
    be merged if the old replacement for the variable_name was unordered
    (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered
    version of it:

    >>> subst = Substitution({'x': Multiset(['a', 'b'])})
    >>> subst.try_add_variable('x', ('a', 'b'))
    >>> print(subst)
    {x ↦ (a, b)}

    Args:
        variable_name:
            The name of the variable to add.
        replacement:
            The replacement for the variable.

    Raises:
        ValueError:
            If the variable cannot be merged because it conflicts with
            the existing substitution for the variable_name.
    """
    if variable_name not in self:
        self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
    else:
        existing_value
python
{ "resource": "" }
q1992
Substitution.union_with_variable
train
def union_with_variable(self, variable: str,
                        replacement: VariableReplacement) -> 'Substitution':
    """Try to create a new substitution with the given variable added.

    See :meth:`try_add_variable` for a version of this method that
    modifies the substitution in place.

    Args:
        variable_name:
            The name of the variable to add.
        replacement:
            The substitution for the variable.

    Returns:
        The new substitution with the variable_name
python
{ "resource": "" }
q1993
Substitution.extract_substitution
train
def extract_substitution(self, subject: 'expressions.Expression',
                         pattern: 'expressions.Expression') -> bool:
    """Extract the variable substitution for the given pattern and subject.

    This assumes that subject and pattern already match when being
    considered as linear. Also, they both must be :term:`syntactic`, as
    sequence variables cannot be handled here. All that this method does
    is check whether all the substitutions for the variables can be
    unified. So, in case it returns ``False``, the substitution is
    invalid for the match.

    .. warning::

        This method mutates the substitution and will even do so in case
        the extraction fails. Create a copy before using this method if
        you need to preserve the original substitution.

    Example:

        With an empty initial substitution and a linear pattern, the
        extraction will always succeed:

        >>> subst = Substitution()
        >>> subst.extract_substitution(f(a, b), f(x_, y_))
        True
        >>> print(subst)
        {x ↦ a, y ↦ b}

        Clashing values for existing variables will fail:

        >>> subst.extract_substitution(b, x_)
        False

        For non-linear patterns, the extraction can also fail with an
        empty substitution:

        >>> subst = Substitution()
        >>> subst.extract_substitution(f(a, b), f(x_, x_))
        False
        >>> print(subst)
        {x ↦ a}

        Note that the initial substitution got mutated even though the
        extraction failed!

    Args:
        subject:
            A :term:`syntactic` subject that matches the pattern.
        pattern:
python
{ "resource": "" }
q1994
Substitution.union
train
def union(self, *others: 'Substitution') -> 'Substitution':
    """Try to merge the substitutions.

    If a variable occurs in multiple substitutions, try to merge the
    replacements. See :meth:`union_with_variable` to see how replacements
    are merged.

    Does not modify any of the original substitutions.

    Example:

        >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
        >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
        >>> print(subst1.union(subst2))
        {x ↦ (a, b), y ↦ (c), z ↦ a}

    Args:
        others:
            The other substitutions to merge with this one.

    Returns:
        The new substitution with the other substitutions merged.

    Raises:
        ValueError:
python
{ "resource": "" }
q1995
Substitution.rename
train
def rename(self, renaming: Dict[str, str]) -> 'Substitution':
    """Return a copy of the substitution with renamed variables.

    Example:

        Rename the variable *x* to *y*:

        >>> subst = Substitution({'x': a})
        >>> subst.rename({'x': 'y'})
        {'y': Symbol('a')}

    Args:
        renaming:
            A dictionary mapping old variable names to new ones.

    Returns:
        A copy
python
{ "resource": "" }
q1996
_get_symbol_wildcard_label
train
def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]:
    """Return the transition target for the given symbol type from the
    given state or None if it does not exist."""
python
{ "resource": "" }
q1997
_term_str
train
def _term_str(term: TermAtom) -> str:  # pragma: no cover
    """Return a string representation of a term atom."""
    if is_operation(term):
        return term.name + '('
    elif is_symbol_wildcard(term):
        return '*{!s}'.format(term.__name__)
    elif isinstance(term, Wildcard):
python
{ "resource": "" }
q1998
FlatTerm.merged
train
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':
    """Concatenate the given flatterms to a single flatterm.

    Args:
        *flatterms:
            The flatterms which are concatenated.

    Returns:
python
{ "resource": "" }
q1999
FlatTerm._flatterm_iter
train
def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]:
    """Generator that yields the atoms of the expressions in prefix
    notation with operation end markers."""
    if isinstance(expression, Operation):
        yield type(expression)
        for operand
python
{ "resource": "" }