Columns:
docstring — string (lengths 52 to 499)
function — string (lengths 67 to 35.2k)
__index_level_0__ — int64 (values 52.6k to 1.16M)
Return the number of milliseconds to wait, based on the connection state, before attempting to send data. When disconnected, this respects the reconnect backoff time. When connecting, returns 0 to allow non-blocking connect to finish. When connected, returns a very large number to handle slow/stalled connections. Arguments: node_id (int): The id of the node to check Returns: int: The number of milliseconds to wait.
def connection_delay(self, node_id):
    conn = self._conns.get(node_id)
    if conn is None:
        return 0
    return conn.connection_delay()
116,494
Check whether a node is ready to send more requests. In addition to connection-level checks, this method is also used to block additional requests from being sent during a metadata refresh. Arguments: node_id (int): id of the node to check metadata_priority (bool): Mark node as not-ready if a metadata refresh is required. Default: True Returns: bool: True if the node is ready and metadata is not refreshing
def is_ready(self, node_id, metadata_priority=True):
    if not self._can_send_request(node_id):
        return False

    # if we need to update our metadata now declare all requests unready to
    # make metadata requests first priority
    if metadata_priority:
        if self._metadata_refresh_in_progress:
            return False
        if self.cluster.ttl() == 0:
            return False
    return True
116,495
Get the number of in-flight requests for a node or all nodes. Arguments: node_id (int, optional): a specific node to check. If unspecified, return the total for all nodes Returns: int: pending in-flight requests for the node, or all nodes if None
def in_flight_request_count(self, node_id=None):
    if node_id is not None:
        conn = self._conns.get(node_id)
        if conn is None:
            return 0
        return len(conn.in_flight_requests)
    else:
        return sum([len(conn.in_flight_requests)
                    for conn in list(self._conns.values())])
116,500
Set specific topics to track for metadata. Arguments: topics (list of str): topics to check for metadata Returns: Future: resolves after metadata request/response
def set_topics(self, topics):
    if set(topics).difference(self._topics):
        future = self.cluster.request_update()
    else:
        future = Future().success(set(topics))
    self._topics = set(topics)
    return future
116,503
Add a topic to the list of topics tracked via metadata. Arguments: topic (str): topic to track Returns: Future: resolves after metadata request/response
def add_topic(self, topic):
    if topic in self._topics:
        return Future().success(set(self._topics))

    self._topics.add(topic)
    return self.cluster.request_update()
116,504
Create a new buffer pool. Arguments: memory (int): maximum memory that this buffer pool can allocate poolable_size (int): memory size per buffer to cache in the free list rather than deallocating
def __init__(self, memory, poolable_size, metrics=None, metric_group_prefix='producer-metrics'):
    self._poolable_size = poolable_size
    self._lock = threading.RLock()

    buffers = int(memory / poolable_size) if poolable_size else 0
    self._free = collections.deque([io.BytesIO() for _ in range(buffers)])

    self._waiters = collections.deque()
    self.wait_time = None
    if metrics:
        self.wait_time = metrics.sensor('bufferpool-wait-time')
        self.wait_time.add(metrics.metric_name(
            'bufferpool-wait-ratio', metric_group_prefix,
            'The fraction of time an appender waits for space allocation.'),
            Rate())
116,549
Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool is configured with blocking mode. Arguments: size (int): The buffer size to allocate in bytes [ignored] max_time_to_block_ms (int): The maximum time in milliseconds to block for buffer memory to be available Returns: io.BytesIO
def allocate(self, size, max_time_to_block_ms):
    with self._lock:
        # check if we have a free buffer of the right size pooled
        if self._free:
            return self._free.popleft()

        elif self._poolable_size == 0:
            return io.BytesIO()

        else:
            # we are out of buffers and will have to block
            buf = None
            more_memory = threading.Condition(self._lock)
            self._waiters.append(more_memory)
            # loop over and over until we have a buffer or have reserved
            # enough memory to allocate one
            while buf is None:
                start_wait = time.time()
                more_memory.wait(max_time_to_block_ms / 1000.0)
                end_wait = time.time()
                if self.wait_time:
                    self.wait_time.record(end_wait - start_wait)

                if self._free:
                    buf = self._free.popleft()
                else:
                    self._waiters.remove(more_memory)
                    raise Errors.KafkaTimeoutError(
                        "Failed to allocate memory within the configured"
                        " max blocking time")

            # remove the condition for this thread to let the next thread
            # in line start getting memory
            removed = self._waiters.popleft()
            assert removed is more_memory, 'Wrong condition'

            # signal any additional waiters if there is more memory left
            # over for them
            if self._free and self._waiters:
                self._waiters[0].notify()

            # unlock and return the buffer
            return buf
116,550
Return buffers to the pool. If they are of the poolable size add them to the free list, otherwise just mark the memory as free. Arguments: buffer_ (io.BytesIO): The buffer to return
def deallocate(self, buf):
    with self._lock:
        # BytesIO.truncate here makes the pool somewhat pointless
        # but we stick with the BufferPool API until migrating to
        # bytesarray / memoryview. The buffer we return must not
        # expose any prior data on read().
        buf.truncate(0)
        self._free.append(buf)
        if self._waiters:
            self._waiters[0].notify()
116,551
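A minimal usage sketch for the BufferPool allocate/deallocate pair above. This is hedged: it assumes the class and its module-level dependencies are importable as in the source library, and the memory sizes and timeout are illustrative only.

pool = BufferPool(memory=32 * 1024, poolable_size=16 * 1024)     # a pool of two 16 KiB buffers
buf = pool.allocate(size=16 * 1024, max_time_to_block_ms=1000)   # may block for up to 1 second
buf.write(b'record bytes go here')
# ... hand the buffer off to whatever drains it ...
pool.deallocate(buf)   # truncates the buffer and returns it to the free list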
Encode an integer to a varint representation. See https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints on how those can be produced. Arguments: value (int): Value to encode write (function): Called per byte that needs to be written Returns: int: Number of bytes written
def encode_varint(value, write):
    value = (value << 1) ^ (value >> 63)

    if value <= 0x7f:  # 1 byte
        write(value)
        return 1
    if value <= 0x3fff:  # 2 bytes
        write(0x80 | (value & 0x7f))
        write(value >> 7)
        return 2
    if value <= 0x1fffff:  # 3 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(value >> 14)
        return 3
    if value <= 0xfffffff:  # 4 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(value >> 21)
        return 4
    if value <= 0x7ffffffff:  # 5 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(0x80 | ((value >> 21) & 0x7f))
        write(value >> 28)
        return 5
    else:
        # Return to general algorithm
        bits = value & 0x7f
        value >>= 7
        i = 0
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
            i += 1
        write(bits)
        return i
116,569
Decode an integer from a varint representation. See https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints on how those can be produced. Arguments: buffer (bytearray): buffer to read from. pos (int): optional position to read from Returns: (int, int): Decoded int value and next read position
def decode_varint(buffer, pos=0):
    result = buffer[pos]
    if not (result & 0x81):
        return (result >> 1), pos + 1
    if not (result & 0x80):
        return (result >> 1) ^ (~0), pos + 1

    result &= 0x7f
    pos += 1
    shift = 7
    while 1:
        b = buffer[pos]
        result |= ((b & 0x7f) << shift)
        pos += 1
        if not (b & 0x80):
            return ((result >> 1) ^ -(result & 1), pos)
        shift += 7
        if shift >= 64:
            raise ValueError("Out of int64 range")
116,571
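A small round-trip sketch for the encode_varint/decode_varint pair above; the write callback can simply be bytearray.append, since the encoder calls it once per byte.

buf = bytearray()
n_bytes = encode_varint(-300, buf.append)      # zigzag-encodes, then writes one byte at a time
value, next_pos = decode_varint(buf, pos=0)
assert value == -300 and next_pos == n_bytes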
Encode and queue a kafka api request for sending. Arguments: request (object): An un-encoded kafka request. correlation_id (int, optional): Optionally specify an ID to correlate requests with responses. If not provided, an ID will be generated automatically. Returns: correlation_id
def send_request(self, request, correlation_id=None):
    log.debug('Sending request %s', request)
    if correlation_id is None:
        correlation_id = self._next_correlation_id()

    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self._client_id)
    message = b''.join([header.encode(), request.encode()])
    size = Int32.encode(len(message))
    data = size + message
    self.bytes_to_send.append(data)
    if request.expect_response():
        ifr = (correlation_id, request)
        self.in_flight_requests.append(ifr)
    return correlation_id
116,577
Converts from hex|rgb to rgba Parameters: ----------- color : string Color representation in hex or rgb alpha : float Value from 0 to 1.0 that represents the alpha value. Example: to_rgba('#E1E5ED',0.6) to_rgba('#f03',0.7) to_rgba('rgb(23,23,23)',.5)
def to_rgba(color, alpha):
    if type(color) == tuple:
        color, alpha = color
    color = color.lower()
    if 'rgba' in color:
        cl = list(eval(color.replace('rgba', '')))
        if alpha:
            cl[3] = alpha
        return 'rgba' + str(tuple(cl))
    elif 'rgb' in color:
        r, g, b = eval(color.replace('rgb', ''))
        return 'rgba' + str((r, g, b, alpha))
    else:
        return to_rgba(hex_to_rgb(color), alpha)
117,409
Converts from hex to rgb Parameters: ----------- color : string Color representation in hex Example: hex_to_rgb('#E1E5ED') hex_to_rgb('#f03')
def hex_to_rgb(color):
    color = normalize(color)
    color = color[1:]
    # return 'rgb'+str(tuple(ord(c) for c in color.decode('hex')))
    return 'rgb' + str((int(color[0:2], base=16),
                        int(color[2:4], base=16),
                        int(color[4:6], base=16)))
117,410
Returns a hex color Parameters: ----------- color : string Color representation in rgba|rgb|hex Example: normalize('#f03')
def normalize(color):
    if type(color) == tuple:
        color = to_rgba(*color)
    if 'rgba' in color:
        return rgb_to_hex(rgba_to_rgb(color))
    elif 'rgb' in color:
        return rgb_to_hex(color)
    elif '#' in color:
        if len(color) == 7:
            return color
        else:
            color = color[1:]
            return '#' + ''.join([x * 2 for x in list(color)])
    else:
        try:
            return normalize(cnames[color.lower()])
        except:
            raise CufflinksError('Not a valid color: ' + color)
117,411
Converts from rgb to hex Parameters: ----------- color : string Color representation in rgb Example: rgb_to_hex('rgb(23,25,24)')
def rgb_to_hex(color):
    rgb = eval(color.replace('rgb', ''))
    # return '#'+''.join(map(chr, rgb)).encode('hex')
    return '#' + ''.join(['{0:02x}'.format(x).upper() for x in rgb])
117,412
Converts from rgba to rgb Parameters: ----------- color : string Color representation in rgba bg : string Color representation in rgb Example: rgba_to_rgb('rgba(23,25,24,.4)')
def rgba_to_rgb(color, bg='rgb(255,255,255)'):
    def c_tup(c):
        return eval(c[c.find('('):])
    color = c_tup(color)
    bg = hex_to_rgb(normalize(bg))
    bg = c_tup(bg)
    a = color[3]
    r = [int((1 - a) * bg[i] + a * color[i]) for i in range(3)]
    return 'rgb' + str(tuple(r))
117,413
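A short usage sketch tying together the color helpers above (to_rgba, normalize, rgba_to_rgb). It assumes the functions are imported from the same module they live in; the example inputs are illustrative.

to_rgba('#ff9933', 0.5)                  # -> 'rgba(255, 153, 51, 0.5)'
normalize('rgb(23,25,24)')               # -> '#171918'
rgba_to_rgb('rgba(255, 153, 51, 0.5)')   # blended against the white default bg -> 'rgb(255, 204, 153)'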
Converts from hex to hsv Parameters: ----------- color : string Color representation in hex Example: hex_to_hsv('#ff9933')
def hex_to_hsv(color):
    color = normalize(color)
    color = color[1:]
    # color=tuple(ord(c)/255.0 for c in color.decode('hex'))
    color = (int(color[0:2], base=16) / 255.0,
             int(color[2:4], base=16) / 255.0,
             int(color[4:6], base=16) / 255.0)
    return colorsys.rgb_to_hsv(*color)
117,414
Generates a scale of colours from a base colour Parameters: ----------- color : string Color representation in hex N : int number of colours to generate Example: color_range('#ff9933',20)
def color_range(color, N=20):
    color = normalize(color)
    org = color
    color = hex_to_hsv(color)
    HSV_tuples = [(color[0], x, color[2]) for x in np.arange(0, 1, 2.0 / N)]
    HSV_tuples.extend([(color[0], color[1], x) for x in np.arange(0, 1, 2.0 / N)])
    hex_out = []
    for c in HSV_tuples:
        c = colorsys.hsv_to_rgb(*c)
        c = [int(_ * 255) for _ in c]
        # hex_out.append("#"+"".join([chr(x).encode('hex') for x in c]))
        hex_out.append("#" + "".join(['{0:02x}'.format(x) for x in c]))
    if org not in hex_out:
        hex_out.append(org)
    hex_out.sort()
    return hex_out
117,415
Returns a generator with a list of colors and gradients of those colors Parameters: ----------- colors : list(colors) List of colors to use Example: colorgen() colorgen(['blue','red','pink']) colorgen(['#f03','rgb(23,25,25)'])
def colorgen(colors=None, n=None, scale=None, theme=None):
    from .themes import THEMES
    step = .1
    if not colors:
        if not scale:
            if not theme:
                scale = get_config_file()['colorscale']
            else:
                scale = THEMES[theme]['colorscale']
        colors = get_scales(scale)
    dq = deque(colors)
    if len(dq) == 0:
        dq = deque(get_scales('ggplot'))
    if n:
        step = len(dq) * 0.8 / n if len(dq) * 8 < n else .1
    for i in np.arange(.2, 1, step):
        for y in dq:
            yield to_rgba(y, 1 - i + .2)
        dq.rotate(1)
117,417
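Because colorgen above is a generator that keeps cycling through the deque, a typical pattern is to slice a handful of colors from it. A hedged sketch, assuming the surrounding package (its config and scales helpers) is available:

import itertools

gen = colorgen(['#ff9933', 'blue'])           # cycles the given colors with a fading alpha
first_four = list(itertools.islice(gen, 4))   # e.g. ['rgba(255, 153, 51, 1.0)', 'rgba(0, 0, 255, 1.0)', ...]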
Displays a color scale (HTML) Parameters: ----------- scale : str Color scale name If no scale name is provided then all scales are returned (max number for each scale) If scale='all' then all scale combinations available will be returned Example: scales('accent') scales('all') scales()
def scales(scale=None):
    if scale:
        if scale == 'all':
            display(HTML(cl.to_html(_scales)))
        else:
            display(HTML(cl.to_html(get_scales(scale))))
    else:
        s = ''
        keys = list(_scales_names.keys())
        keys.sort()
        for k in keys:
            scale = get_scales(k)
            s += '<div style="display:inline-block;padding:10px;"><div>{0}</div>{1}</div>'.format(
                k, cl.to_html(scale))
        display(HTML(s))
117,419
Returns a color scale to be used for a plotly figure Parameters: ----------- scale : str or list Color scale name If the color name is preceded by a minus (-) then the scale is inverted. Also accepts a list of colors (rgb,rgba,hex) Example: get_colorscale('accent') get_colorscale(['rgb(127,201,127)','rgb(190,174,212)','rgb(253,192,134)'])
def get_colorscale(scale):
    if type(scale) in string_types:
        scale = get_scales(scale)
    else:
        if type(scale) != list:
            raise Exception(
                "scale needs to be either a scale name or list of colors")
    cs = [[1.0 * c / (len(scale) - 1), scale[c]] for c in range(len(scale))]
    cs.sort()
    return cs
117,422
Returns a dict with an item per key Parameters: ----------- items : string, list or dict Items (ie line styles) keys: list List of keys items_names : string Name of items
def get_items_as_list(items, keys, items_names='styles'):
    if type(items) != dict:
        if type(items) == list:
            if len(items) != len(keys):
                raise Exception('List of {0} is not the same length as keys'.format(items_names))
            else:
                items = dict(zip(keys, items))
        else:
            items = dict(zip(keys, [items] * len(keys)))
    return items
117,428
Adds a study to QuantFigure.studies Parameters: study : dict {'kind':study_kind, 'params':study_parameters, 'display':display_parameters}
def _add_study(self, study):
    str = '{study} {name}({period})' if study['params'].get('str', None) == None else study['params']['str']
    study['params']['str'] = str

    if not study['name']:
        study['name'] = ta.get_column_name(study['kind'].upper(), study=study['kind'],
                                           str=str,
                                           period=study['params'].get('periods', None),
                                           column=study['params'].get('column', None))

    restore = study['display'].pop('restore', False)
    if restore:
        _ = self.studies.pop(study['kind'], None)

    if study['kind'] in self.studies:
        try:
            id = '{0} ({1})'.format(study['kind'], study['params']['periods'])
        except:
            id = '{0} ({1})'.format(study['kind'], '(2)')
    else:
        id = study['kind']

    _id = id
    n = 1
    while id in self.studies:
        id = '{0} ({1})'.format(_id, n)
        n += 1
    self.studies[id] = study
117,449
Filters a DataFrame for columns that contain the given strings. Parameters: ----------- include : bool If False then it will exclude items that match the given filters. This is the same as passing a regex ^keyword kwargs : Key value pairs that indicate the column and value to screen for Example: df.screen(col1='string_to_match',col2=['string1','string2'])
def _screen(self, include=True, **kwargs):
    df = self.copy()
    for k, v in list(kwargs.items()):
        v = [v] if type(v) != list else v
        if include:
            df = df[df[k].str.contains('|'.join(v), flags=re.IGNORECASE).fillna(False)]
        else:
            df = df[df[k].str.contains('|'.join(v), flags=re.IGNORECASE).fillna(False) == False]
    return df
117,463
Returns a normalized Series or DataFrame Example: Series.normalize() Returns: Series or DataFrame Parameters: ----------- asOf : string Date format '2015-02-29' multiplier : int Factor by which the results will be adjusted
def normalize(self, asOf=None, multiplier=100):
    if not asOf:
        x0 = self.ix[0]
    else:
        x0 = self.ix[asOf]
    return self / x0 * multiplier
117,465
Returns a dictionary with the path in which each of the keys is found Parameters: from_d : dict Dictionary that contains all the keys, values to_d : dict Dictionary to which the results will be appended Example: dict_path({'level1':{'level2':{'level3':'value'}}}) Returns {'level1': [], 'level2': ['level1'], 'level3': ['level1', 'level2'] }
def dict_path(from_d, to_d={}, l=[]):
    for k, v in list(from_d.items()):
        if isinstance(v, dict):
            to_d[k] = l
            _l = copy.deepcopy(l)
            _l.append(k)
            to_d = dict_path(from_d[k], to_d, _l)
        else:
            to_d[k] = l
    _to_d = to_d.copy()
    to_d = {}
    return _to_d
117,467
Returns a dictionary indexed by values {value_k:key_k} Parameters: ----------- d : dictionary
def inverseDict(d):
    dt = {}
    for k, v in list(d.items()):
        if type(v) in (list, tuple):
            for i in v:
                dt[i] = k
        else:
            dt[v] = k
    return dt
117,470
Looks for keys of the format keyword_value and returns a dictionary with {keyword:value} format Parameters: ----------- from_kwargs : dict Original dictionary to_kwargs : dict Dictionary where the items will be appended keyword : string Keyword to look for in the original dictionary clean_origin : bool If True then the k,v pairs from the original dictionary are deleted
def kwargs_from_keyword(from_kwargs, to_kwargs, keyword, clean_origin=True):
    for k in list(from_kwargs.keys()):
        if '{0}_'.format(keyword) in k:
            to_kwargs[k.replace('{0}_'.format(keyword), '')] = from_kwargs[k]
            if clean_origin:
                del from_kwargs[k]
    return to_kwargs
117,471
Updates the values (deep form) of a given dictionary Parameters: ----------- d : dict dictionary that contains the values to update d_update : dict dictionary to be updated
def deep_update(d, d_update):
    for k, v in list(d_update.items()):
        if isinstance(v, dict):
            if k in d:
                deep_update(d[k], v)
            else:
                d[k] = v
        elif isinstance(d, list):
            d.append({k: v})
        else:
            d[k] = v
    return d
117,474
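A tiny worked example of deep_update on nested dicts, updating the input in place:

base = {'layout': {'font': {'size': 12}, 'title': 'old'}}
deep_update(base, {'layout': {'font': {'color': 'red'}, 'title': 'new'}})
# base is now {'layout': {'font': {'size': 12, 'color': 'red'}, 'title': 'new'}}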
Returns a string that represents a date n numbers of days from today. Parameters: ----------- delta : int number of days strfmt : string format in which the date will be represented
def getDateFromToday(delta, strfmt='%Y%m%d'):
    return (dt.date.today() + dt.timedelta(delta)).strftime(strfmt)
117,477
Returns a DataFrame with the required format for a pie plot Parameters: ----------- n_labels : int Number of labels mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
def pie(n_labels=5, mode=None):
    return pd.DataFrame({'values': np.random.randint(1, 100, n_labels),
                         'labels': getName(n_labels, mode=mode)})
117,500
Returns a DataFrame with the required format for a scatter plot Parameters: ----------- n_categories : int Number of categories n : int Number of points for each category prefix : string Name for each category mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
def scatter(n_categories=5, n=10, prefix='category', mode=None):
    categories = []
    for i in range(n_categories):
        categories.extend([prefix + str(i + 1)] * n)
    return pd.DataFrame({'x': np.random.randn(n * n_categories),
                         'y': np.random.randn(n * n_categories),
                         'text': getName(n * n_categories, mode=mode),
                         'categories': categories})
117,501
Returns a DataFrame with the required format for a heatmap plot Parameters: ----------- n_x : int Number of x categories n_y : int Number of y categories
def heatmap(n_x=5, n_y=10):
    x = ['x_' + str(_) for _ in range(n_x)]
    y = ['y_' + str(_) for _ in range(n_y)]
    return pd.DataFrame(surface(n_x - 1, n_y - 1).values, index=x, columns=y)
117,502
Returns a DataFrame with the required format for a scatter (lines) plot Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace columns : [str] List of column names dateIndex : bool If True it will return a datetime index if False it will return a enumerated index mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
def lines(n_traces=5, n=100, columns=None, dateIndex=True, mode=None):
    index = pd.date_range('1/1/15', periods=n) if dateIndex else list(range(n))
    df = pd.DataFrame(np.random.randn(n, n_traces), index=index,
                      columns=getName(n_traces, columns=columns, mode=mode))
    return df.cumsum()
117,503
Returns a DataFrame with the required format for a bar plot Parameters: ----------- n : int Number of points for each trace n_categories : int Number of categories for each point prefix : string Name for each category columns : [str] List of column names mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
def bars(n=3, n_categories=3, prefix='category', columns=None, mode='abc'):
    categories = []
    if not columns:
        columns = getName(n, mode=mode)
    for i in range(n_categories):
        categories.extend([prefix + str(i + 1)])
    data = dict([(x, np.random.randint(1, 100, n_categories)) for x in columns])
    return pd.DataFrame(data, index=categories)
117,504
Returns a DataFrame with the required format for a candlestick or ohlc plot df[['open','high','low','close']] Parameters: ----------- n : int Number of ohlc points
def ohlc(n=100):
    index = pd.date_range('1/1/15', periods=n * 288, freq='5min', tz='utc')
    data = np.random.randn(n * 288)
    data[0] = np.array([100])
    df = pd.DataFrame(data, index=index, columns=['a'])
    df = df.cumsum()
    df = df.resample('1d').ohlc()
    df.index = df.index.date
    df.index = pd.to_datetime(df.index)
    return df['a']
117,505
Returns a DataFrame with the required format for a candlestick or ohlc plot df[['open','high','low','close','volume']] Parameters: ----------- n : int Number of ohlc points
def ohlcv(n=100):
    df = ohlc(n)
    df['volume'] = [np.random.randint(1000, 10000) for _ in range(len(df))]
    return df
117,506
Returns a DataFrame with the required format for a box plot Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
def box(n_traces=5, n=100, mode=None):
    df = pd.DataFrame([np.random.chisquare(np.random.randint(2, 10), n_traces) for _ in range(n)],
                      columns=getName(n_traces, mode=mode))
    return df
117,507
Returns a DataFrame with the required format for a histogram plot Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
def histogram(n_traces=1, n=500, dispersion=2, mode=None):
    df = pd.DataFrame(np.transpose([np.random.randn(n) + np.random.randint(-1 * dispersion, dispersion)
                                    for _ in range(n_traces)]),
                      columns=getName(n_traces, mode=mode))
    return df
117,508
Returns a DataFrame with the required format for a distribution plot (distplot) Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
def distplot(n_traces=1, n=500, dispersion=3, mode=None):
    return histogram(n_traces, n, dispersion, mode)
117,509
Returns a DataFrame with the required format for a violin plot Parameters: ----------- n : int Number of points categories : bool or int If True, then a column with categories is added n_categories : int Number of categories
def violin(n=500, dispersion=3, categories=True, n_categories=5):
    df = histogram(1, n, dispersion, 'abc')
    df = df.rename(columns={'a': 'data'})
    if categories:
        df['categories'] = ['category_{0}'.format(np.random.randint(n_categories))
                            for _ in range(n)]
    return df
117,510
Returns a DataFrame with the required format for a surface plot Parameters: ----------- n_x : int Number of points along the X axis n_y : int Number of points along the Y axis
def surface(n_x=20, n_y=20):
    x = [float(np.random.randint(0, 100))]
    for i in range(n_x):
        x.append(x[:1][0] + np.random.randn() * np.random.randint(1, 10))
    df = pd.DataFrame(x)
    for i in range(n_y):
        df[i + 1] = df[i].map(lambda x: x + np.random.randn() * np.random.randint(1, 10))
    return df
117,511
Returns a DataFrame with the required format for a surface (sine wave) plot Parameters: ----------- n : int Range for the X and Y axes (-n,n) inc : float Size of the increment along each axis
def sinwave(n=4, inc=.25):
    x = np.arange(-n, n, inc)
    y = np.arange(-n, n, inc)
    X, Y = np.meshgrid(x, y)
    R = np.sqrt(X ** 2 + Y ** 2)
    Z = np.sin(R) / (.5 * R)
    return pd.DataFrame(Z, index=x, columns=y)
117,512
Generates an annotations object Parameters: ----------- df : DataFrame Original DataFrame of values annotations : dict or list Dictionary of annotations {x_point : text} or List of Plotly annotations
def get_annotations(df, annotations, kind='lines', theme=None, **kwargs):
    for key in list(kwargs.keys()):
        if key not in __ANN_KWARGS:
            raise Exception("Invalid keyword : '{0}'".format(key))

    theme_data = getTheme(theme)
    kwargs['fontcolor'] = kwargs.pop('fontcolor', theme_data['annotations']['fontcolor'])
    kwargs['arrowcolor'] = kwargs.pop('arrowcolor', theme_data['annotations']['arrowcolor'])
    kwargs['fontsize'] = kwargs.pop('fontsize', 12)

    def check_ann(annotation):
        local_list = []
        if 'title' in annotation:
            local_list.append(
                dict(
                    text=annotation['title'],
                    showarrow=False,
                    x=0,
                    y=1,
                    xref='paper',
                    yref='paper',
                    font={'size': 24 if not 'fontsize' in kwargs else kwargs['fontsize']}
                )
            )
            del annotation['title']
        elif 'x' in annotation:
            ann = dict(
                x=annotation['x'],
                y=annotation.get('y', .5),
                xref=annotation.get('xref', 'x'),
                yref=annotation.get('yref', kwargs.get('yref', 'y1')),
                text=annotation.get('text'),
                showarrow=annotation.get('showarrow', True),
                arrowhead=annotation.get('arrowhead', 7),
                arrowcolor=annotation.get('arrowcolor', kwargs.get('arrowcolor')),
                ax=annotation.get('ax', 0),
                ay=annotation.get('ay', -100),
                textangle=annotation.get('textangle', -90),
                font=dict(
                    color=annotation.get('fontcolor', annotation.get('color', kwargs.get('fontcolor'))),
                    size=annotation.get('fontsize', annotation.get('size', kwargs.get('fontsize')))
                )
            )
            local_list.append(ann)
        else:
            for k, v in list(annotation.items()):
                if kind in ('candlestick', 'ohlc', 'candle'):
                    d = ta._ohlc_dict(df)
                    maxv = df[d['high']].ix[k]
                    yref = 'y2'
                else:
                    maxv = df.ix[k].sum() if k in df.index else 0
                    yref = 'y1'
                ann = dict(
                    x=k,
                    y=maxv,
                    xref='x',
                    yref=yref,
                    text=v,
                    showarrow=kwargs.get('showarrow', True),
                    arrowhead=kwargs.get('arrowhead', 7),
                    arrowcolor=kwargs['arrowcolor'],
                    ax=kwargs.get('ax', 0),
                    ay=kwargs.get('ay', -100),
                    textangle=kwargs.get('textangle', -90),
                    font=dict(
                        color=kwargs['fontcolor'],
                        size=kwargs['fontsize']
                    )
                )
                local_list.append(ann)
        return local_list

    annotations = make_list(annotations)
    _list_ann = []
    for ann in annotations:
        _list_ann.extend(check_ann(ann))
    return _list_ann
117,517
Strips a figure into multiple figures with a trace on each of them Parameters: ----------- figure : Figure Plotly Figure
def strip_figures(figure):
    fig = []
    for trace in figure['data']:
        fig.append(dict(data=[trace], layout=figure['layout']))
    return fig
117,518
Generates a layout with the union of all properties of multiple figures' layouts Parameters: ----------- fig : list(Figures) List of Plotly Figures
def get_base_layout(figs):
    layout = {}
    for fig in figs:
        if not isinstance(fig, dict):
            fig = fig.to_dict()
        for k, v in list(fig['layout'].items()):
            layout[k] = v
    return layout
117,519
Generates multiple Plotly figures for a given DataFrame Parameters: ----------- df : DataFrame Pandas DataFrame specs : list(dict) List of dictionaries with the properties of each figure. All properties available can be seen with help(cufflinks.pd.DataFrame.iplot) asList : boolean If True, then a list of figures is returned. Otherwise a single (merged) figure is returned. Default : False
def figures(df, specs, asList=False):
    figs = []
    for spec in specs:
        figs.append(df.figure(**spec))
    if asList:
        return figs
    else:
        return merge_figures(figs)
117,520
Generates a single Figure from a list of figures Parameters: ----------- figures : list(Figures) List of figures to be merged.
def merge_figures(figures):
    figure = {}
    data = []
    for fig in figures:
        for trace in fig['data']:
            data.append(trace)
    layout = get_base_layout(figures)
    figure['data'] = data
    figure['layout'] = layout
    return figure
117,521
Displays a matrix with scatter plot for each pair of Series in the DataFrame. The diagonal shows a histogram for each of the Series Parameters: ----------- df : DataFrame Pandas DataFrame theme : string Theme to be used (if not the default) bins : int Number of bins to use for histogram color : string Color to be used for each scatter plot size : int Size for each marker on the scatter plot
def scatter_matrix(df, theme=None, bins=10, color='grey', size=2):
    if not theme:
        theme = auth.get_config_file()['theme']

    figs = []
    for i in df.columns:
        for j in df.columns:
            if i == j:
                fig = df.iplot(kind='histogram', keys=[i], asFigure=True, bins=bins)
                figs.append(fig)
            else:
                figs.append(df.iplot(kind='scatter', mode='markers', x=j, y=i,
                                     asFigure=True, size=size, colors=[color]))
    layout = getLayout(theme)
    layout['xaxis'].update(showgrid=False)
    layout['yaxis'].update(showgrid=False)
    sm = subplots(figs, shape=(len(df.columns), len(df.columns)),
                  shared_xaxes=False, shared_yaxes=False,
                  horizontal_spacing=.05, vertical_spacing=.07,
                  base_layout=layout)
    sm['layout'].update(bargap=.02, showlegend=False)
    return sm
117,526
Sets the axis in which each trace should appear If the axis doesn't exist then a new axis is created Parameters: ----------- traces : list(str) List of trace names on : string The axis in which the traces should be placed. If this is not indicated then a new axis will be created side : string Side where the axis will be placed 'left' 'right' title : string Sets the title of the axis Applies only to new axis
def _set_axis(self, traces, on=None, side='right', title=''):
    fig = {}
    fig_cpy = fig_to_dict(self).copy()
    fig['data'] = fig_cpy['data']
    fig['layout'] = fig_cpy['layout']
    fig = Figure(fig)
    traces = make_list(traces)

    def update_data(trace, y):
        anchor = fig.axis['def'][y]['anchor'] if 'anchor' in fig.axis['def'][y] else 'x1'
        idx = fig.trace_dict[trace] if isinstance(trace, str) else trace
        fig['data'][idx]['xaxis'] = anchor
        fig['data'][idx]['yaxis'] = y

    for trace in traces:
        if on:
            if on not in fig.axis['def']:
                raise Exception('"on" axis does not exists: {0}'.format(on))
            update_data(trace, y=on)
        else:
            curr_x, curr_y = fig.axis['ref'][trace]
            domain = '[0.0, 1.0]' if 'domain' not in fig.axis['def'][curr_y] else str(fig.axis['def'][curr_y]['domain'])
            try:
                new_axis = fig.axis['dom']['y'][domain][side]
            except KeyError:
                axis = fig.axis['def'][curr_y].copy()
                ### check overlaying values
                axis.update(title=title, overlaying=curr_y, side=side, anchor=curr_x)
                axis_idx = str(fig.axis['len']['y'] + 1)
                fig['layout']['yaxis{0}'.format(axis_idx)] = axis
                new_axis = 'y{0}'.format(axis_idx)
            update_data(trace, y=new_axis)

    for k in list(fig.axis['def'].keys()):
        id = '{0}axis{1}'.format(k[0], k[-1:])
        if k not in fig.axis['ref_axis']:
            try:
                del fig['layout'][id]
            except KeyError:
                pass

    return fig
117,535
Plot a histogram from a numpy array of probabilities Args: Y_p: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])
def plot_probabilities_histogram(Y_p, title=None):
    if Y_p.ndim > 1:
        msg = (
            f"Arg Y_p should be a 1-dimensional np.ndarray, not of shape "
            f"{Y_p.shape}."
        )
        raise ValueError(msg)
    plt.hist(Y_p, bins=20)
    plt.xlim((0, 1.025))
    plt.xlabel("Probability")
    plt.ylabel("# Predictions")
    if isinstance(title, str):
        plt.title(title)
    plt.show()
117,552
Plot a histogram comparing int predictions vs true labels by class Args: Y_ph: An [n] or [n, 1] np.ndarray of predicted int labels Y: An [n] or [n, 1] np.ndarray of gold labels
def plot_predictions_histogram(Y_ph, Y, title=None):
    labels = list(set(Y).union(set(Y_ph)))
    edges = [x - 0.5 for x in range(min(labels), max(labels) + 2)]

    plt.hist([Y_ph, Y], bins=edges, label=["Predicted", "Gold"])
    ax = plt.gca()
    ax.set_xticks(labels)
    plt.xlabel("Label")
    plt.ylabel("# Predictions")
    plt.legend(loc="upper right")
    if isinstance(title, str):
        plt.title(title)
    plt.show()
117,553
Reduces the output of an LSTM step Args: outputs: (torch.FloatTensor) the hidden state outputs from the lstm, with shape [batch_size, max_seq_length, hidden_size]
def _reduce_output(self, outputs, seq_lengths):
    batch_size = outputs.shape[0]
    reduced = []
    # Necessary to iterate over batch because of different sequence lengths
    for i in range(batch_size):
        if self.lstm_reduction == "mean":
            # Average over all non-padding reduced
            # Use dim=0 because first dimension disappears after indexing
            reduced.append(outputs[i, : seq_lengths[i], :].mean(dim=0))
        elif self.lstm_reduction == "max":
            # Max-pool over all non-padding reduced
            # Use dim=0 because first dimension disappears after indexing
            reduced.append(outputs[i, : seq_lengths[i], :].max(dim=0)[0])
        elif self.lstm_reduction == "last":
            # Take the last output of the sequence (before padding starts)
            # NOTE: maybe better to take first and last?
            reduced.append(outputs[i, seq_lengths[i] - 1, :])
        elif self.lstm_reduction == "attention":
            reduced.append(self._attention(outputs[i, : seq_lengths[i], :]))
        else:
            msg = (
                f"Did not recognize lstm kwarg 'lstm_reduction' == "
                f"{self.lstm_reduction}"
            )
            raise ValueError(msg)
    return torch.stack(reduced, dim=0)
117,572
Builds a vocabulary object based on the tokens in the input. Args: sents: A list of lists of tokens (representing sentences) Vocab kwargs include: max_size min_freq specials unk_init
def fit(self, sents, **kwargs):
    tokens = list(itertools.chain.from_iterable(sents))
    counter = Counter(tokens)
    self.vocab = self.build_vocab(counter, **kwargs)
117,587
Converts lists of tokens into a Tensor of embedding indices. Args: sents: A list of lists of tokens (representing sentences) NOTE: These sentences should already be marked using the mark_entities() helper. Returns: X: A Tensor of shape (num_items, max_seq_len)
def transform(self, sents):
    def convert(tokens):
        return torch.tensor([self.vocab.stoi[t] for t in tokens], dtype=torch.long)

    if self.vocab is None:
        raise Exception(
            "Must run .fit() for .fit_transform() before calling .transform()."
        )

    seqs = sorted([convert(s) for s in sents], key=lambda x: -len(x))
    X = torch.LongTensor(pad_sequence(seqs, batch_first=True))
    return X
117,588
Argmax with random tie-breaking Args: x: a 1-dim numpy array Returns: the argmax index
def rargmax(x, eps=1e-8):
    idxs = np.where(abs(x - np.max(x, axis=0)) < eps)[0]
    return np.random.choice(idxs)
117,590
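For example, when two entries tie at the maximum, rargmax picks one of the tied indices uniformly at random:

import numpy as np

x = np.array([0.1, 0.7, 0.7, 0.2])
idx = rargmax(x)        # returns 1 or 2, chosen at random
assert idx in (1, 2)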
Converts a 1D tensor of predicted labels into a 2D tensor of probabilistic labels Args: Y_h: an [n], or [n,1] tensor of predicted (int) labels in {1,...,k} k: the largest possible label in Y_h Returns: Y_s: a torch.FloatTensor of shape [n, k] where Y_s[i, j-1] is the probabilistic label for item i and label j
def pred_to_prob(Y_h, k):
    Y_h = Y_h.clone()
    if Y_h.dim() > 1:
        Y_h = Y_h.squeeze()
    assert Y_h.dim() == 1
    assert (Y_h >= 1).all()
    assert (Y_h <= k).all()
    n = Y_h.shape[0]
    Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device)
    for i, j in enumerate(Y_h):
        Y_s[i, j - 1] = 1.0
    return Y_s
117,591
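A small usage sketch of pred_to_prob, turning hard labels into one-hot rows:

import torch

Y_h = torch.tensor([1, 3, 2])
Y_s = pred_to_prob(Y_h, k=3)
# Y_s:
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 1, 0]]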
Converts a 2D [n,m] label matrix into an [n,m,k] one hot 3D tensor Note that in the returned 3D matrix, abstain votes continue to be represented by 0s, not 1s. Args: L: a [n,m] label matrix with categorical labels (0 = abstain) k: the number of classes that could appear in L if None, k is inferred as the max element in L
def label_matrix_to_one_hot(L, k=None):
    n, m = L.shape
    if k is None:
        k = L.max()
    L_onehot = torch.zeros(n, m, k + 1)
    for i, row in enumerate(L):
        for j, y in enumerate(row):
            if y > 0:
                L_onehot[i, j, y - 1] = 1
    return L_onehot
117,594
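A small usage sketch for label_matrix_to_one_hot; note that the code above allocates k+1 slots in the last dimension and leaves abstain (0) votes as all-zero vectors:

import torch

L = torch.tensor([[1, 2, 0],
                  [2, 0, 1]])
L_onehot = label_matrix_to_one_hot(L, k=2)   # shape [2, 3, 3]
# e.g. L_onehot[0, 0] == tensor([1., 0., 0.]) and L_onehot[0, 2] stays all zeros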
Returns a list of outputs for tasks 0,...t-1 Args: x: a [batch_size, ...] batch from X
def forward(self, x):
    head_outputs = [None] * self.t

    # Execute input layer
    if isinstance(self.input_layer, list):  # One input_module per task
        input_outputs = [mod(x) for mod, x in zip(self.input_layer, x)]
        x = torch.stack(input_outputs, dim=1)

        # Execute level-0 task heads from their respective input modules
        for t in self.task_map[0]:
            head = self.heads[t]
            head_outputs[t] = head(input_outputs[t])
    else:  # One input_module for all tasks
        x = self.input_layer(x)

        # Execute level-0 task heads from the single input module
        for t in self.task_map[0]:
            head = self.heads[t]
            head_outputs[t] = head(x)

    # Execute middle layers
    for i, layer in enumerate(self.middle_layers, start=1):
        x = layer(x)

        # Attach level-i task heads from the ith middle module
        for t in self.task_map[i]:
            head = self.heads[t]
            # Optionally include as input the predictions of parent tasks
            if self.config["pass_predictions"] and bool(self.task_graph.parents[t]):
                task_input = [x]
                for p in self.task_graph.parents[t]:
                    task_input.append(head_outputs[p])
                task_input = torch.stack(task_input, dim=1)
            else:
                task_input = x
            head_outputs[t] = head(task_input)

    return head_outputs
117,606
Convert T label matrices with labels in 0...K_t to a one-hot format Here we can view e.g. the $(i,j)$ entries of the $T$ label matrices as a _label vector_ emitted by LF j for data point i. Args: L: a T-length list of [n,m] scipy.sparse label matrices with values in {0,1,...,k} Returns: L_ind: An [n,m*k] dense np.ndarray with values in {0,1} Note that no column is required for 0 (abstain) labels.
def _create_L_ind(self, L):
    # TODO: Update LabelModel to keep L, L_ind, L_aug as sparse matrices
    # throughout and remove this line.
    if issparse(L[0]):
        L = [L_t.todense() for L_t in L]

    # Make sure converted to numpy here
    L = self._to_numpy(L)

    L_ind = np.ones((self.n, self.m * self.k))
    for yi, y in enumerate(self.task_graph.feasible_set()):
        for t in range(self.t):
            # A[x::y] slices A starting at x at intervals of y
            # e.g., np.arange(9)[0::3] == np.array([0,3,6])
            L_ind[:, yi :: self.k] *= np.where(
                np.logical_or(L[t] == y[t], L[t] == 0), 1, 0
            )

        # Set LFs that abstained on all feasible label vectors to all 0s
        L_ind[:, yi :: self.k] *= np.where(sum(L) != 0, 1, 0)

    return L_ind
117,611
Returns the task marginals estimated by the model: a t-length list of [n,k_t] matrices where the (i,j) entry of the sth matrix represents the estimated P((Y_i)_s | \lambda_j(x_i)) Args: L: A t-length list of [n,m] scipy.sparse label matrices with values in {0,1,...,k}
def predict_proba(self, L):
    # First, get the estimated probability distribution over the feasible
    # set defined by the TaskGraph
    # This is an [n,k] array, where k = |(feasible set)|
    Y_pf = LabelModel.predict_proba(self, L)
    n, k = Y_pf.shape

    # Now get the per-task marginals
    # TODO: Make this optional, versus just returning the above
    Y_p = [np.zeros((n, k_t)) for k_t in self.task_graph.K]
    for yi, y in enumerate(self.task_graph.feasible_set()):
        for t in range(self.t):
            k_t = int(y[t])
            Y_p[t][:, k_t - 1] += Y_pf[:, yi]
    return Y_p
117,612
Predicts (int) labels for an input X on all tasks Args: X: The input for the predict_proba method break_ties: A tie-breaking policy (see Classifier._break_ties()) return_probs: Return the predicted probabilities as well Returns: Y_p: An n-dim np.ndarray of predictions in {1,...k} [Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]
def predict(self, X, break_ties="random", return_probs=False, **kwargs):
    Y_s = self._to_numpy(self.predict_proba(X, **kwargs))
    Y_p = self._break_ties(Y_s, break_ties).astype(np.int)
    if return_probs:
        return Y_p, Y_s
    else:
        return Y_p
117,614
This method resumes training of a classifier by reloading the appropriate state_dicts for each model Args: train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the train split model_path: the path to the saved checkpoint for resuming training valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the dev split
def resume_training(self, train_data, model_path, valid_data=None):
    restore_state = self.checkpointer.restore(model_path)
    loss_fn = self._get_loss_fn()
    self.train()
    self._train_model(
        train_data=train_data,
        loss_fn=loss_fn,
        valid_data=valid_data,
        restore_state=restore_state,
    )
117,619
Restores the model and optimizer states This helper function restores the model's state to a given iteration so that a user can resume training at any epoch. Args: restore_state: a state_dict dictionary
def _restore_training_state(self, restore_state):
    self.load_state_dict(restore_state["model"])
    self.optimizer.load_state_dict(restore_state["optimizer"])
    self.lr_scheduler.load_state_dict(restore_state["lr_scheduler"])
    start_iteration = restore_state["iteration"] + 1
    if self.config["verbose"]:
        print(f"Restored checkpoint to iteration {start_iteration}.")

    if restore_state["best_model_found"]:
        # Update checkpointer with appropriate information about best model
        # Note that the best model found so far may not be the model in the
        # checkpoint that is currently being loaded.
        self.checkpointer.best_model_found = True
        self.checkpointer.best_iteration = restore_state["best_iteration"]
        self.checkpointer.best_score = restore_state["best_score"]
        if self.config["verbose"]:
            print(
                f"Updated checkpointer: "
                f"best_score={self.checkpointer.best_score:.3f}, "
                f"best_iteration={self.checkpointer.best_iteration}"
            )
    return start_iteration
117,620
Prints a warning statement just once Args: msg: The warning message msg_name: [optional] The name of the warning. If None, the msg_name will be the msg itself.
def warn_once(self, msg, msg_name=None):
    assert isinstance(msg, str)
    msg_name = msg_name if msg_name else msg
    if msg_name not in warnings_given:
        warnings.warn(msg)
        warnings_given.add(msg_name)
117,638
Execute sparse linear layer Args: X: an [n, h] torch.LongTensor containing up to h indices of features whose weights should be looked up and used in a sparse linear multiplication.
def forward(self, X):
    return self.W(X).sum(dim=1) + self.b
117,659
Saves checkpoints as applicable based on a reported metric. Args: checkpoint_runway (int): don't save any checkpoints for the first this many iterations checkpoint_dir (str): the directory for saving checkpoints
def __init__(self, config, verbose=True):
    self.best_model_found = None
    self.best_iteration = None
    self.best_score = None
    self.verbose = verbose
    self.checkpoint_best = config["checkpoint_best"]
    self.checkpoint_every = config["checkpoint_every"]
    self.checkpoint_metric = config["checkpoint_metric"]
    self.checkpoint_metric_mode = config["checkpoint_metric_mode"]
    self.checkpoint_dir = config["checkpoint_dir"]
    self.checkpoint_runway = config["checkpoint_runway"]

    # If abbreviated metric name was used, expand here to valid/ by default
    if "/" not in self.checkpoint_metric:
        self.checkpoint_metric = "valid/" + self.checkpoint_metric

    # Create checkpoint directory if necessary
    if not os.path.exists(self.checkpoint_dir):
        os.makedirs(self.checkpoint_dir)

    # Remind about checkpoint runway
    if self.checkpoint_runway and verbose:
        print(
            f"No checkpoints will be saved in the first "
            f"checkpoint_runway={self.checkpoint_runway} iterations."
        )
117,660
Convert a label matrix with labels in 0...k to a one-hot format Args: L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k} Returns: L_ind: An [n,m*k] dense np.ndarray with values in {0,1} Note that no column is required for 0 (abstain) labels.
def _create_L_ind(self, L):
    # TODO: Update LabelModel to keep L variants as sparse matrices
    # throughout and remove this line.
    if issparse(L):
        L = L.todense()

    L_ind = np.zeros((self.n, self.m * self.k))
    for y in range(1, self.k + 1):
        # A[x::y] slices A starting at x at intervals of y
        # e.g., np.arange(9)[0::3] == np.array([0,3,6])
        L_ind[:, (y - 1) :: self.k] = np.where(L == y, 1, 0)
    return L_ind
117,673
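A standalone numpy sketch of the same indicator expansion, written outside the class so it can be run directly; the function name and the toy matrix are illustrative only.

import numpy as np

def to_indicator(L, k):
    # Expand an [n, m] label matrix with values in {0,...,k} into an
    # [n, m*k] {0,1} matrix; columns j*k..(j+1)*k-1 hold the indicators for LF j.
    n, m = L.shape
    L_ind = np.zeros((n, m * k))
    for y in range(1, k + 1):
        L_ind[:, (y - 1)::k] = np.where(L == y, 1, 0)
    return L_ind

L = np.array([[1, 2],
              [0, 1]])
print(to_indicator(L, k=2))
# [[1. 0. 0. 1.]
#  [0. 0. 1. 0.]]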
Returns an augmented version of L where each column is an indicator for whether a certain source or clique of sources voted in a certain pattern. Args: L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}
def _get_augmented_label_matrix(self, L, higher_order=False):
    # Create a helper data structure which maps cliques (as tuples of member
    # sources) --> {start_index, end_index, maximal_cliques}, where
    # the last value is a set of indices in this data structure
    self.c_data = {}
    for i in range(self.m):
        self.c_data[i] = {
            "start_index": i * self.k,
            "end_index": (i + 1) * self.k,
            "max_cliques": set(
                [
                    j
                    for j in self.c_tree.nodes()
                    if i in self.c_tree.node[j]["members"]
                ]
            ),
        }

    L_ind = self._create_L_ind(L)

    # Get the higher-order clique statistics based on the clique tree
    # First, iterate over the maximal cliques (nodes of c_tree) and
    # separator sets (edges of c_tree)
    if higher_order:
        L_aug = np.copy(L_ind)
        for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
            if isinstance(item, int):
                C = self.c_tree.node[item]
                C_type = "node"
            elif isinstance(item, tuple):
                C = self.c_tree[item[0]][item[1]]
                C_type = "edge"
            else:
                raise ValueError(item)
            members = list(C["members"])
            nc = len(members)

            # If a unary maximal clique, just store its existing index
            if nc == 1:
                C["start_index"] = members[0] * self.k
                C["end_index"] = (members[0] + 1) * self.k

            # Else add one column for each possible value
            else:
                L_C = np.ones((self.n, self.k ** nc))
                for i, vals in enumerate(product(range(self.k), repeat=nc)):
                    for j, v in enumerate(vals):
                        L_C[:, i] *= L_ind[:, members[j] * self.k + v]

                # Add to L_aug and store the indices
                if L_aug is not None:
                    C["start_index"] = L_aug.shape[1]
                    C["end_index"] = L_aug.shape[1] + L_C.shape[1]
                    L_aug = np.hstack([L_aug, L_C])
                else:
                    C["start_index"] = 0
                    C["end_index"] = L_C.shape[1]
                    L_aug = L_C

                # Add to self.c_data as well
                id = tuple(members) if len(members) > 1 else members[0]
                self.c_data[id] = {
                    "start_index": C["start_index"],
                    "end_index": C["end_index"],
                    "max_cliques": set([item]) if C_type == "node" else set(item),
                }
        return L_aug
    else:
        return L_ind
117,674
Returns the [n,k] matrix of label probabilities P(Y | \lambda) Args: L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}
def predict_proba(self, L):
    self._set_constants(L)
    L_aug = self._get_augmented_label_matrix(L)
    mu = np.clip(self.mu.detach().clone().numpy(), 0.01, 0.99)

    # Create a "junction tree mask" over the columns of L_aug / mu
    if len(self.deps) > 0:
        jtm = np.zeros(L_aug.shape[1])

        # All maximal cliques are +1
        for i in self.c_tree.nodes():
            node = self.c_tree.node[i]
            jtm[node["start_index"] : node["end_index"]] = 1

        # All separator sets are -1
        for i, j in self.c_tree.edges():
            edge = self.c_tree[i][j]
            jtm[edge["start_index"] : edge["end_index"]] = 1
    else:
        jtm = np.ones(L_aug.shape[1])

    # Note: We omit abstains, effectively assuming uniform distribution here
    X = np.exp(L_aug @ np.diag(jtm) @ np.log(mu) + np.log(self.p))
    Z = np.tile(X.sum(axis=1).reshape(-1, 1), self.k)
    return X / Z
117,680
L2 loss centered around mu_init, scaled optionally per-source. In other words, diagonal Tikhonov regularization, ||D(\mu-\mu_{init})||_2^2 where D is diagonal. Args: - l2: A float or np.array representing the per-source regularization strengths to use
def loss_l2(self, l2=0):
    if isinstance(l2, (int, float)):
        D = l2 * torch.eye(self.d)
    else:
        D = torch.diag(torch.from_numpy(l2))

    # Note that mu is a matrix and this is the *Frobenius norm*
    return torch.norm(D @ (self.mu - self.mu_init)) ** 2
117,682
Calculate (micro) accuracy. Args: gold: A 1d array-like of gold labels pred: A 1d array-like of predicted labels (assuming abstain = 0) ignore_in_gold: A list of labels for which elements having that gold label will be ignored. ignore_in_pred: A list of labels for which elements having that pred label will be ignored. Returns: A float, the (micro) accuracy score
def accuracy_score(gold, pred, ignore_in_gold=[], ignore_in_pred=[]):
    gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)

    if len(gold) and len(pred):
        acc = np.sum(gold == pred) / len(gold)
    else:
        acc = 0

    return acc
117,714
Calculate (global) coverage. Args: gold: A 1d array-like of gold labels pred: A 1d array-like of predicted labels (assuming abstain = 0) ignore_in_gold: A list of labels for which elements having that gold label will be ignored. ignore_in_pred: A list of labels for which elements having that pred label will be ignored. Returns: A float, the (global) coverage score
def coverage_score(gold, pred, ignore_in_gold=[], ignore_in_pred=[]):
    gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
    return np.sum(pred != 0) / len(pred)
117,715
Compute the ROC AUC score, given the gold labels and predicted probs. Args: gold: A 1d array-like of gold labels probs: A 2d array-like of predicted probabilities ignore_in_gold: A list of labels for which elements having that gold label will be ignored. Returns: roc_auc_score: The (float) roc_auc score
def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]):
    gold = arraylike_to_numpy(gold)

    # Filter out the ignore_in_gold (but not ignore_in_pred)
    # Note the current sub-functions (below) do not handle this...
    if len(ignore_in_pred) > 0:
        raise ValueError("ignore_in_pred not defined for ROC-AUC score.")
    keep = [x not in ignore_in_gold for x in gold]
    gold = gold[keep]
    probs = probs[keep, :]

    # Convert gold to one-hot indicator format, using the k inferred from probs
    gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy()
    return skm.roc_auc_score(gold_s, probs)
117,719
Predicts int labels for an input X on all tasks Args: X: The input for the predict_proba method break_ties: A tie-breaking policy return_probs: Return the predicted probabilities as well Returns: Y_p: A t-length list of n-dim np.ndarrays of predictions in [1, K_t] [Optionally: Y_s: A t-length list of [n, K_t] np.ndarrays of predicted probabilities]
def predict(self, X, break_ties="random", return_probs=False, **kwargs):
    Y_s = self.predict_proba(X, **kwargs)
    self._check(Y_s, typ=list)
    self._check(Y_s[0], typ=np.ndarray)

    Y_p = []
    for Y_ts in Y_s:
        Y_tp = self._break_ties(Y_ts, break_ties)
        Y_p.append(Y_tp.astype(np.int))

    if return_probs:
        return Y_p, Y_s
    else:
        return Y_p
117,728
Scores the predictive performance of the Classifier on task t Args: X: The input for the predict_task method Y: A [n] or [n, 1] np.ndarray or torch.Tensor of gold labels in {1,...,K_t} t: The task index to score metric: The metric with which to score performance on this task Returns: The (float) score of the Classifier for the specified task and metric
def score_task(self, X, Y, t=0, metric="accuracy", verbose=True, **kwargs):
    Y = self._to_numpy(Y)
    Y_tp = self.predict_task(X, t=t, **kwargs)
    probs = self.predict_proba(X)[t]
    score = metric_score(
        Y[t], Y_tp, metric, ignore_in_gold=[0], probs=probs, **kwargs
    )
    if verbose:
        print(f"[t={t}] {metric.capitalize()}: {score:.3f}")
    return score
117,730
Predicts int labels for an input X on task t Args: X: The input for the predict_task_proba method t: The task index to predict Returns: An n-dim tensor of int predictions for the specified task
def predict_task(self, X, t=0, break_ties="random", **kwargs):
    Y_tp = self.predict_task_proba(X, t=t, **kwargs)
    Y_tph = self._break_ties(Y_tp, break_ties)
    return Y_tph
117,731
Predicts probabilistic labels for an input X on task t Args: X: The input for the predict_proba method t: The task index to predict for which to predict probabilities Returns: An [n, K_t] tensor of predictions for task t NOTE: By default, this method calls predict_proba and extracts element t. If it is possible to predict individual tasks in isolation, however, this method may be overriden for efficiency's sake.
def predict_task_proba(self, X, t=0, **kwargs):
    return self.predict_proba(X, **kwargs)[t]
117,732
Gets the largest hyperband schedule within target_budget. This is required since the original hyperband algorithm uses R, the maximum number of resources per configuration. TODO(maxlam): Possibly binary search it if this becomes a bottleneck. Args: budget: total budget of the schedule. proportion_discard: hyperband parameter that specifies the proportion of configurations to discard per iteration.
def get_largest_schedule_within_budget(self, budget, proportion_discard):
    # Exhaustively generate schedules and check if
    # they're within budget, adding to a list.
    valid_schedules_and_costs = []
    for R in range(1, budget):
        schedule = self.generate_hyperband_schedule(R, proportion_discard)
        cost = self.compute_schedule_cost(schedule)
        if cost <= budget:
            valid_schedules_and_costs.append((schedule, cost))

    # Choose a valid schedule that maximizes usage of the budget.
    valid_schedules_and_costs.sort(key=lambda x: x[1], reverse=True)
    return valid_schedules_and_costs[0][0]
117,738
Generate hyperband schedule according to the paper. Args: R: maximum resources per config. eta: proportion of configurations to discard per iteration of successive halving. Returns: hyperband schedule, which is represented as a list of brackets, where each bracket contains a list of (num configurations, num resources to use per configuration). See the paper for more details.
def generate_hyperband_schedule(self, R, eta):
    schedule = []
    s_max = int(math.floor(math.log(R, eta)))
    # B = (s_max + 1) * R
    for s in range(0, s_max + 1):
        n = math.ceil(int((s_max + 1) / (s + 1)) * eta ** s)
        r = R * eta ** (-s)
        bracket = []
        for i in range(0, s + 1):
            n_i = int(math.floor(n * eta ** (-i)))
            r_i = int(r * eta ** i)
            bracket.append((n_i, r_i))
        schedule = [bracket] + schedule
    return schedule
117,740
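A hedged, standalone rewrite of the bracket arithmetic above (outside the class) to make the output shape concrete; the R and eta values are illustrative only.

import math

def hyperband_schedule(R, eta):
    # One bracket per s in [0, s_max]; each bracket is a list of
    # (num_configs, resources_per_config) rounds of successive halving.
    schedule = []
    s_max = int(math.floor(math.log(R, eta)))
    for s in range(s_max + 1):
        n = math.ceil(int((s_max + 1) / (s + 1)) * eta ** s)
        r = R * eta ** (-s)
        bracket = [(int(math.floor(n * eta ** (-i))), int(r * eta ** i))
                   for i in range(s + 1)]
        schedule = [bracket] + schedule
    return schedule

print(hyperband_schedule(R=20, eta=2)[0])
# most aggressive bracket: [(16, 1), (8, 2), (4, 5), (2, 10), (1, 20)]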
Given label matrix L_aug and labels Y, compute the true mu params. Args: L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k} k: (int) Cardinality p: (np.array float) [k] The class balance
def compute_mu(L_aug, Y, k, p):
    n, d = L_aug.shape
    assert Y.shape[0] == n

    # Compute mu
    mu = np.zeros((d, k))
    for y in range(1, k + 1):
        L_y = L_aug[Y == y]
        mu[:, y - 1] = L_y.sum(axis=0) / L_y.shape[0]
    return mu
117,753
Given label matrix L_aug and labels Y, compute the covariance. Args: L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k} k: (int) Cardinality p: (np.array float) [k] The class balance
def compute_covariance(L_aug, Y, k, p):
    n, d = L_aug.shape
    assert Y.shape[0] == n
    mu = compute_mu(L_aug, Y, k, p)
    return (L_aug.T @ L_aug) / n - mu @ np.diag(p) @ mu.T
117,754
Given label matrix L and labels Y, compute the covariance. Args: L: (np.array) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k}
def compute_inv_covariance(L_aug, Y, k, p): return np.linalg.inv(compute_covariance(L_aug, Y, k, p))
117,755
Return the polarities of each LF based on evidence in a label matrix. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate
def lf_polarities(L): polarities = [sorted(list(set(L[:, i].data))) for i in range(L.shape[1])] return [p[0] if len(p) == 1 else p for p in polarities]
117,758
Return the **empirical accuracy** against a set of labels Y (e.g. dev set) for each LF. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate Y: an [n] or [n, 1] np.ndarray of gold labels
def lf_empirical_accuracies(L, Y): # Assume labeled set is small, work with dense matrices Y = arraylike_to_numpy(Y) L = L.toarray() X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1)) return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)
117,761
Returns a pandas DataFrame with the various per-LF statistics. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate Y: an [n] or [n, 1] np.ndarray of gold labels. If provided, the empirical accuracy for each LF will be calculated
def lf_summary(L, Y=None, lf_names=None, est_accs=None):
    n, m = L.shape
    if lf_names is not None:
        col_names = ["j"]
        d = {"j": list(range(m))}
    else:
        lf_names = list(range(m))
        col_names = []
        d = {}

    # Default LF stats
    col_names.extend(["Polarity", "Coverage", "Overlaps", "Conflicts"])
    d["Polarity"] = Series(data=lf_polarities(L), index=lf_names)
    d["Coverage"] = Series(data=lf_coverages(L), index=lf_names)
    d["Overlaps"] = Series(data=lf_overlaps(L), index=lf_names)
    d["Conflicts"] = Series(data=lf_conflicts(L), index=lf_names)

    if Y is not None:
        col_names.extend(["Correct", "Incorrect", "Emp. Acc."])
        confusions = [
            confusion_matrix(Y, L[:, i], pretty_print=False) for i in range(m)
        ]
        corrects = [np.diagonal(conf).sum() for conf in confusions]
        incorrects = [
            conf.sum() - correct for conf, correct in zip(confusions, corrects)
        ]
        accs = lf_empirical_accuracies(L, Y)
        d["Correct"] = Series(data=corrects, index=lf_names)
        d["Incorrect"] = Series(data=incorrects, index=lf_names)
        d["Emp. Acc."] = Series(data=accs, index=lf_names)

    if est_accs is not None:
        col_names.append("Learned Acc.")
        d["Learned Acc."] = Series(est_accs, index=lf_names)

    return DataFrame(data=d, index=lf_names)[col_names]
117,762
Calculates coverage, overlap, conflicts, and accuracy for a single LF Args: Y_p: a np.array or torch.Tensor of predicted labels Y: a np.array or torch.Tensor of true labels (if known)
def single_lf_summary(Y_p, Y=None):
    L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape(-1, 1))
    return lf_summary(L, Y)
117,763
A shortcut method for building a confusion matrix all at once. Args: gold: an array-like of gold labels (ints) pred: an array-like of predictions (ints) null_pred: If True, include the row corresponding to null predictions null_gold: If True, include the col corresponding to null gold labels normalize: if True, divide counts by the total number of items pretty_print: if True, pretty-print the matrix before returning
def confusion_matrix(
    gold, pred, null_pred=False, null_gold=False, normalize=False, pretty_print=True
):
    conf = ConfusionMatrix(null_pred=null_pred, null_gold=null_gold)
    gold = arraylike_to_numpy(gold)
    pred = arraylike_to_numpy(pred)
    conf.add(gold, pred)
    mat = conf.compile()
    if normalize:
        mat = mat / len(gold)
    if pretty_print:
        conf.display(normalize=normalize)
    return mat
117,765
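Usage sketch (illustrative; ConfusionMatrix and arraylike_to_numpy come from the same metrics module, and the exact row/column orientation follows ConfusionMatrix.compile()).

import numpy as np

gold = np.array([1, 1, 2, 2, 2])
pred = np.array([1, 2, 2, 2, 1])
mat = confusion_matrix(gold, pred, pretty_print=False)
print(mat)   # a k x k count matrix; pass normalize=True for proportions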
Return a list of suggestions based on grid search. Params: matrix: `dict` representing the {hyperparam: hyperparam matrix config}. n_suggestions: number of suggestions to make.
def get_suggestions(self, iteration_config=None):
    matrix = self.hptuning_config.matrix
    suggestions = []
    keys = list(matrix.keys())
    values = [v.to_numpy() for v in matrix.values()]
    for v in itertools.product(*values):
        suggestions.append(dict(zip(keys, v)))

    if self.hptuning_config.grid_search:
        n_suggestions = self.hptuning_config.grid_search.n_experiments
        if n_suggestions:
            return suggestions[:n_suggestions]

    return suggestions
119,027
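The core of the grid-search expansion is just a cartesian product over the hyperparameter values; a standalone sketch with a hypothetical plain-dict matrix (no hptuning_config wrapper):

import itertools

matrix = {"lr": [0.1, 0.01], "batch_size": [32, 64]}
keys = list(matrix.keys())
values = list(matrix.values())
suggestions = [dict(zip(keys, combo)) for combo in itertools.product(*values)]
print(suggestions)
# [{'lr': 0.1, 'batch_size': 32}, {'lr': 0.1, 'batch_size': 64},
#  {'lr': 0.01, 'batch_size': 32}, {'lr': 0.01, 'batch_size': 64}]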
Return a list of suggestions based on random search. Params: matrix: `dict` representing the {hyperparam: hyperparam matrix config}. n_suggestions: number of suggestions to make.
def get_suggestions(self, iteration_config=None):
    matrix = self.hptuning_config.matrix
    n_suggestions = self.hptuning_config.random_search.n_experiments
    seed = self.hptuning_config.seed
    return get_random_suggestions(matrix=matrix,
                                  n_suggestions=n_suggestions,
                                  seed=seed)
119,478
Return a dag representation of the nodes passed. This is used equally for pipelines and pipeline runs. Params: nodes: instances of `Operation` | `OperationRun`, the nodes to represent as a dag. downstream_fn: a function that returns the downstream nodes of a node. Returns: tuple: (dag, dict(node_id: node))
def get_dag(nodes, downstream_fn) -> Tuple[Dict, Dict]:
    dag = {}
    node_by_ids = {}
    for node in nodes:
        downstream_ops = downstream_fn(node)
        dag[node.id] = set(downstream_ops)
        node_by_ids[node.id] = node

    return dag, node_by_ids
119,608
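Usage sketch with hypothetical lightweight nodes (illustrative only; the Tuple/Dict typing imports used in the signature come from the original module). downstream_fn simply returns the ids of a node's downstream operations:

from collections import namedtuple

Node = namedtuple("Node", ["id", "downstream"])
nodes = [Node(1, [2, 3]), Node(2, [3]), Node(3, [])]

dag, node_by_ids = get_dag(nodes, downstream_fn=lambda n: n.downstream)
print(dag)           # {1: {2, 3}, 2: {3}, 3: set()}
print(node_by_ids)   # {1: Node(id=1, ...), 2: Node(id=2, ...), 3: Node(id=3, ...)}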
Create resources requirements. Args: resources: `PodResourcesConfig` Return: `V1ResourceRequirements`
def get_resources(resources):  # pylint:disable=too-many-branches
    limits = {}
    requests = {}
    if resources is None:
        return None

    if resources.cpu:
        if resources.cpu.limits:
            limits['cpu'] = resources.cpu.limits
        if resources.cpu.requests:
            requests['cpu'] = resources.cpu.requests

    if resources.memory:
        if resources.memory.limits:
            limits['memory'] = '{}Mi'.format(resources.memory.limits)
        if resources.memory.requests:
            requests['memory'] = '{}Mi'.format(resources.memory.requests)

    if resources.gpu:
        if resources.gpu.limits:
            limits[conf.get('K8S_GPU_RESOURCE_KEY')] = resources.gpu.limits
        if resources.gpu.requests:
            requests[conf.get('K8S_GPU_RESOURCE_KEY')] = resources.gpu.requests

    if resources.tpu:
        if resources.tpu.limits:
            limits[conf.get('K8S_TPU_RESOURCE_KEY')] = resources.tpu.limits
        if resources.tpu.requests:
            requests[conf.get('K8S_TPU_RESOURCE_KEY')] = resources.tpu.requests

    return client.V1ResourceRequirements(limits=limits or None, requests=requests or None)
119,762
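A sketch of how the nested config maps onto V1ResourceRequirements (illustrative; the SimpleNamespace objects stand in for PodResourcesConfig, and the kubernetes `client` import comes from the original module):

from types import SimpleNamespace

resources = SimpleNamespace(
    cpu=SimpleNamespace(limits=2, requests=1),
    memory=SimpleNamespace(limits=1024, requests=512),
    gpu=None,
    tpu=None,
)
reqs = get_resources(resources)
print(reqs.limits)     # {'cpu': 2, 'memory': '1024Mi'}
print(reqs.requests)   # {'cpu': 1, 'memory': '512Mi'}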
Lookup equities by symbol. Parameters: args (iterable[str]): List of ticker symbols for the assets. Returns: equities (List[Equity]): The equities looked up by ``symbol``. Raises: AssetNotFound: When the ``Asset`` could not be resolved by ``symbol``.
def symbols(self, *args, **kwargs):
    return [self.symbol(identifier, **kwargs) for identifier in args]
122,973
Evaluate on voc dataset. Args: pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields. gt_boxlists(list[BoxList]): ground truth boxlist, has labels field. iou_thresh: iou thresh use_07_metric: boolean Returns: dict represents the results
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    prec, rec = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
    return {"ap": ap, "map": np.nanmean(ap)}
123,082
Apply algorithm 2 in https://arxiv.org/pdf/1901.08910.pdf. Args: usv: matrix to reduce, given in SVD form with the spectrum s in increasing order. num_rows: number of rows in the output matrix. num_cols: number of columns in the output matrix. Returns: A resized version of (u, s, v) whose non-zero singular values will be identical to the largest singular values in s.
def resize_matrix(usv, num_rows, num_cols):
    u, s, v = usv
    k = min(num_rows, num_cols)
    u_random_proj = transform.resize(u[:, :k], (num_rows, k))
    v_random_proj = transform.resize(v[:k, :], (k, num_cols))
    u_random_proj_orth = _closest_column_orthogonal_matrix(u_random_proj)
    v_random_proj_orth = _closest_column_orthogonal_matrix(v_random_proj.T).T
    return np.matmul(u_random_proj_orth,
                     np.matmul(np.diag(s[::-1][:k]), v_random_proj_orth))
123,097
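Usage sketch (illustrative; assumes skimage.transform imported as `transform` and the module-internal _closest_column_orthogonal_matrix helper, as in the original code). Note that np.linalg.svd returns singular values in decreasing order, so they are reversed here to match the increasing-order convention this function expects:

import numpy as np

a = np.random.randn(10, 8)
u, s, v = np.linalg.svd(a, full_matrices=False)
smaller = resize_matrix((u, s[::-1], v), num_rows=6, num_cols=4)
print(smaller.shape)   # (6, 4); per the docstring, its non-zero singular values
                       # should match the 4 largest entries of s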
Read and sort lines from the file sorted by decreasing length. Args: filename: String name of file to read inputs from. Returns: Sorted list of inputs, and dictionary mapping original index->sorted index of each element.
def _get_sorted_inputs(filename):
    with tf.gfile.Open(filename) as f:
        records = f.read().split("\n")
        inputs = [record.strip() for record in records]
        if not inputs[-1]:
            inputs.pop()

    input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]
    sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True)

    sorted_inputs = []
    sorted_keys = {}
    for i, (index, _) in enumerate(sorted_input_lens):
        sorted_inputs.append(inputs[index])
        sorted_keys[index] = i
    return sorted_inputs, sorted_keys
123,114
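The sort-and-remember-order logic, sketched in pure Python without the tf.gfile read (illustrative only):

inputs = ["the quick brown fox", "hello", "a somewhat longer test sentence"]
input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True)

sorted_inputs = [inputs[index] for index, _ in sorted_input_lens]
sorted_keys = {index: i for i, (index, _) in enumerate(sorted_input_lens)}
print(sorted_inputs)   # longest sentence first
print(sorted_keys)     # {2: 0, 0: 1, 1: 2}: original index -> sorted position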
Translate lines in file, and save to output file if specified. Args: estimator: tf.Estimator used to generate the translations. subtokenizer: Subtokenizer object for encoding and decoding source and translated lines. input_file: file containing lines to translate output_file: file that stores the generated translations. print_all_translations: If true, all translations are printed to stdout. Raises: ValueError: if output file is invalid.
def translate_file(
        estimator, subtokenizer, input_file, output_file=None,
        print_all_translations=True):
    batch_size = _DECODE_BATCH_SIZE

    # Read and sort inputs by length. Keep dictionary (original index-->new index
    # in sorted list) to write translations in the original order.
    sorted_inputs, sorted_keys = _get_sorted_inputs(input_file)
    num_decode_batches = (len(sorted_inputs) - 1) // batch_size + 1

    def input_generator():
        for i, line in enumerate(sorted_inputs):
            if i % batch_size == 0:
                batch_num = (i // batch_size) + 1
                print("Decoding batch %d out of %d." % (batch_num, num_decode_batches))
            yield _encode_and_add_eos(line, subtokenizer)

    def input_fn():
        ds = tf.data.Dataset.from_generator(
            input_generator, tf.int64, tf.TensorShape([None]))
        ds = ds.padded_batch(batch_size, [None])
        return ds

    translations = []
    for i, prediction in enumerate(estimator.predict(input_fn)):
        translation = _trim_and_decode(prediction["outputs"], subtokenizer)
        translations.append(translation)

        if print_all_translations:
            print("Translating:")
            print("\tInput: %s" % sorted_inputs[i])
            print("\tOutput: %s\n" % translation)
            print("=" * 100)

    # Write translations in the order they appeared in the original file.
    if output_file is not None:
        if tf.gfile.IsDirectory(output_file):
            raise ValueError("File output is a directory, will not save outputs to "
                             "file.")
        tf.logging.info("Writing to file %s" % output_file)
        with tf.gfile.Open(output_file, "w") as f:
            for index in xrange(len(sorted_keys)):
                f.write("%s\n" % translations[sorted_keys[index]])
123,116
Upload benchmark run information to Bigquery. Args: dataset_name: string, the name of bigquery dataset where the data will be uploaded. table_name: string, the name of bigquery table under the dataset where the data will be uploaded. run_id: string, a unique ID that will be attached to the data, usually this is a UUID4 format.
def upload_benchmark_run(self, dataset_name, table_name, run_id):
    expected_file = os.path.join(
        self._logging_dir, logger.BENCHMARK_RUN_LOG_FILE_NAME)
    with tf.gfile.GFile(expected_file) as f:
        benchmark_json = json.load(f)
    benchmark_json["model_id"] = run_id
    table_ref = self._bq_client.dataset(dataset_name).table(table_name)
    errors = self._bq_client.insert_rows_json(table_ref, [benchmark_json])
    if errors:
        tf.logging.error(
            "Failed to upload benchmark info to bigquery: {}".format(errors))
123,133
Upload metric information to Bigquery. Args: dataset_name: string, the name of bigquery dataset where the data will be uploaded. table_name: string, the name of bigquery table under the dataset where the metric data will be uploaded. This is different from the benchmark_run table. run_id: string, a unique ID that will be attached to the data, usually this is a UUID4 format. This should be the same as the benchmark run_id.
def upload_metric(self, dataset_name, table_name, run_id):
    expected_file = os.path.join(
        self._logging_dir, logger.METRIC_LOG_FILE_NAME)
    with tf.gfile.GFile(expected_file) as f:
        lines = f.readlines()
    metrics = []
    for line in filter(lambda l: l.strip(), lines):
        metric = json.loads(line)
        metric["run_id"] = run_id
        metrics.append(metric)
    table_ref = self._bq_client.dataset(dataset_name).table(table_name)
    errors = self._bq_client.insert_rows_json(table_ref, metrics)
    if errors:
        tf.logging.error(
            "Failed to upload benchmark info to bigquery: {}".format(errors))
123,134
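The metric log is read as JSON lines; a standalone sketch of the parsing step with hypothetical file contents (the real schema comes from the benchmark logger):

import json

lines = [
    '{"name": "accuracy", "value": 0.91, "global_step": 1000}',
    '',
    '{"name": "loss", "value": 0.35, "global_step": 1000}',
]
metrics = []
for line in filter(lambda l: l.strip(), lines):
    metric = json.loads(line)
    metric["run_id"] = "run-1234"          # hypothetical run id
    metrics.append(metric)
print(metrics)   # each row carries the run_id, ready for insert_rows_json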
Plays matches between two neural nets. Args: black_model: Path to the model for the black player white_model: Path to the model for the white player games: Number of games to play sgf_dir: Directory where SGF records of the games are written
def play_match(black_model, white_model, games, sgf_dir):
    with utils.logged_timer("Loading weights"):
        black_net = dual_net.DualNetwork(black_model)
        white_net = dual_net.DualNetwork(white_model)

    readouts = FLAGS.num_readouts

    black = MCTSPlayer(black_net, two_player_mode=True)
    white = MCTSPlayer(white_net, two_player_mode=True)

    black_name = os.path.basename(black_net.save_file)
    white_name = os.path.basename(white_net.save_file)

    for i in range(games):
        num_move = 0  # The move number of the current game

        for player in [black, white]:
            player.initialize_game()
            first_node = player.root.select_leaf()
            prob, val = player.network.run(first_node.position)
            first_node.incorporate_results(prob, val, first_node)

        while True:
            start = time.time()
            active = white if num_move % 2 else black
            inactive = black if num_move % 2 else white

            current_readouts = active.root.N
            while active.root.N < current_readouts + readouts:
                active.tree_search()

            # print some stats on the search
            if FLAGS.verbose >= 3:
                print(active.root.position)

            # First, check the roots for hopeless games.
            if active.should_resign():  # Force resign
                active.set_result(-1 * active.root.position.to_play,
                                  was_resign=True)
                inactive.set_result(
                    active.root.position.to_play, was_resign=True)

            if active.is_done():
                fname = "{:d}-{:s}-vs-{:s}-{:d}.sgf".format(
                    int(time.time()), white_name, black_name, i)
                active.set_result(active.root.position.result(),
                                  was_resign=False)
                with gfile.GFile(os.path.join(sgf_dir, fname), 'w') as _file:
                    sgfstr = sgf_wrapper.make_sgf(active.position.recent,
                                                  active.result_string,
                                                  black_name=black_name,
                                                  white_name=white_name)
                    _file.write(sgfstr)
                print("Finished game", i, active.result_string)
                break

            move = active.pick_move()
            active.play_move(move)
            inactive.play_move(move)

            dur = time.time() - start
            num_move += 1

            if (FLAGS.verbose > 1) or (FLAGS.verbose == 1 and num_move % 10 == 9):
                timeper = (dur / readouts) * 100.0
                print(active.root.position)
                print("%d: %d readouts, %.3f s/100. (%.2f sec)" % (
                    num_move, readouts, timeper, dur))
123,151