function: string (lengths 11 to 56k)
repo_name: string (lengths 5 to 60)
features: sequence
def main(): print("Storage Name: %s" % (storage_name)) print("Block Count: %s" % (block_count)) print("Block Size: %s" % (MemorySize(block_size))) print("Total Memory: %s" % (MemorySize(block_size*block_count))) print("Actual Storage Required: %s" % (MemorySize( EncryptedBlockStorage.compute_storage_size( block_size, block_count, storage_type='sftp')))) print("") # Start an SSH client using paramiko print("Starting SSH Client") with paramiko.SSHClient() as ssh: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.load_system_host_keys() ssh.connect(ssh_host, username=ssh_username, password=ssh_password) print("Setting Up Encrypted Block Storage") setup_start = time.time() with EncryptedBlockStorage.setup(storage_name, block_size, block_count, storage_type='sftp', sshclient=ssh, ignore_existing=True) as f: print("Total Setup Time: %2.f s" % (time.time()-setup_start)) print("Total Data Transmission: %s" % (MemorySize(f.bytes_sent + f.bytes_received))) print("") # We close the device and reopen it after # setup to reset the bytes sent and bytes # received stats. with EncryptedBlockStorage(storage_name, key=f.key, storage_type='sftp', sshclient=ssh) as f: test_count = 1000 start_time = time.time() for t in tqdm.tqdm(list(range(test_count)), desc="Running I/O Performance Test"): f.read_block(random.randint(0,f.block_count-1)) stop_time = time.time() print("Access Block Avg. Data Transmitted: %s (%.3fx)" % (MemorySize((f.bytes_sent + f.bytes_received)/float(test_count)), (f.bytes_sent + f.bytes_received)/float(test_count)/float(block_size))) print("Access Block Avg. Latency: %.2f ms" % ((stop_time-start_time)/float(test_count)*1000)) print("")
ghackebeil/PyORAM
[ 25, 1, 25, 1, 1457916027 ]
def test_defaults_to_pythonanywhere_dot_com_if_no_environment_variables(self): assert get_api_endpoint() == "https://www.pythonanywhere.com/api/v0/user/{username}/{flavor}/"
pythonanywhere/helper_scripts
[ 31, 11, 31, 11, 1484133551 ]
def test_gets_domain_from_pythonanywhere_domain_and_adds_on_www_if_set_but_no_pythonanywhere_site( self, monkeypatch
pythonanywhere/helper_scripts
[ 31, 11, 31, 11, 1484133551 ]
def test_raises_on_401(self, api_token, api_responses): url = "https://foo.com/" api_responses.add(responses.POST, url, status=401, body="nope") with pytest.raises(AuthenticationError) as e: call_api(url, "post") assert str(e.value) == "Authentication error 401 calling API: nope"
pythonanywhere/helper_scripts
[ 31, 11, 31, 11, 1484133551 ]
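The test above stubs an HTTP 401 with the `responses` library before asserting on the error message. For reference, a minimal standalone sketch of that mocking pattern (generic example; the helper_scripts `call_api` internals are not reproduced here):

import requests
import responses

@responses.activate
def demo_401():
    # register a fake POST endpoint that always answers 401 with body "nope"
    responses.add(responses.POST, "https://foo.com/", status=401, body="nope")
    r = requests.post("https://foo.com/")
    assert r.status_code == 401
    assert r.text == "nope"

demo_401()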
def __init__(self, notification_service: NotificationService, download_service: AbstractDownloadService, repository_url: str, repo_path: Path, patch_file: Path): self._notification_service = notification_service self._download_service = download_service self._url = repository_url self._repo_path = repo_path self._patch_file = patch_file self._target_version = None
Brutus5000/BiReUS
[ 1, 1, 1, 4, 1481245107 ]
def get_factory(cls, protocol: int): if cls._patch_tasks is None: cls._patch_tasks = dict() for patch_task_version in PatchTask.__subclasses__(): cls._patch_tasks[patch_task_version.get_version()] = patch_task_version.create if protocol in cls._patch_tasks: return cls._patch_tasks[protocol] else: raise ProtocolException("Protocol version `%s` is not supported in this client version", protocol)
Brutus5000/BiReUS
[ 1, 1, 1, 4, 1481245107 ]
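The `get_factory` cell above builds a protocol-version registry lazily from `PatchTask.__subclasses__()`. A minimal self-contained sketch of that pattern, with hypothetical class names standing in for the BiReUS ones:

class BaseTask:
    _registry = None

    @classmethod
    def get_version(cls) -> int:
        raise NotImplementedError

    @classmethod
    def get_factory(cls, protocol: int):
        # build the version -> subclass map on first use
        if cls._registry is None:
            cls._registry = {sub.get_version(): sub for sub in cls.__subclasses__()}
        try:
            return cls._registry[protocol]
        except KeyError:
            raise ValueError("Protocol version %s is not supported" % protocol)

class TaskV1(BaseTask):
    @classmethod
    def get_version(cls) -> int:
        return 1

class TaskV2(BaseTask):
    @classmethod
    def get_version(cls) -> int:
        return 2

assert BaseTask.get_factory(2) is TaskV2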
def get_version(cls) -> int: pass
Brutus5000/BiReUS
[ 1, 1, 1, 4, 1481245107 ]
def create(cls, notification_service: NotificationService, download_service: AbstractDownloadService, repository_url: str, repo_path: Path, patch_file: Path) -> 'PatchTask': """ Abstract factory function for dynamic patcher initialization same params as in constructor! """ pass
Brutus5000/BiReUS
[ 1, 1, 1, 4, 1481245107 ]
def random_path(): return tempfile.mkdtemp()
richo/groundstation
[ 57, 12, 57, 10, 1356253789 ]
def setUp(self): self.path = random_path() self.repo = self.storeClass(self.path)
richo/groundstation
[ 57, 12, 57, 10, 1356253789 ]
def create_update_object(self, parents, data): return UpdateObject(parents, data)
richo/groundstation
[ 57, 12, 57, 10, 1356253789 ]
def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def begin_delete( self, resource_group_name, # type: str service_endpoint_policy_name, # type: str service_endpoint_policy_definition_name, # type: str **kwargs # type: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {})
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get( self, resource_group_name, # type: str service_endpoint_policy_name, # type: str service_endpoint_policy_definition_name, # type: str **kwargs # type: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def _create_or_update_initial( self, resource_group_name, # type: str service_endpoint_policy_name, # type: str service_endpoint_policy_definition_name, # type: str service_endpoint_policy_definitions, # type: "_models.ServiceEndpointPolicyDefinition" **kwargs # type: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def begin_create_or_update( self, resource_group_name, # type: str service_endpoint_policy_name, # type: str service_endpoint_policy_definition_name, # type: str service_endpoint_policy_definitions, # type: "_models.ServiceEndpointPolicyDefinition" **kwargs # type: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_long_running_output(pipeline_response): deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def list_by_resource_group( self, resource_group_name, # type: str service_endpoint_policy_name, # type: str **kwargs # type: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_resource_group.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
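The `prepare_request`/`get_next` pair above implements next-link paging: the first request hits the list URL, and every later request follows `next_link` until the service stops returning one. A plain-Python sketch of that loop (stand-in data only, not the Azure SDK's `ItemPaged` machinery):

def fetch_page(next_link=None):
    # stand-in for one HTTP round trip: returns (items, next_link)
    pages = {None: (["a", "b"], "page2"), "page2": (["c"], None)}
    return pages[next_link]

def iterate_all():
    next_link = None
    while True:
        items, next_link = fetch_page(next_link)
        yield from items
        if next_link is None:
            break

print(list(iterate_all()))  # ['a', 'b', 'c']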
def isInterleave(self, s1, s2, s3): """ dfs dp
algorhythms/LeetCode
[ 823, 267, 823, 3, 1403872362 ]
def isInterleave_TLE(self, s1, s2, s3): """ dfs Time Limit Exceeded :param s1: :param s2: :param s3: :return: boolean """ if not s3: return True letter = s3[0] if s1 and s1[0] == letter: if self.isInterleave(s1[1:], s2, s3[1:]): return True if s2 and s2[0] == letter: if self.isInterleave(s1, s2[1:], s3[1:]): return True return False
algorhythms/LeetCode
[ 823, 267, 823, 3, 1403872362 ]
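The cell above is the plain DFS that times out; its docstring ("dfs dp") points at the memoized version. A hedged sketch of the standard interleaving-string DP (not necessarily the repo's accepted solution):

from functools import lru_cache

def is_interleave(s1: str, s2: str, s3: str) -> bool:
    if len(s1) + len(s2) != len(s3):
        return False

    @lru_cache(maxsize=None)
    def dfs(i: int, j: int) -> bool:
        # can s1[i:] and s2[j:] interleave to form s3[i + j:]?
        if i == len(s1) and j == len(s2):
            return True
        k = i + j
        return ((i < len(s1) and s1[i] == s3[k] and dfs(i + 1, j)) or
                (j < len(s2) and s2[j] == s3[k] and dfs(i, j + 1)))

    return dfs(0, 0)

assert is_interleave("aabcc", "dbbca", "aadbbcbcac")
assert not is_interleave("aabcc", "dbbca", "aadbbbaccc")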
def __init__(self, caption, label=None, headings=None, rows=None, footnotes=None, **kwargs): super(Table, self).__init__(caption=caption, label=label, **kwargs) self.headings = headings if headings is not None else [] # list(list(Cell)) self.rows = rows if rows is not None else [] # list(list(Cell)) self.footnotes = footnotes if footnotes is not None else []
mcs07/ChemDataExtractor
[ 238, 100, 238, 21, 1475452201 ]
def document(self): return self._document
mcs07/ChemDataExtractor
[ 238, 100, 238, 21, 1475452201 ]
def document(self, document): self._document = document self.caption.document = document for row in self.headings: for cell in row: cell.document = document for row in self.rows: for cell in row: cell.document = document
mcs07/ChemDataExtractor
[ 238, 100, 238, 21, 1475452201 ]
def _repr_html_(self): html_lines = ['<table class="table">'] html_lines.append(self.caption._repr_html_ ()) html_lines.append('<thead>') for hrow in self.headings: html_lines.append('<tr>') for cell in hrow: html_lines.append('<th>' + cell.text + '</th>') html_lines.append('</thead>') html_lines.append('<tbody>') for row in self.rows: html_lines.append('<tr>') for cell in row: html_lines.append('<td>' + cell.text + '</td>') html_lines.append('</tbody>') html_lines.append('</table>') return '\n'.join(html_lines)
mcs07/ChemDataExtractor
[ 238, 100, 238, 21, 1475452201 ]
def records(self): """Chemical records that have been parsed from the table.""" caption_records = self.caption.records # Parse headers to extract contextual data and determine value parser for the column value_parsers = {} header_compounds = defaultdict(list) table_records = ModelList() seen_compound_col = False log.debug('Parsing table headers') for i, col_headings in enumerate(zip(*self.headings)): # log.info('Considering column %s' % i) for parsers in self.parsers: log.debug(parsers) heading_parser = parsers[0] value_parser = parsers[1] if len(parsers) > 1 else None disallowed_parser = parsers[2] if len(parsers) > 2 else None allowed = False disallowed = False for cell in col_headings: log.debug(cell.tagged_tokens) results = list(heading_parser.parse(cell.tagged_tokens)) if results: allowed = True log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.serialize() for c in results])) # Results from every parser are stored as header compounds header_compounds[i].extend(results) # Referenced footnote records are also stored for footnote in self.footnotes: # print('%s - %s - %s' % (footnote.id, cell.references, footnote.id in cell.references)) if footnote.id in cell.references: log.debug('Adding footnote %s to column %s: %s' % (footnote.id, i, [c.serialize() for c in footnote.records])) # print('Footnote records: %s' % [c.to_primitive() for c in footnote.records]) header_compounds[i].extend(footnote.records) # Check if the disallowed parser matches this cell if disallowed_parser and list(disallowed_parser.parse(cell.tagged_tokens)): log.debug('Column %s: Disallowed %s' % (i, heading_parser.__class__.__name__)) disallowed = True # If heading parser matches and disallowed parser doesn't, store the value parser if allowed and not disallowed and value_parser and i not in value_parsers: if isinstance(value_parser, CompoundCellParser): # Only take the first compound col if seen_compound_col: continue seen_compound_col = True log.debug('Column %s: Value parser: %s' % (i, value_parser.__class__.__name__)) value_parsers[i] = value_parser # Stop after value parser is assigned? # for hrow in self.headings: # for i, cell in enumerate(hrow): # log.debug(cell.tagged_tokens) # for heading_parser, value_parser in self.parsers: # results = list(heading_parser.parse(cell.tagged_tokens)) # if results: # log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.to_primitive() for c in results])) # # Results from every parser are stored as header compounds # header_compounds[i].extend(results) # if results and value_parser and i not in value_parsers: # if isinstance(value_parser, CompoundCellParser): # # Only take the first compound col # if seen_compound_col: # continue # seen_compound_col = True # value_parsers[i] = value_parser # break # Stop after first heading parser matches # # Referenced footnote records are also stored # for footnote in self.footnotes: # # print('%s - %s - %s' % (footnote.id, cell.references, footnote.id in cell.references)) # if footnote.id in cell.references: # log.debug('Adding footnote %s to column %s: %s' % (footnote.id, i, [c.to_primitive() for c in footnote.records])) # # print('Footnote records: %s' % [c.to_primitive() for c in footnote.records]) # header_compounds[i].extend(footnote.records) # If no parsers, skip processing table if value_parsers: # If no CompoundCellParser() in value_parsers and value_parsers[0] == [] then set CompoundCellParser() if not seen_compound_col and 0 not in value_parsers: log.debug('No compound column found in table, assuming first column') value_parsers[0] = CompoundCellParser() for row in self.rows: row_compound = Compound() # Keep cell records that are contextual to merge at the end contextual_cell_compounds = [] for i, cell in enumerate(row): log.debug(cell.tagged_tokens) if i in value_parsers: results = list(value_parsers[i].parse(cell.tagged_tokens)) if results: log.debug('Cell column %s: Match %s: %s' % (i, value_parsers[i].__class__.__name__, [c.serialize() for c in results])) # For each result, merge in values from elsewhere for result in results: # Merge each header_compounds[i] for header_compound in header_compounds[i]: if header_compound.is_contextual: result.merge_contextual(header_compound) # Merge footnote compounds for footnote in self.footnotes: if footnote.id in cell.references: for footnote_compound in footnote.records: result.merge_contextual(footnote_compound) if result.is_contextual: # Don't merge cell as a value compound if there are no values contextual_cell_compounds.append(result) else: row_compound.merge(result) # Merge contextual information from cells for contextual_cell_compound in contextual_cell_compounds: row_compound.merge_contextual(contextual_cell_compound) # If no compound name/label, try take from previous row if not row_compound.names and not row_compound.labels and table_records: prev = table_records[-1] row_compound.names = prev.names row_compound.labels = prev.labels # Merge contextual information from caption into the full row for caption_compound in caption_records: if caption_compound.is_contextual: row_compound.merge_contextual(caption_compound) # And also merge from any footnotes that are referenced from the caption for footnote in self.footnotes: if footnote.id in self.caption.references: # print('Footnote records: %s' % [c.to_primitive() for c in footnote.records]) for fn_compound in footnote.records: row_compound.merge_contextual(fn_compound) log.debug(row_compound.serialize()) if row_compound.serialize(): table_records.append(row_compound) # TODO: If no rows have name or label, see if one is in the caption # Include non-contextual caption records in the final output caption_records = [c for c in caption_records if not c.is_contextual] table_records += caption_records return table_records
mcs07/ChemDataExtractor
[ 238, 100, 238, 21, 1475452201 ]
def abbreviation_definitions(self): """Empty list. Abbreviation detection is disabled within table cells.""" return []
mcs07/ChemDataExtractor
[ 238, 100, 238, 21, 1475452201 ]
def dpn92(num_classes=1000): return DPN(num_init_features=64, k_R=96, G=32, k_sec=(3,4,20,3), inc_sec=(16,32,24,128), num_classes=num_classes)
oyam/pytorch-DPNs
[ 91, 30, 91, 1, 1499940247 ]
def dpn131(num_classes=1000): return DPN(num_init_features=128, k_R=160, G=40, k_sec=(4,8,28,3), inc_sec=(16,32,32,128), num_classes=num_classes)
oyam/pytorch-DPNs
[ 91, 30, 91, 1, 1499940247 ]
def __init__(self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, G, _type='normal'): super(DualPathBlock, self).__init__() self.num_1x1_c = num_1x1_c if _type == 'proj': key_stride = 1 self.has_proj = True if _type == 'down': key_stride = 2 self.has_proj = True if _type == 'normal': key_stride = 1 self.has_proj = False if self.has_proj: self.c1x1_w = self.BN_ReLU_Conv(in_chs=in_chs, out_chs=num_1x1_c+2*inc, kernel_size=1, stride=key_stride) self.layers = nn.Sequential(OrderedDict([ ('c1x1_a', self.BN_ReLU_Conv(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)), ('c3x3_b', self.BN_ReLU_Conv(in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=key_stride, padding=1, groups=G)), ('c1x1_c', self.BN_ReLU_Conv(in_chs=num_3x3_b, out_chs=num_1x1_c+inc, kernel_size=1, stride=1)), ]))
oyam/pytorch-DPNs
[ 91, 30, 91, 1, 1499940247 ]
def forward(self, x): data_in = torch.cat(x, dim=1) if isinstance(x, list) else x if self.has_proj: data_o = self.c1x1_w(data_in) data_o1 = data_o[:,:self.num_1x1_c,:,:] data_o2 = data_o[:,self.num_1x1_c:,:,:] else: data_o1 = x[0] data_o2 = x[1] out = self.layers(data_in) summ = data_o1 + out[:,:self.num_1x1_c,:,:] dense = torch.cat([data_o2, out[:,self.num_1x1_c:,:,:]], dim=1) return [summ, dense]
oyam/pytorch-DPNs
[ 91, 30, 91, 1, 1499940247 ]
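The `forward` above keeps two parallel tensors: a fixed-width residual path that is summed element-wise and a dense path that grows by channel concatenation. A toy sketch of that bookkeeping with random tensors (shapes chosen for illustration only):

import torch

num_1x1_c = 4                                 # width of the residual ("summ") path
out = torch.randn(2, num_1x1_c + 3, 8, 8)     # block output: residual part + dense growth
res_in = torch.randn(2, num_1x1_c, 8, 8)      # incoming residual path
dense_in = torch.randn(2, 5, 8, 8)            # incoming dense path

summ = res_in + out[:, :num_1x1_c, :, :]                         # element-wise add
dense = torch.cat([dense_in, out[:, num_1x1_c:, :, :]], dim=1)   # channel-wise concat
print(summ.shape, dense.shape)  # torch.Size([2, 4, 8, 8]) torch.Size([2, 8, 8, 8])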
def __init__(self, num_init_features=64, k_R=96, G=32, k_sec=(3, 4, 20, 3), inc_sec=(16,32,24,128), num_classes=1000): super(DPN, self).__init__() blocks = OrderedDict() # conv1 blocks['conv1'] = nn.Sequential( nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False), nn.BatchNorm2d(num_init_features), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ) # conv2 bw = 256 inc = inc_sec[0] R = int((k_R*bw)/256) blocks['conv2_1'] = DualPathBlock(num_init_features, R, R, bw, inc, G, 'proj') in_chs = bw + 3 * inc for i in range(2, k_sec[0]+1): blocks['conv2_{}'.format(i)] = DualPathBlock(in_chs, R, R, bw, inc, G, 'normal') in_chs += inc # conv3 bw = 512 inc = inc_sec[1] R = int((k_R*bw)/256) blocks['conv3_1'] = DualPathBlock(in_chs, R, R, bw, inc, G, 'down') in_chs = bw + 3 * inc for i in range(2, k_sec[1]+1): blocks['conv3_{}'.format(i)] = DualPathBlock(in_chs, R, R, bw, inc, G, 'normal') in_chs += inc # conv4 bw = 1024 inc = inc_sec[2] R = int((k_R*bw)/256) blocks['conv4_1'] = DualPathBlock(in_chs, R, R, bw, inc, G, 'down') in_chs = bw + 3 * inc for i in range(2, k_sec[2]+1): blocks['conv4_{}'.format(i)] = DualPathBlock(in_chs, R, R, bw, inc, G, 'normal') in_chs += inc # conv5 bw = 2048 inc = inc_sec[3] R = int((k_R*bw)/256) blocks['conv5_1'] = DualPathBlock(in_chs, R, R, bw, inc, G, 'down') in_chs = bw + 3 * inc for i in range(2, k_sec[3]+1): blocks['conv5_{}'.format(i)] = DualPathBlock(in_chs, R, R, bw, inc, G, 'normal') in_chs += inc self.features = nn.Sequential(blocks) self.classifier = nn.Linear(in_chs, num_classes)
oyam/pytorch-DPNs
[ 91, 30, 91, 1, 1499940247 ]
def _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars): """ Find and return continuum pixels given the flux and sigma cut Parameters ---------- f_cut: float the upper limit imposed on the quantity (fbar-1) sig_cut: float the upper limit imposed on the quantity (f_sig) wl: numpy ndarray of length npixels rest-frame wavelength vector fluxes: numpy ndarray of shape (nstars, npixels) pixel intensities ivars: numpy ndarray of shape nstars, npixels inverse variances, parallel to fluxes Returns ------- contmask: boolean mask of length npixels True indicates that the pixel is continuum """ f_bar = np.median(fluxes, axis=0) sigma_f = np.var(fluxes, axis=0) bad = np.logical_and(f_bar==0, sigma_f==0) cont1 = np.abs(f_bar-1) <= f_cut cont2 = sigma_f <= sig_cut contmask = np.logical_and(cont1, cont2) contmask[bad] = False return contmask
annayqho/TheCannon
[ 33, 14, 33, 15, 1411721658 ]
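The `_find_contpix_given_cuts` cell above keeps pixels whose median flux across stars sits near 1 and whose variance is small. A synthetic-data sketch of those same cuts (the thresholds 0.02 and 0.005 are made up for illustration, not TheCannon defaults):

import numpy as np

rng = np.random.default_rng(0)
fluxes = 1.0 + 0.01 * rng.standard_normal((50, 200))  # 50 stars, 200 pixels
fluxes[:, 100:110] += 0.3                             # fake spectral feature: should fail the cut

f_bar = np.median(fluxes, axis=0)
sigma_f = np.var(fluxes, axis=0)
contmask = (np.abs(f_bar - 1) <= 0.02) & (sigma_f <= 0.005)
contmask[(f_bar == 0) & (sigma_f == 0)] = False       # drop dead pixels
print(contmask.sum(), "of", contmask.size, "pixels flagged as continuum")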
def parseArgs(): parser = argparse.ArgumentParser() parser.add_argument("cg1", help="This is the first .CGX (CommonGraph) file") parser.add_argument("cg2", help="This is the second .CGX (CommonGraph) file") parser.add_argument("ds", help="This is the output filename of a .DSX (DiffSet) file") return parser.parse_args()
oderby/VVD
[ 20, 5, 20, 1, 1443293871 ]
def build_list_request( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def build_get_request( subscription_id: str, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def build_update_request( subscription_id: str, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def build_delete_request_initial( subscription_id: str, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def list( self, resource_group_name: str, resource_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get( self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def update( self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, parameters: "_models.PrivateEndpointConnection", **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def _delete_initial( self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def begin_delete( self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {})
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def test_handle_errors(self): l1 = verifier.send_to_retry verifier.handle_errors() l2 = verifier.send_to_retry self.assertGreater(len(l1), len(l2)) self.assertEqual(len(l2), len(l1) + len(run_info['error_list']))
zamattiac/ROSIEBot
[ 1, 3, 1, 5, 1464891428 ]
def test_generate_page_dictionary(self): d1 = v.generate_page_dictionary('wiki/') self.assertGreater(len(d1), 0)
zamattiac/ROSIEBot
[ 1, 3, 1, 5, 1464891428 ]
def __init__(self, msg_center): super(FacialEventTranslator, self).__init__(msg_center) # instance vars self.touching_forehead = False self.last_clench_streak = 0 self.clench_start_time = None self.clench_last_one = False self.blink_in_progress = False self.first_blink = None self.num_of_blinks_in_row = 0 self.blink_timer = None self.last_clench_time = None # Subscribe to events for k in self.msg_keys_handled: self.subscribe(k)
prydom/MuseIC-EventServer
[ 1, 1, 1, 1, 1411803305 ]
def subscribe(self, key): super(FacialEventTranslator, self).subscribe(key)
prydom/MuseIC-EventServer
[ 1, 1, 1, 1, 1411803305 ]
def clenchEndEvent(self, event_time): clench_time = abs( event_time - self.clench_start_time ) print("EVENT: Clench for " + str(clench_time) + " seconds") self.publish('clench_end', clench_time) if clench_time > 1: rounded_int = int(round(clench_time * self.seek_multiplyer)) self.publish('long_clench_rounded_int', rounded_int) if self.last_clench_time and self.last_clench_time < 1 and clench_time < 1: self.publish('quick_clench_two_row', clench_time) self.last_clench_time = False else: self.last_clench_time = clench_time
prydom/MuseIC-EventServer
[ 1, 1, 1, 1, 1411803305 ]
def blinkPeriodEnd(self): self.publish('blink_period_end', None) if self.num_of_blinks_in_row > 1: self.blinksInARowEvent(self.num_of_blinks_in_row) self.num_of_blinks_in_row = 0 self.first_blink = None self.blink_timer = None
prydom/MuseIC-EventServer
[ 1, 1, 1, 1, 1411803305 ]
def setUp(self): self.patcher = mock.patch('luigi_slack.api.SlackAPI') self.mock_SlackAPI = self.patcher.start() self.token = 'dummy-token' self.channels = ['channel1', 'channel2'] self.bot = SlackBot(self.token, channels=self.channels)
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def test_send_notification(self): """Test SlackAPI is called by send_notification()""" self.bot.send_notification()
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def test_set_handlers_valid(self): """Test set_handlers() for valid events""" bot = SlackBot(self.token, events=[SUCCESS, FAILURE]) bot.set_handlers()
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def test_event_label(self): """Test event labels for output""" fixtures = { 'SUCCESS': 'Success', 'FAILURE': 'Failure', 'MISSING': 'Missing', } for event, expected in fixtures.items(): self.assertEqual(event_label(event), expected)
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def setUp(self): self.patcher = mock.patch('luigi_slack.api.SlackAPI') self.mock_SlackAPI = self.patcher.start() self.token = 'dummy-token' self.channels = ['channel1']
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def test_success(self): """Test successful task if queued""" bot = SlackBot(self.token, events=[SUCCESS], channels=self.channels) bot.set_handlers() task = luigi.Task() self.assertEqual(len(bot.event_queue.get(SUCCESS, [])), 0) task.trigger_event(luigi.event.Event.SUCCESS, task) self.assertEqual(len(bot.event_queue.get(SUCCESS)), 1)
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def test_different_task_doesnt_empty_queue(self): """Test a successful task doesn't empty queue with different task""" class CustomTask(luigi.Task): pass bot = SlackBot(self.token, events=[SUCCESS, FAILURE], channels=self.channels) bot.set_handlers() task1 = luigi.Task() # task1 and task2 have different task_id task2 = CustomTask() self.assertEqual(len(bot.event_queue.get(FAILURE, [])), 0) task2.trigger_event(luigi.event.Event.FAILURE, task2, Exception()) self.assertEqual(len(bot.event_queue.get(FAILURE)), 1) task1.trigger_event(luigi.event.Event.SUCCESS, task1) self.assertEqual(len(bot.event_queue.get(FAILURE)), 1)
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def test_failure(self): """Test failure event adds task in queue""" bot = SlackBot(self.token, events=[FAILURE], channels=self.channels) bot.set_handlers() task = luigi.Task() self.assertEqual(len(bot.event_queue.get(FAILURE, [])), 0) task.trigger_event(luigi.event.Event.FAILURE, task, Exception()) self.assertEqual(len(bot.event_queue.get(FAILURE)), 1)
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def test_event_not_implemented(self): """Test processing time event is not implemented yet""" bot = SlackBot(self.token, events=[PROCESSING_TIME], channels=self.channels) bot.set_handlers() task = luigi.Task() self.assertRaises(NotImplementedError, task.trigger_event(luigi.event.Event.PROCESSING_TIME, task))
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
def set_handlers(self): return True
bonzanini/luigi-slack
[ 46, 13, 46, 3, 1446892260 ]
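These SlackBot tests lean on luigi's event hooks: `set_handlers()` presumably registers callbacks so that `trigger_event` fills the bot's queue. A hedged standalone sketch of the underlying luigi mechanism (not the luigi_slack handler code itself):

import luigi

seen = []

@luigi.Task.event_handler(luigi.event.Event.SUCCESS)
def on_success(task):
    # called by trigger_event for every luigi.Task SUCCESS event
    seen.append(task.task_id)

task = luigi.Task()
task.trigger_event(luigi.event.Event.SUCCESS, task)
print(seen)  # one entry: the task_id of `task`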
def test(): """ A simple test routine to draw the quadcopter model """
guiccbr/autonomous-fuzzy-quadcopter
[ 10, 8, 10, 5, 1426022747 ]
def __init__( self, plotly_name="showticklabels", parent_name="sunburst.marker.colorbar", **kwargs
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def __init__( self, plotly_name="typesrc", parent_name="scatterternary.marker.gradient", **kwargs
plotly/plotly.py
[ 13052, 2308, 13052, 1319, 1385013188 ]
def set_override_tba_test(monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("TBA_UNIT_TEST", "false")
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def set_dev(monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("GAE_ENV", "localdev")
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def set_prod(monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("GAE_ENV", "standard")
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def set_project(monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("GOOGLE_CLOUD_PROJECT", "tbatv-prod-hrd")
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def set_storage_mode_remote(monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("STORAGE_MODE", "remote")
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def set_storage_path(monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("STORAGE_PATH", "some/fake/path")
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def test_client_for_env_unit_test_remote(set_storage_mode_remote): client = storage._client_for_env() assert type(client) is InMemoryClient
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def test_client_for_env_dev_path(set_override_tba_test, set_dev, set_storage_path): client = storage._client_for_env() assert type(client) is LocalStorageClient assert client.base_path == Path("some/fake/path")
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def test_client_for_env_dev_remote( set_override_tba_test, set_dev, set_storage_mode_remote, set_project
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def test_client_for_env_production_no_project(set_override_tba_test, set_prod): with pytest.raises( Exception, match=re.escape( "Environment.project (GOOGLE_CLOUD_PROJECT) unset - should be set in production." ), ): storage._client_for_env()
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def test_write(): file_name = "some_file.json" content = "some_content" client = Mock() with patch.object(storage, "_client_for_env", return_value=client): storage.write(file_name, content) client.write.assert_called_with(file_name, content)
the-blue-alliance/the-blue-alliance
[ 334, 153, 334, 422, 1283632451 ]
def extra_downloader_converter(value): """Parses extra_{downloader,converter} arguments. Parameters ---------- value : iterable or str If the value is a string, it is split into a list using spaces as delimiters. Otherwise, it is returned as is. """ if isinstance(value, six.string_types): value = value.split(" ") return value
mila-udem/fuel
[ 851, 262, 851, 90, 1423773764 ]
def __init__(self): self.config = {}
mila-udem/fuel
[ 851, 262, 851, 90, 1423773764 ]
def __getattr__(self, key): if key == 'config' or key not in self.config: raise AttributeError config_setting = self.config[key] if 'value' in config_setting: value = config_setting['value'] elif ('env_var' in config_setting and config_setting['env_var'] in os.environ): value = os.environ[config_setting['env_var']] elif 'yaml' in config_setting: value = config_setting['yaml'] elif 'default' in config_setting: value = config_setting['default'] else: raise ConfigurationError("Configuration not set and no default " "provided: {}.".format(key)) return config_setting['type'](value)
mila-udem/fuel
[ 851, 262, 851, 90, 1423773764 ]
def add_config(self, key, type_, default=NOT_SET, env_var=None): """Add a configuration setting. Parameters ---------- key : str The name of the configuration setting. This must be a valid Python attribute name i.e. alphanumeric with underscores. type : function A function such as ``float``, ``int`` or ``str`` which takes the configuration value and returns an object of the correct type. Note that the values retrieved from environment variables are always strings, while those retrieved from the YAML file might already be parsed. Hence, the function provided here must accept both types of input. default : object, optional The default configuration to return if not set. By default none is set and an error is raised instead. env_var : str, optional The environment variable name that holds this configuration value. If not given, this configuration can only be set in the YAML configuration file. """ self.config[key] = {'type': type_} if env_var is not None: self.config[key]['env_var'] = env_var if default is not NOT_SET: self.config[key]['default'] = default
mila-udem/fuel
[ 851, 262, 851, 90, 1423773764 ]
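Taken together, `__getattr__` and `add_config` above resolve each setting with a fixed precedence: an explicitly assigned value wins, then the environment variable, then the YAML entry, then the default. A minimal sketch of that lookup order (`MY_BATCH_SIZE` is a made-up setting, not part of fuel's schema):

import os

setting = {'type': int, 'env_var': 'MY_BATCH_SIZE', 'default': 32}

def resolve(setting):
    if 'value' in setting:
        raw = setting['value']
    elif 'env_var' in setting and setting['env_var'] in os.environ:
        raw = os.environ[setting['env_var']]
    elif 'yaml' in setting:
        raw = setting['yaml']
    elif 'default' in setting:
        raw = setting['default']
    else:
        raise RuntimeError('configuration not set and no default provided')
    return setting['type'](raw)

os.environ['MY_BATCH_SIZE'] = '64'
print(resolve(setting))  # 64: the environment variable wins over the default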
def make_cookie_file(user_referrer): cookie_file = '{}:{}'.format(user_referrer.campaign.key, user_referrer.key) return cookie_file
moneypark/whydjango
[ 1, 7, 1, 48, 1507605161 ]
def setUp(self): campaign = CampaignFactory( bonus_policy={ 'click': 1, 'registration': 6, } ) self.user_referrer = UserReferrerFactory( campaign=campaign ) self.user = UserFactory()
moneypark/whydjango
[ 1, 7, 1, 48, 1507605161 ]
def test_unreferred_user_registered(self): request = HttpRequest() models.associate_registered_user_with_referral("", user=self.user, request=request) # new user is in the database user_in_db = User.objects.get(username=self.user.username) self.assertEqual(user_in_db.username, self.user.username)
moneypark/whydjango
[ 1, 7, 1, 48, 1507605161 ]
def test_login(self): leaf = Leaf(USERNAME, PASSWORD) assert leaf.VIN == "vin123" assert leaf.custom_sessionid == "csessid"
nricklin/leafpy
[ 16, 7, 16, 53, 1493055618 ]
def test_exeption_raised_when_bad_credentials_passed(self): with self.assertRaises(Exception) as w: leaf = Leaf('[email protected]','invalidpassword')
nricklin/leafpy
[ 16, 7, 16, 53, 1493055618 ]
def test_exception_raised_when_bad_vin_and_customsessionid_used(self): leaf = Leaf(VIN='vin345',custom_sessionid='csid123') with self.assertRaises(Exception) as w: leaf.BatteryStatusRecordsRequest()
nricklin/leafpy
[ 16, 7, 16, 53, 1493055618 ]
def test_login_with_only_VIN_raises_exception(self): with self.assertRaises(Exception): leaf = Leaf(VIN='vin123')
nricklin/leafpy
[ 16, 7, 16, 53, 1493055618 ]
def test_login_with_no_args_raises_exception(self): with self.assertRaises(Exception): leaf = Leaf()
nricklin/leafpy
[ 16, 7, 16, 53, 1493055618 ]
def read_region(config, *args, **kwargs): """Snip-out target regions from nc4 file Quick and dirty hax to reduce the size of data read in from netCDF files. Keeps a memory leak in the module from blowing up the script. Not the best way to handle this. Parameters ---------- config : dict Run configuration dictionary. Used to parse out target regions. *args : Passed on to read(). **kwargs : Passed on to read(). Returns ------- years : array-like regions : array-like data : array-like """ years, regions, data = read(*args, **kwargs) if configs.is_allregions(config): regions_msk = np.ones(regions.shape, dtype='bool') else: target_regions = configs.get_regions(config, regions) regions_msk = np.isin(regions, target_regions) return years, regions[regions_msk], data[..., regions_msk]
jrising/prospectus-tools
[ 2, 7, 2, 2, 1448042602 ]
def iterate_regions(filepath, column, config={}): global deltamethod_vcv do_deltamethod = False if configs.is_parallel_deltamethod(config) else config.get('deltamethod', None) if column is not None or 'costs' not in filepath: years, regions, data = read_region(config, filepath, column if column is not None else 'rebased', do_deltamethod) else: years, regions, data1 = read_region(config, filepath, 'costs_lb', do_deltamethod) years, regions, data2 = read_region(config, filepath, 'costs_ub', do_deltamethod) data = data2 / 1e5 if deltamethod_vcv is not None and not config.get('deltamethod', False): ## Inferred that these were deltamethod files config['deltamethod'] = True if config.get('multiimpact_vcv', None) is not None and deltamethod_vcv is not None: assert isinstance(config['multiimpact_vcv'], np.ndarray) # Extend data to conform to multiimpact_vcv foundindex = None for ii in range(config['multiimpact_vcv'].shape[0] - deltamethod_vcv.shape[0] + 1): if np.allclose(deltamethod_vcv, config['multiimpact_vcv'][ii:(ii+deltamethod_vcv.shape[0]), ii:(ii+deltamethod_vcv.shape[1])]): foundindex = ii break if foundindex is None: print np.sum(np.abs(deltamethod_vcv - config['multiimpact_vcv'][:deltamethod_vcv.shape[0], :deltamethod_vcv.shape[1]])) print np.sum(np.abs(deltamethod_vcv - config['multiimpact_vcv'][deltamethod_vcv.shape[0]:, deltamethod_vcv.shape[1]:])) assert foundindex is not None, "Cannot find the VCV for " + filepath + " within the master VCV." newdata = np.zeros(tuple([config['multiimpact_vcv'].shape[0]] + list(data.shape[1:]))) if len(data.shape) == 2: newdata[foundindex:(foundindex + deltamethod_vcv.shape[0]),:] = data else: newdata[foundindex:(foundindex + deltamethod_vcv.shape[0]),:,:] = data data = newdata deltamethod_vcv = None # reset for next file config['regionorder'] = list(regions) if configs.is_allregions(config): yield 'all', years, data return regions = list(regions) for region in configs.get_regions(config, regions): ii = regions.index(region) if config.get('deltamethod', False) and not configs.is_parallel_deltamethod(config): yield regions[ii], years, data[:, :, ii] else: yield regions[ii], years, data[:, ii]
jrising/prospectus-tools
[ 2, 7, 2, 2, 1448042602 ]
def main(): """Main function""" img = Image.open("test.jpeg") # Print the image's EXIF metadata dictionary indexed by EXIF numeric tags exif_data_num_dict = img._getexif() print exif_data_num_dict # Print the image's EXIF metadata dictionary indexed by EXIF tag name strings exif_data_str_dict = {PIL.ExifTags.TAGS[k]: v for k, v in exif_data_num_dict.items() if k in PIL.ExifTags.TAGS} print exif_data_str_dict
jeremiedecock/snippets
[ 20, 6, 20, 1, 1433499549 ]
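The EXIF cell above is Python 2 (`print` statements, the private `_getexif()`). For comparison, a Python 3 sketch using Pillow's public API ("photo.jpg" is a placeholder path):

from PIL import Image, ExifTags

img = Image.open("photo.jpg")  # placeholder file name
exif = img.getexif()           # mapping of numeric EXIF tag -> value
named = {ExifTags.TAGS.get(tag, tag): value for tag, value in exif.items()}
print(named)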
def __init__(self, directory_id, private_key, url=LAUNCHKEY_PRODUCTION, testing=False, transport=None): """ :param directory_id: UUID for the requesting directory :param private_key: PEM formatted private key string :param url: URL for the LaunchKey API :param testing: Boolean stating whether testing mode is being used. This will determine whether SSL validation occurs. :param: transport: Instantiated transport object. The default and currently only supported transport is launchkey.transports.JOSETransport. If you wish to set encryption or hashing algorithms, this is where you would do it. IE: JOSETransport(jwt_algorithm="RS512", jwe_cek_encryption="RSA-OAEP", jwe_claims_encryption="A256CBC-HS512", content_hash_algorithm="S256") """ super(DirectoryFactory, self).__init__('dir', directory_id, private_key, url, testing, transport)
LaunchKey/launchkey-python
[ 20, 7, 20, 5, 1372713315 ]
def _genCommentHeader(comment): lines = [] lines.append('/*********************************************************************************************************************') lines.append('* %s'%comment) lines.append('*********************************************************************************************************************/') return lines
cogu/autosar
[ 233, 130, 233, 19, 1469005526 ]
def _genCommentHeader2(comment): """ Same as _genCommentHeader but returns a C sequence instead of raw strings """ code = C.sequence() code.append(C.line('/*********************************************************************************************************************')) code.append(C.line('* %s'%comment)) code.append(C.line('*********************************************************************************************************************/')) return code
cogu/autosar
[ 233, 130, 233, 19, 1469005526 ]
def __init__(self, partition, useDefaultTypes=True): self.partition = partition self.defaultTypes = {} if useDefaultTypes: self._initDefaultType()
cogu/autosar
[ 233, 130, 233, 19, 1469005526 ]
def generate(self, dest_dir = '.', file_name='Rte_Type.h'): """ Generates Rte_Type.h Note: The last argument has been deprecated and is no longer in use """ if self.partition.isFinalized == False: self.partition.finalize() file_path = os.path.join(dest_dir, file_name) with io.open(file_path, 'w', newline='\n') as fp: hfile=C.hfile(file_name) hfile.code.extend([C.line(x) for x in _genCommentHeader('Includes')]) hfile.code.append(C.include("Std_Types.h")) hfile.code.append(C.blank()) (basicTypes,complexTypes,modeTypes) = self.partition.types.getTypes() hfile.code.extend([C.line(x) for x in _genCommentHeader('Data Type Definitions')]) hfile.code.append(C.blank()) ws = self.partition.ws unusedDefaultTypes = self._findUnusedDefaultTypes(ws, basicTypes)
cogu/autosar
[ 233, 130, 233, 19, 1469005526 ]
def _initDefaultType(self): self.defaultTypes['Boolean']=C.sequence().extend([C.statement(C.typedef('boolean', 'Boolean'))]) self.defaultTypes['UInt8']=C.sequence().extend([C.statement(C.typedef('uint8', 'UInt8')), C.define('UInt8_LowerLimit', '((UInt8)0u)'), C.define('UInt8_UpperLimit', '((UInt8)255u)')]) self.defaultTypes['UInt16']=C.sequence().extend([C.statement(C.typedef('uint16', 'UInt16')), C.define('UInt16_LowerLimit', '((UInt16)0u)'), C.define('UInt16_UpperLimit', '((UInt16)65535u)')]) self.defaultTypes['UInt32']=C.sequence().extend([C.statement(C.typedef('uint32', 'UInt32')), C.define('UInt32_LowerLimit', '((UInt32)0u)'), C.define('UInt32_UpperLimit', '((UInt32)4294967295u)')]) self.defaultTypes['SInt8']=C.sequence().extend([C.statement(C.typedef('sint8', 'SInt8')), C.define('SInt8_LowerLimit', '((SInt8)-128)'), C.define('SInt8_UpperLimit', '((SInt8)127)')]) self.defaultTypes['SInt16']=C.sequence().extend([C.statement(C.typedef('sint16', 'SInt16')), C.define('SInt16_LowerLimit', '((SInt16)-32768)'), C.define('SInt16_UpperLimit', '((SInt16)32767)')]) self.defaultTypes['SInt32']=C.sequence().extend([C.statement(C.typedef('sint32', 'SInt32')), C.define('SInt32_LowerLimit', '((SInt32)-2147483648)'), C.define('SInt32_UpperLimit', '((SInt32)2147483647)')])
cogu/autosar
[ 233, 130, 233, 19, 1469005526 ]
def _findUnusedDefaultTypes(self, ws, typerefs): defaultTypeNames = set(self.defaultTypes.keys()) usedTypeNames = set()
cogu/autosar
[ 233, 130, 233, 19, 1469005526 ]
def _typename(bitcount,minval): if bitcount <=8: return 'uint8' if minval >= 0 else 'sint8' elif bitcount <=16: return 'uint16' if minval >= 0 else 'sint16' elif bitcount <=32: return 'uint32' if minval >= 0 else 'sint32' elif bitcount <=64: return 'uint64' if minval >= 0 else 'sint64' else: raise ValueError(bitcount)
cogu/autosar
[ 233, 130, 233, 19, 1469005526 ]
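The `_typename` helper above maps a bit count and minimum value onto an AUTOSAR platform type. A quick standalone restatement with a couple of sanity checks:

def typename(bitcount, minval):
    # smallest standard width that fits, signed when the range dips below zero
    for width in (8, 16, 32, 64):
        if bitcount <= width:
            return ('uint%d' if minval >= 0 else 'sint%d') % width
    raise ValueError(bitcount)

assert typename(12, 0) == 'uint16'
assert typename(12, -1) == 'sint16'
assert typename(33, 0) == 'uint64'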