ext: stringclasses (9 values)
sha: stringlengths (min 40, max 40)
content: stringlengths (min 3, max 1.04M)
py
1a45a0e152dded180258925cfae5b28177f86fc1
import sys
from typing import List


def reverse_args(av: List[str]) -> None:
    # Nothing to do when only the program name was passed.
    if len(av) == 1:
        return
    without_program_name = av[1:len(av)]
    joined = ' '.join(without_program_name)
    # Reverse the joined arguments and swap upper/lower case before printing.
    reversed = joined[::-1]
    swap_Aa_aA = reversed.swapcase()
    print(swap_Aa_aA)
    return


if __name__ == "__main__":
    reverse_args(sys.argv)
py
1a45a2ca143d2bf3f3d9b5c7bfe10cfb56d3c341
""" example showing how to plot data from a DEM file and an ESRI shape file using gdal (http://pypi.python.org/pypi/GDAL). """ from osgeo import gdal, ogr from mpl_toolkits.basemap import Basemap, cm import numpy as np import matplotlib.pyplot as plt from numpy import ma # read 2.5 minute U.S. DEM file using gdal. # (http://www.prism.oregonstate.edu/docs/meta/dem_25m.htm) gd = gdal.Open('us_25m.dem') array = gd.ReadAsArray() # get lat/lon coordinates from DEM file. coords = gd.GetGeoTransform() nlons = array.shape[1]; nlats = array.shape[0] delon = coords[1] delat = coords[5] lons = coords[0] + delon*np.arange(nlons) lats = coords[3] + delat*np.arange(nlats)[::-1] # reverse lats # setup figure. fig = plt.figure(figsize=(11,6)) # setup basemap instance. m = Basemap(llcrnrlon=-119,llcrnrlat=22,urcrnrlon=-64,urcrnrlat=49, projection='lcc',lat_1=33,lat_2=45,lon_0=-95) # create masked array, reversing data in latitude direction # (so that data is oriented in increasing latitude, as transform_scalar requires). topoin = ma.masked_values(array[::-1,:],-999.) # transform DEM data to a 4 km native projection grid nx = int((m.xmax-m.xmin)/4000.)+1; ny = int((m.ymax-m.ymin)/4000.)+1 topodat = m.transform_scalar(topoin,lons,lats,nx,ny,masked=True) # plot DEM image on map. im = m.imshow(topodat,cmap=cm.GMT_haxby_r) # draw meridians and parallels. m.drawparallels(np.arange(20,71,10),labels=[1,0,0,0]) m.drawmeridians(np.arange(-120,-40,10),labels=[0,0,0,1]) # plot state boundaries from shapefile using ogr. g = ogr.Open ("st99_d00.shp") L = g.GetLayer(0) # data is in 1st layer. for feat in L: # iterate over features in layer geo = feat.GetGeometryRef() # iterate over geometries. for count in range(geo.GetGeometryCount()): geom = geo.GetGeometryRef(count) if not geom.GetGeometryCount(): # just one geometry. # get lon,lat points lons = [geom.GetX(i) for i in range(geom.GetPointCount())] lats = [geom.GetY(i) for i in range(geom.GetPointCount())] # convert to map projection coords. x, y = m(lons,lats) # plot on map. m.plot(x,y,'k') else: # iterate over nested geometries. for cnt in range( geom.GetGeometryCount()): g = geom.GetGeometryRef( cnt ) lons = [g.GetX(i) for i in range(g.GetPointCount())] lats = [g.GetY(i) for i in range(g.GetPointCount())] x, y = m(lons,lats) m.plot(x,y,'k') # draw colorbar. m.colorbar(im) plt.title(gd.GetDescription()+' with state boundaries from '+g.GetName(),y=1.05) plt.show()
py
1a45a3b36bf6daf42cb68dee09f7fccbb0189f36
#!/usr/bin/python
import povray_file_making

povray_file_making.povray_making_movie(1)
py
1a45a3ba4032de2780c3515429fc694b96c9a761
frase = str(input('Digite uma frase qualquer: ').upper().strip())  # read a sentence (frase), uppercased and stripped
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
# walk the joined string backwards, building and printing the reversed text
for letra in range(len(junto) - 1, -1, -1):
    inverso += junto[letra]
    print(junto[letra], end='')
py
1a45a463a470c5d844e5808993a02a4a32c712ae
from __future__ import division
from time import sleep
import httplib2
import json

h = httplib2.Http()

url = raw_input("Please enter the uri you want to access, \n If left blank the connection will be set to 'http://localhost:5000/rate-limited': ")
if url == '':
    url = 'http://localhost:5000/rate-limited'
req_per_minute = float(raw_input("Please specify the number of requests per minute: "))
interval = (60.0 / req_per_minute)


def SendRequests(url, req_per_minute):
    requests = 0
    while requests < req_per_minute:
        result = json.loads(h.request(url, 'GET')[1])
        # result = h.request(url,'GET')[1]
        # print result
        if result.get('error') is not None:
            print "Error #%s : %s" % (result.get('error'), result.get('data'))
            print "Hit rate limit. Waiting 5 seconds and trying again..."
            sleep(5)
            SendRequests(url, req_per_minute)
        else:
            print "Number of Requests: ", requests+1
            print result.get('response')
        requests = requests + 1
        sleep(interval)


print "Sending Requests..."
SendRequests(url, req_per_minute)
py
1a45a4a5c38db50935abdc2efce7adc02bc4ca5c
from django.test import TestCase
from django.urls import reverse


class DjangoTracerTests(TestCase):
    def test_request_exists(self):
        url = reverse('request-test')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'VIEW')

        # Pull out our request from headers
        header = response.get('X-Request-ID', None)

        # Make sure it exists
        self.assertIsNotNone(header)

        # Make sure it's 36 characters long (uuid4)
        self.assertEqual(len(header), 36)

        # Since it's a UUID it should have 4 hyphens in it's string
        # representation
        self.assertEqual(header.count('-'), 4)
py
1a45a6db0ecfb9684c54b53215501f1a93caa714
from lgsf.councillors.scrapers import ModGovCouncillorScraper


class Scraper(ModGovCouncillorScraper):
    base_url = "http://meetings.derrycityandstrabanedistrict.com"
py
1a45a82fbfc6e2386bfc7bf67bf522f66fbd8d62
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron.tests import base

from vmware_nsx.plugins.nsx_v.vshield import vcns_driver
from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common

EDGE_ID = 'edge-x'
POOL_ID = 'b3dfb476-6fdf-4ddd-b6bd-e86ae78dc30b'


def firewall_section_maker(if_ip_list, vip_ip_list):
    return (
        '<section id="1132" name="LBaaS FW Rules"><rule><name>' + POOL_ID +
        '</name><action>allow</action><sources excluded="false"><source>'
        '<type>Ipv4Address</type><value>' + ','.join(if_ip_list) +
        '</value></source></sources><destinations excluded="false">'
        '<destination><type>Ipv4Address</type><value>' +
        ','.join(vip_ip_list) + '</value></destination></destinations></rule>'
        '</section>')


def if_maker(ip_list):
    intf = {
        'index': 1, 'name': 'internal1', 'addressGroups': {
            'addressGroups': [
                {'subnetPrefixLength': '24',
                 'secondaryAddresses': {
                     'ipAddress': ip_list,
                     'type': 'secondary_addresses'},
                 'primaryAddress': '10.0.0.1',
                 'subnetMask': '255.255.255.0'}]},
        'portgroupName': 'pg1234', 'label': 'vNic_1',
        'type': 'internal', 'portgroupId': 'virtualwire-31'}
    return intf


def if_list_maker(ip_list):
    if_list = {
        'vnics': [
            {'index': 0, 'name': 'external', 'addressGroups': {
                'addressGroups': [
                    {'subnetMask': '255.255.255.0',
                     'primaryAddress': '172.24.4.2',
                     'subnetPrefixLength': '24'}]},
             'portgroupName': 'VM Network', 'label': 'vNic_0',
             'type': 'uplink', 'portgroupId': 'network-13'},
            {'index': 1, 'name': 'internal1', 'addressGroups': {
                'addressGroups': [
                    {'subnetPrefixLength': '24',
                     'secondaryAddresses': {
                         'ipAddress': ip_list,
                         'type': 'secondary_addresses'},
                     'primaryAddress': '10.0.0.1',
                     'subnetMask': '255.255.255.0'}]},
             'portgroupName': 'pg1234', 'label': 'vNic_1',
             'type': 'internal', 'portgroupId': 'virtualwire-31'},
            {'index': 2, 'name': 'vnic2',
             'addressGroups': {'addressGroups': []},
             'label': 'vNic_2', 'type': 'internal'},
            {'index': 3, 'name': 'vnic3',
             'addressGroups': {'addressGroups': []},
             'label': 'vNic_3', 'type': 'internal'}]}
    return if_list


class TestLbaasCommon(base.BaseTestCase):
    def setUp(self):
        super(TestLbaasCommon, self).setUp()
        callbacks = mock.Mock()
        callbacks.plugin = mock.Mock()
        self.edge_driver = vcns_driver.VcnsDriver(callbacks)
        self.edge_driver._lb_driver_prop = mock.Mock()

    def _mock_edge_driver_vcns(self, attr):
        return mock.patch.object(self.edge_driver.vcns, attr)

    def test_add_vip_as_secondary_ip(self):
        update_if = if_maker(['10.0.0.6', '10.0.0.8'])

        with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\
                self._mock_edge_driver_vcns(
                    'update_interface') as mock_update_if:
            mock_get_if.return_value = (None, if_list_maker(['10.0.0.6']))
            lb_common.add_vip_as_secondary_ip(
                self.edge_driver.vcns, EDGE_ID, '10.0.0.8')
            mock_update_if.assert_called_with(EDGE_ID, update_if)

    def test_del_vip_as_secondary_ip(self):
        update_if = if_maker(['10.0.0.6'])

        with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\
                self._mock_edge_driver_vcns(
                    'update_interface') as mock_update_if:
            mock_get_if.return_value = (
                None, if_list_maker(['10.0.0.6', '10.0.0.8']))
            lb_common.del_vip_as_secondary_ip(
                self.edge_driver.vcns, EDGE_ID, '10.0.0.8')
            mock_update_if.assert_called_with(EDGE_ID, update_if)

    def test_get_edge_ip_addresses(self):
        get_if_list = if_list_maker(['10.0.0.6'])

        with mock.patch.object(self.edge_driver.vcns, 'get_interfaces',
                               return_value=(None, get_if_list)):
            ip_list = lb_common.get_edge_ip_addresses(self.edge_driver.vcns,
                                                      EDGE_ID)
            self.assertEqual(['172.24.4.2', '10.0.0.1'], ip_list)
py
1a45a8f08f49a3223a954cad267f81fed2bd5ae0
#!/usr/bin/env python3 # Copyright 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os import sys import time import random import hashlib import avalon_enclave_manager.sgx_work_order_request as work_order_request import avalon_enclave_manager.avalon_enclave_helper as enclave_helper import avalon_crypto_utils.signature as signature import avalon_crypto_utils.crypto_utility as crypto_utils from database import connector from error_code.error_status import ReceiptCreateStatus, WorkOrderStatus from avalon_sdk.worker.worker_details import WorkerStatus, WorkerType from avalon_sdk.work_order_receipt.work_order_receipt \ import WorkOrderReceiptRequest logger = logging.getLogger(__name__) # representation of the enclave data enclave_data = None # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX class EnclaveManager: """ Wrapper for managing Worker data """ def __init__(self, config, signup_data, measurements): self.config = config self.enclave_data = signup_data self.sealed_data = signup_data.sealed_data self.verifying_key = signup_data.verifying_key self.encryption_key = signup_data.encryption_key # TODO: EncryptionKeyNonce and EncryptionKeySignature are hardcoded # to dummy values. # Need to come up with a scheme to generate both for every unique # encryption key. self.encryption_key_nonce = "" self.encryption_key_signature = "" self.enclave_id = signup_data.enclave_id self.extended_measurements = measurements # ProofDataType is one of TEE prefixed type # TODO: Read ProofDataType from config file self.proof_data_type = config.get("WorkerConfig")["ProofDataType"] self.proof_data = signup_data.proof_data # Key pair for work order receipt signing # This is temporary approach self.private_key = crypto_utils.generate_signing_keys() self.public_key = self.private_key.GetPublicKey().Serialize() def manager_on_boot(self, kv_helper): """ Executes Boot flow of enclave manager """ logger.info("Executing boot time procedure") # Cleanup "workers" table workers_list = kv_helper.lookup("workers") if len(workers_list) == 0: logger.info("No worker entries available in workers table; " + "skipping cleanup") else: logger.info("Clearing entries in workers table") for worker in workers_list: kv_helper.remove("workers", worker) worker_info = create_json_worker(self, self.config) logger.info("Adding enclave workers to workers table") worker_id = crypto_utils.strip_begin_end_public_key(self.enclave_id) \ .encode("UTF-8") # Calculate sha256 of worker id to get 32 bytes. The TC spec proxy # model contracts expect byte32. Then take a hexdigest for hex str. 
worker_id = hashlib.sha256(worker_id).hexdigest() kv_helper.set("workers", worker_id, worker_info) # Cleanup wo-processing" table processing_list = kv_helper.lookup("wo-processing") if len(processing_list) == 0: logger.info("No workorder entries found in " + "wo-processing table, skipping Cleanup") return for wo in processing_list: logger.info("Validating workorders in wo-processing table") wo_json_resp = kv_helper.get("wo-responses", wo) wo_processed = kv_helper.get("wo-processed", wo) if wo_json_resp is not None: try: wo_resp = json.loads(wo_json_resp) except ValueError as e: logger.error( "Invalid JSON format found for the response for " + "workorder %s - %s", wo, e) if wo_processed is None: kv_helper.set("wo-processed", wo, WorkOrderStatus.FAILED.name) kv_helper.remove("wo-processing", wo) continue if "Response" in wo_resp and \ wo_resp["Response"]["Status"] == \ WorkOrderStatus.FAILED: if wo_processed is None: kv_helper.set("wo-processed", wo, WorkOrderStatus.FAILED.name) logger.error("Work order processing failed; " + "removing it from wo-processing table") kv_helper.remove("wo-processing", wo) continue wo_receipt = kv_helper.get("wo-receipts", wo) if wo_receipt: # update receipt logger.info("Updating receipt in boot flow") self.__update_receipt(kv_helper, wo, wo_json_resp) logger.info("Receipt updated for workorder %s during boot", wo) if wo_processed is None: kv_helper.set("wo-processed", wo, WorkOrderStatus.SUCCESS.name) else: logger.info("No response found for the workorder %s; " + "hence placing the workorder request " + "back in wo-scheduled", wo) kv_helper.set("wo-scheduled", wo, WorkOrderStatus.SCHEDULED.name) logger.info( "Finally deleting workorder %s from wo-processing table", wo) kv_helper.remove("wo-processing", wo) # End of for-loop # ----------------------------------------------------------------- def process_work_orders(self, kv_helper): """ Executes Run time flow of enclave manager """ logger.info("Processing work orders") try: # Get all workorders requests from KV storage lookup and process list_of_workorders = kv_helper.lookup("wo-scheduled") if not list_of_workorders: logger.info("Received empty list of work orders from " + "wo-scheduled table") return except Exception as e: logger.error("Problem while getting keys from wo-scheduled table") return for wo_id in list_of_workorders: try: kv_helper.set("wo-processing", wo_id, WorkOrderStatus.PROCESSING.name) # Get JSON workorder request corresponding to wo_id wo_json_req = kv_helper.get("wo-requests", wo_id) if wo_json_req is None: logger.error("Received empty work order corresponding " + "to id %s from wo-requests table", wo_id) kv_helper.remove("wo-processing", wo_id) return except Exception as e: logger.error("Problem while reading the work order %s" "from wo-requests table", wo_id) kv_helper.remove("wo-processing", wo_id) return logger.info("Create workorder entry %s in wo-processing table", wo_id) kv_helper.set("wo-processing", wo_id, WorkOrderStatus.PROCESSING.name) logger.info("Delete workorder entry %s from wo-scheduled table", wo_id) kv_helper.remove("wo-scheduled", wo_id) logger.info("Validating JSON workorder request %s", wo_id) validation_status = validate_request(wo_json_req) if not validation_status: logger.error( "JSON validation for Workorder %s failed; " + "handling Failure scenarios", wo_id) wo_response["Response"]["Status"] = WorkOrderStatus.FAILED wo_response["Response"]["Message"] = \ "Workorder JSON request is invalid" kv_helper.set("wo-responses", wo_id, json.dumps(wo_response)) 
kv_helper.set("wo-processed", wo_id, WorkOrderStatus.FAILED.name) kv_helper.remove("wo-processing", wo_id) return # Execute work order request logger.info("Execute workorder with id %s", wo_id) wo_json_resp = execute_work_order(self.enclave_data, wo_json_req) wo_resp = json.loads(wo_json_resp) logger.info("Update workorder receipt for workorder %s", wo_id) receipt = self.__update_receipt(kv_helper, wo_id, wo_resp) if "Response" in wo_resp and \ wo_resp["Response"]["Status"] == WorkOrderStatus.FAILED: logger.error("error in Response") kv_helper.set("wo-processed", wo_id, WorkOrderStatus.FAILED.name) kv_helper.set("wo-responses", wo_id, wo_json_resp) kv_helper.remove("wo-processing", wo_id) return logger.info("Mark workorder status for workorder id %s " + "as Completed in wo-processed", wo_id) kv_helper.set("wo-processed", wo_id, WorkOrderStatus.SUCCESS.name) logger.info("Create entry in wo-responses table for workorder %s", wo_id) kv_helper.set("wo-responses", wo_id, wo_json_resp) logger.info("Delete workorder entry %s from wo-processing table", wo_id) kv_helper.remove("wo-processing", wo_id) # end of for loop # ----------------------------------------------------------------- def __update_receipt(self, kv_helper, wo_id, wo_json_resp): """ Update the existing work order receipt with the status as in wo_json_ Parameters: - kv_helper is lmdb instance to access database - wo_id is work order id of request for which receipt is to be created. - wo_json_resp is json rpc response of the work order execution. status of the work order receipt and updater signature update in the receipt. """ receipt_entry = kv_helper.get("wo-receipts", wo_id) if receipt_entry: update_type = None if "error" in wo_json_resp and \ wo_json_resp["error"]["code"] != \ WorkOrderStatus.PENDING.value: update_type = ReceiptCreateStatus.FAILED.value else: update_type = ReceiptCreateStatus.PROCESSED.value receipt_obj = WorkOrderReceiptRequest() wo_receipt = receipt_obj.update_receipt( wo_id, update_type, wo_json_resp, self.private_key ) updated_receipt = None # load previous updates to receipt updates_to_receipt = kv_helper.get("wo-receipt-updates", wo_id) # If it is first update to receipt if updates_to_receipt is None: updated_receipt = [] else: updated_receipt = json.loads(updates_to_receipt) # Get the last update to receipt last_receipt = updated_receipt[len(updated_receipt) - 1] # If receipt updateType is completed, # then no further update allowed if last_receipt["updateType"] == \ ReceiptCreateStatus.COMPLETED.value: logger.info( "Receipt for the workorder id %s is completed " + "and no further updates are allowed", wo_id) return updated_receipt.append(wo_receipt) # Since receipts_json is jrpc request updating only params object. 
kv_helper.set("wo-receipt-updates", wo_id, json.dumps( updated_receipt)) logger.info("Receipt for the workorder id %s is updated to %s", wo_id, wo_receipt) else: logger.info("Work order receipt is not created, " + "so skipping the update") # ----------------------------------------------------------------- def create_enclave_signup_data(): """ Create enclave signup data """ try: enclave_signup_data = \ enclave_helper.EnclaveHelper.create_enclave_signup_data() except Exception as e: logger.error("failed to create enclave signup data; %s", str(e)) sys.exit(-1) return enclave_signup_data # ----------------------------------------------------------------- def execute_work_order(enclave_data, input_json_str, indent=4): """ Submits workorder request to Worker enclave and retrieves the response """ try: wo_request = work_order_request.SgxWorkOrderRequest( enclave_data, input_json_str) wo_response = wo_request.execute() try: json_response = json.dumps(wo_response, indent=indent) except Exception as err: logger.error("ERROR: Failed to serialize JSON; %s", str(err)) wo_response["Response"]["Status"] = WorkOrderStatus.FAILED wo_response["Response"]["Message"] = "Failed to serialize JSON" json_response = json.dumps(wo_response) except Exception as e: logger.error("failed to execute work order; %s", str(e)) wo_response["Response"]["Status"] = WorkOrderStatus.FAILED wo_response["Response"]["Message"] = str(e) json_response = json.dumps(wo_response) return json_response # ----------------------------------------------------------------- def validate_request(wo_request): """ Validate JSON workorder request """ try: json.loads(wo_request) except ValueError as e: logger.error("Invalid JSON format found for workorder - %s", e) return False return True # ----------------------------------------------------------------- def create_json_worker(enclave_data, config): """ Create JSON worker object which gets saved in KvStorage """ worker_type_data = dict() worker_type_data["verificationKey"] = enclave_data.verifying_key worker_type_data["extendedMeasurements"] = \ enclave_data.extended_measurements worker_type_data["proofDataType"] = enclave_data.proof_data_type worker_type_data["proofData"] = enclave_data.proof_data worker_type_data["encryptionKey"] = enclave_data.encryption_key worker_type_data["encryptionKeySignature"] = \ enclave_data.encryption_key_signature worker_info = dict() worker_info["workerType"] = WorkerType.TEE_SGX.value worker_info["organizationId"] = \ config.get("WorkerConfig")["OrganizationId"] worker_info["applicationTypeId"] = \ config.get("WorkerConfig")["ApplicationTypeId"] details_info = dict() details_info["workOrderSyncUri"] = \ config.get("WorkerConfig")["WorkOrderSyncUri"] details_info["workOrderAsyncUri"] = \ config.get("WorkerConfig")["WorkOrderAsyncUri"] details_info["workOrderPullUri"] = \ config.get("WorkerConfig")["WorkOrderPullUri"] details_info["workOrderNotifyUri"] = \ config.get("WorkerConfig")["WorkOrderNotifyUri"] details_info["receiptInvocationUri"] = \ config.get("WorkerConfig")["ReceiptInvocationUri"] details_info["workOrderInvocationAddress"] = config.get( "WorkerConfig")["WorkOrderInvocationAddress"] details_info["receiptInvocationAddress"] = config.get( "WorkerConfig")["ReceiptInvocationAddress"] details_info["fromAddress"] = config.get("WorkerConfig")["FromAddress"] details_info["hashingAlgorithm"] = \ config.get("WorkerConfig")["HashingAlgorithm"] details_info["signingAlgorithm"] = \ config.get("WorkerConfig")["SigningAlgorithm"] 
details_info["keyEncryptionAlgorithm"] = \ config.get("WorkerConfig")["KeyEncryptionAlgorithm"] details_info["dataEncryptionAlgorithm"] = \ config.get("WorkerConfig")["DataEncryptionAlgorithm"] details_info["workOrderPayloadFormats"] = \ config.get("WorkerConfig")["workOrderPayloadFormats"] details_info["workerTypeData"] = worker_type_data worker_info["details"] = details_info worker_info["status"] = WorkerStatus.ACTIVE.value # JSON serialize worker_info json_worker_info = json.dumps(worker_info) logger.info("JSON serialized worker info is %s", json_worker_info) return json_worker_info # ----------------------------------------------------------------- def start_enclave_manager(config): """ Instantiate KvStorage, Execute boot flow and run time flow """ global enclave_data if config.get("KvStorage") is None: logger.error("Kv Storage path is missing") sys.exit(-1) try: logger.debug("initialize the enclave") # Extended measurements is a list of enclave basename and # enclave measurement extended_measurements = \ enclave_helper.initialize_enclave(config.get("EnclaveModule")) except Exception as e: logger.exception("failed to initialize enclave; %s", str(e)) sys.exit(-1) logger.info("creating a new enclave") enclave_signup_data = create_enclave_signup_data() logger.info("initialize enclave_manager") enclave_manager = EnclaveManager( config, enclave_signup_data, extended_measurements) logger.info("Enclave manager started") try: kv_helper = connector.open(config['KvStorage']['remote_url']) except Exception as err: logger.error("Failed to open KV storage interface; " + "exiting SGX Enclave manager: {err}") sys.exit(-1) try: logger.info("--------------- Starting Boot time flow ----------------") enclave_manager.manager_on_boot(kv_helper) logger.info("--------------- Boot time flow Complete ----------------") except Exception as err: logger.error("Failed to execute boot time flow; " + "exiting SGX Enclave manager: {err}") exit(1) try: sleep_interval = int(config["EnclaveManager"]["sleep_interval"]) except Exception as err: logger.error("Failed to get sleep interval from config file. 
" + "Setting sleep interval to 10 seconds: %s", str(err)) sleep_interval = 10 try: while True: # Poll KV storage for new work-order requests and process enclave_manager.process_work_orders(kv_helper) logger.info("Enclave manager sleeping for %d secs", sleep_interval) time.sleep(sleep_interval) except Exception as inst: logger.error("Error while processing work-order; " + "shutting down enclave manager") logger.error("Exception: {} args {} details {}".format(type(inst), inst.args, inst)) exit(1) TCFHOME = os.environ.get("TCF_HOME", "../../../../") # ----------------------------------------------------------------- # ----------------------------------------------------------------- def parse_command_line(config, args): """ Parse command line arguments """ # global consensus_file_name parser = argparse.ArgumentParser() parser.add_argument( "--logfile", help="Name of the log file, __screen__ for standard output", type=str) parser.add_argument("--loglevel", help="Logging leve", type=str) parser.add_argument( "--lmdb_url", help="DB url to connect to lmdb", type=str) options = parser.parse_args(args) if config.get("Logging") is None: config["Logging"] = { "LogFile": "__screen__", "LogLevel": "INFO" } if options.logfile: config["Logging"]["LogFile"] = options.logfile if options.loglevel: config["Logging"]["LogLevel"] = options.loglevel.upper() if options.lmdb_url: config["KvStorage"]["remote_url"] = options.lmdb_url # ----------------------------------------------------------------- def main(args=None): import config.config as pconfig import utility.logger as plogger # parse out the configuration file first conffiles = ["tcs_config.toml"] confpaths = [".", TCFHOME + "/" + "config"] parser = argparse.ArgumentParser() parser.add_argument("--config", help="configuration file", nargs="+") parser.add_argument("--config-dir", help="configuration folder", nargs="+") (options, remainder) = parser.parse_known_args(args) if options.config: conffiles = options.config if options.config_dir: confpaths = options.config_dir try: config = pconfig.parse_configuration_files(conffiles, confpaths) json.dumps(config, indent=4) except pconfig.ConfigurationException as e: logger.error(str(e)) sys.exit(-1) plogger.setup_loggers(config.get("Logging", {})) sys.stdout = plogger.stream_to_logger( logging.getLogger("STDOUT"), logging.DEBUG) sys.stderr = plogger.stream_to_logger( logging.getLogger("STDERR"), logging.WARN) parse_command_line(config, remainder) logger.info("Starting Enclave manager") start_enclave_manager(config) main()
py
1a45a92e4f695d713f8569961033092677af0054
#!/usr/bin/python3
"""This module defines a class User"""
from models.base_model import BaseModel, Base
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from models.review import Review
from models.place import Place
from os import getenv


class User(BaseModel, Base):
    """This class defines a user by various attributes"""
    __tablename__ = "users"
    if getenv("HBNB_TYPE_STORAGE") == "db":
        email = Column(String(128), nullable=False)
        password = Column(String(128), nullable=False)
        first_name = Column(String(128))
        last_name = Column(String(128))
        places = relationship(
            "Place",
            cascade="delete",
            backref="user"
        )
        reviews = relationship(
            "Review",
            cascade="delete",
            backref="user"
        )
    else:
        email = password = first_name = last_name = ''
py
1a45a948020502f043f5e7654e14dac17a6c980e
from . import ExporterModel
from . import SERIAL_model
from . import RuntimeModel


class SERIAL(ExporterModel.Module):
    def __init__(self, name=None):
        if not name:
            name = self.__class__.__name__
        super(SERIAL, self).__init__(name, visible=True)
        self.model = SERIAL_model
py
1a45a956fb1d7b71ed0ba65ffa5ec303e191632c
from functools import reduce from sys import * import numpy as np import random as r import socket import struct import subprocess as sp import threading from threading import Thread import ast import time import datetime as dt import os import psutil from netifaces import interfaces, ifaddresses, AF_INET import paho.mqtt.client as mqtt import smtplib import config import paramiko import argparse import pickle import logging current_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(current_path) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s') _tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15}, 't2': {'wcet': 1, 'period': 5, 'deadline': 4}, 't3': {'wcet': 2, 'period': 10, 'deadline': 8}, 't4': {'wcet': 1, 'period': 10, 'deadline': 9}, 't5': {'wcet': 3, 'period': 15, 'deadline': 12} } # mat = {'p0': ['cpu', 'mem', 'storage']} _need = { 't1': [7, 4, 3], 't2': [1, 2, 2], 't3': [6, 0, 0], 't4': [0, 1, 1], 't5': [4, 3, 1] } allocation = { 't1': [0, 1, 0], 't2': [2, 0, 0], 't3': [3, 0, 2], 't4': [2, 1, 1], 't5': [0, 0, 2] } _cpu = [] # cpu plot list prev_t = 0 # variable for cpu util _off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec _off_cloud = 0 # used to keep a count of tasks offloaded to cloud _loc = 0 # used to keep a count of tasks executed locally _inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec deadlock = [1] # keeps count of how many deadlock is resolved memory = [] mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]} mec_rtt = {} # {ip: [RTT]} offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload reoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute. discovering = 0 # if discovering == 0 update host test = [] _time = [] _pos = 0 received_task_queue = [] # [[(task_list,wait_time), host_ip], ....] thread_record = [] _port_ = 64000 cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud cloud_port = 63000 received_time = [] task_record = {} # keeps record of task reoffloaded task_id = 0 # id for each task reoffloaded shared_resource_lock = threading.Lock() t_track = 1 def ping(host): cmd = [f'ping -c 1 {host}'] output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n') try: value = float(output[-2].split('=')[-1].split('/')[0]) except ValueError: value = None return value def discovering_group(): global sock1 multicast_group = '224.3.29.71' server_address = ('', 10000) # Create the socket sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Bind to the server address sock1.bind(server_address) # Tell the operating system to add the socket to the multicast group # on all interfaces. group = socket.inet_aton(multicast_group) mreq = struct.pack('4sL', group, socket.INADDR_ANY) sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) def offloading_group(): global sock2 multicast_group = '224.5.5.55' server_address = ('', 20000) # Create the socket sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Bind to the server address sock2.bind(server_address) # Tell the operating system to add the socket to the multicast group # on all interfaces. 
group = socket.inet_aton(multicast_group) mreq = struct.pack('4sL', group, socket.INADDR_ANY) sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) def ip_address(): try: # cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1'] cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d "t" -f 2 | cut -d " " -f 2'] address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1] if len(address.strip().split('.')) == 4: return address.strip() else: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) return s.getsockname()[0] except Exception as e: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) return s.getsockname()[0] def _memory(): global memory memory.append(round(my_algo.memory_percent(), 4)) def m_cpu(): global prev_t # get cpu next_t = psutil.cpu_percent(percpu=False) delta = abs(prev_t - next_t) prev_t = next_t _cpu.append(round(delta, 4)) def get_mec_rtts(): for i in mec_rtt: mec_rtt[i].append(get_rtt(i)) def generate_results(): _memory() m_cpu() get_mec_rtts() def host_ip_set(): global ip_set ip_set = set() for ifaceName in interfaces(): addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])] ip_set.add(', '.join(addresses)) def get_time(): _time_ = [] d = str(dt.datetime.utcnow()).split() _time_ += d[0].split('-') g = d[1].split('.') _time_ += g[0].split(':') _time_.append(g[1]) return _time_ def get_rtt(host): rtt = ping(host) if rtt: return round(rtt, 4) else: return get_rtt(host) def gcd(a, b): if b == 0: return a return gcd(b, a % b) def _lcm(a, b): return int(a * b / gcd(a, b)) def lcm(_list): return reduce(_lcm, _list) def gosh_dist(_range): return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range def on_connect(connect_client, userdata, flags, rc): # logger.info("Connected with Code :" +str(rc)) # Subscribe Topic from here connect_client.subscribe(node_id) # Callback Function on Receiving the Subscribed Topic/Message def on_message(message_client, userdata, msg): global run data = str(msg.payload, 'utf-8') if data[0] == 'c': # receive from cloud received_task = data[2:] # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]]) if received_task in task_record: del task_record[received_task] received_task = '.'.join(received_task.split('.')[:-1]) _client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), ) cooperate['cloud'] += 1 count_task_sent(received_task) elif data[0] == 't': # receive from client received_task = ast.literal_eval(data[2:]) received_task_queue.append(received_task) received_time.append(time.time()) elif data.strip() == 'stop': # stop {hostname: ip} logger.info('sending stop alert') run = 0 def connect_to_broker(stop): global _client username = 'mec' password = 'password' broker_port_no = 1883 _client = mqtt.Client() _client.on_connect = on_connect _client.on_message = on_message _client.username_pw_set(username, password) _client.connect(broker_ip, broker_port_no, 60) _client.loop_start() while True: if stop(): _client.loop_stop() _client.disconnect() logger.info('broker loop terminated') break def task_time_map(seq, process): exe_seq = [] capacity_sum = 0 for job in process: capacity_sum += process[job]['wcet'] while capacity_sum > 0: for job in seq: if process[job]['wcet'] > 0: exe_seq.append(job) process[job]['wcet'] -= 1 capacity_sum -= 1 return exe_seq def load_tasks(): period_list = [tasks[i]['period'] for i in tasks] lcm_period = 
lcm(period_list) # insert idle task s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}} return lcm_period, s_task total_received_task = 0 def scheduler(_lcm_, s_tasks): # RMS algorithm global total_received_task queue = list(s_tasks.keys()) # initialize task queue schedule = [] rms = [] curr = '' # current task prev = '' # previous task tmp = {} for task in s_tasks.keys(): tmp[task] = {} # temporary data for each task tmp[task]['deadline'] = s_tasks[task]['period'] tmp[task]['executed'] = 0 # start scheduling... # proceed by one timestamp to handle preemption for _time_ in range(_lcm_): # insert new tasks into the queue for t in tmp.keys(): if _time_ == tmp[t]['deadline']: if s_tasks[t]['wcet'] > tmp[t]['executed']: # logger.info('Scheduling Failed at %d' % time) exit(1) else: tmp[t]['deadline'] += s_tasks[t]['period'] tmp[t]['executed'] = 0 queue.append(t) # select next task to be scheduled _min_ = _lcm_ * 2 for task in queue: if tmp[task]['deadline'] < _min_: _min_ = tmp[task]['deadline'] curr = task tmp[curr]['executed'] += 1 # logger.info(time, queue, curr) # dequeue the execution-completed task if tmp[curr]['executed'] == s_tasks[curr]['wcet']: for i in range(len(queue)): if curr == queue[i]: del queue[i] break # record to the schedule trace if prev != curr: if prev in queue and prev != 'idle': # previous task is preempted.. s = schedule.pop() schedule.append([s[0], s[1], '*']) rms.append(s[1]) schedule.append([_time_, curr]) if curr != 'idle': rms.append(curr) prev = curr process = {task: {'wcet': tasks[task]['wcet']} for task in tasks} rms = task_time_map(seq=rms, process=process) total_received_task += len(rms) return rms # generate execution sequence with wait_die def wait_die(processes, avail, n_need, allocat): global deadlock offload = [] # To store execution sequence exec_seq = [] # Make a copy of available resources work = [0] * len(processes) # While all processes are not finished # or system is not in safe state. 
while 'w' or 0 in work: if 0 in work: ind = work.index(0) i = processes[ind] elif 'w' in work: # logger.info('wk: ', work) ind = work.index('w') i = processes[ind] else: break # logger.info('comparing| process: ', i, n_need[i], 'work: ', avail) if not (False in list(np.greater_equal(avail, n_need[i]))): exec_seq.append(i) avail = np.add(avail, allocat[i]) work[ind] = 1 # logger.info('added: ', exec_seq) else: a = list(set(processes) - set(exec_seq) - set(offload)) n = {} for j in a: n[j] = sum(allocat[j]) _max = max(n, key=n.get) # logger.info('work: ', work, 'need: ', n_need[_max]) if processes.index(_max) > processes.index(i): # if true, i is older # if process is already waiting then offload process if work[ind] == 'w': offload.append(i) avail = np.array(avail) + np.array(allocat[i]) work[processes.index(i)] = 1 # logger.info('offload reentry: ', i, offload) else: # wait put process to waiting work[processes.index(i)] = 'w' # logger.info('waiting: ', i) else: # abort i offload.append(i) avail = np.array(avail) + np.array(allocat[i]) work[processes.index(i)] = 1 # logger.info('offload: ', i) if len(offload) > 0: logger.info(f'offloading tasks: {offload}') cooperative_mec(offload) deadlock[0] += 1 logger.info(f'Execution seq: {exec_seq}') return exec_seq def get_exec_seq(pro): # Number of processes p = len(pro) processes = ['{}_{}'.format(pro[i], i) for i in range(p)] # Available instances of resources avail = [6, 5, 5] n_need = {i: _need[i[:2]] for i in processes} # logger.info('need', n_need) # Resources allocated to processes allot = {i: allocation[i[:2]] for i in processes} # return execution sequence return wait_die(processes, avail, n_need, allot) def calc_wait_time(list_seq): pre = 0 time_dic = {} for i in list_seq: j = i.split('_')[0] time_dic[i] = round(t_time[j][0] + pre, 3) pre += t_time[j][0] # waiting time = total waiting time ÷ 2 average waiting time might be too tight w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3) send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs return time_dic def compare_local_mec(list_seq): time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq} logger.info(f'local vs MEC comparison: {time_compare_dict}') execute_mec = [] execute_locally = [] for i in time_compare_dict: if time_compare_dict[i]: execute_locally.append(i) else: execute_mec.append(i) return execute_mec, execute_locally def calculate_mov_avg(ma1, a1): if ma1 in mec_waiting_time: _count = len(mec_waiting_time[ma1]) avg1 = mec_waiting_time[ma1][-1] else: _count = 0 avg1 = 0 _count += 1 avg1 = ((_count - 1) * avg1 + a1) / _count # ma1.append(avg1) #cumulative average formula # μ_n=((n-1) μ_(n-1) + x_n)/n return round(avg1, 4) def send_message(mg): _multicast_group = ('224.3.29.71', 10000) try: # Send data to the multicast group if mg == 'hello': smg = mg + ' ' + str([get_hostname(), ip_address()]) sock1.sendto(str.encode(smg), _multicast_group) logger.info('\nHello message sent') else: sock1.sendto(str.encode(mg), _multicast_group) except Exception as e: logger.info(str(e)) def get_hostname(): cmd = ['cat /etc/hostname'] hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1] return hostname def receive_message(stop): # used for multi-cast message exchange among MEC global hosts while True: if stop(): logger.info('Stopped: receive_message()') break else: data, address = sock1.recvfrom(1024) _d = data.decode() if _d[:5] == 'hello': _data = ast.literal_eval(_d[6:]) hosts[_data[0]] = _data[1] 
if _data[1] != host_ip: mec_rtt[_data[1]] = [] elif (_d[:6] == 'update') and (discovering == 0): hosts = ast.literal_eval(_d[7:]) # logger.info('received: ', hosts) for i in hosts: if i != host_ip: mec_rtt[i] = [] elif _d[:2] == 'wt': split_data = _d.split() if split_data[1] != host_ip: w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt( address[0])) # calcuate moving average of mec wait time => w_time = wait time + rtt if split_data[1] in mec_waiting_time: mec_waiting_time[split_data[1]].append(w_time) else: mec_waiting_time[split_data[1]] = [w_time] def mec_comparison(): # returns min average waiting for all mecs if len(mec_waiting_time) == 0: return 0 min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time} min_wt = min(min_mec, key=min_mec.get) return min_wt def cooperative_mec(mec_list): global _off_cloud global _off_mec global task_id, task_record for i in mec_list: _host = mec_comparison() if _host == 0: # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time] _send_task = f"{i.split('_')[0]}.{task_id}" _client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), ) task_record[_send_task] = 'cloud' task_id += 1 _off_cloud += 1 # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host logger.info('\n=========SENDING {} TO CLOUD==========='.format(i)) else: j = i.split('_')[0] _max = np.array([6, 5, 5]) send = 'false' if not (False in list(np.greater_equal(_max, _need[j[:2]]))): send = 'true' # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true': _send_task = f"{j}.{task_id}" send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]])) task_record[_send_task] = 'mec' task_id += 1 _off_mec += 1 # SENDS TASK TO MEC FOR EXECUTION w_send = mec_waiting_time[_host][-1] + 0.001 mec_waiting_time[_host].append(w_send) # adds a new average waiting time logger.info('\n======SENDING {} TO MEC {}========='.format(i, _host)) elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)): _send_task = f"{j}.{task_id}" send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]])) task_record[_send_task] = 'mec' task_id += 1 _off_mec += 1 # SENDS TASK TO MEC FOR EXECUTION w_send = mec_waiting_time[_host][-1] + 0.001 mec_waiting_time[_host].append(w_send) # adds a new average waiting time logger.info('\n======SENDING {} TO MEC {}========='.format(i, _host)) else: _send_task = f"{j}.{task_id}" _client.publish(cloud_ip, str([_send_task, t_time[j][0]]), ) task_record[_send_task] = 'cloud' task_id += 1 _off_cloud += 1 # send_cloud([j, t_time[j][0]]) # # [task_id,exec_time] # cloud_register[j.split('.')[2]] = send_back_host logger.info('\n=========SENDING {} TO CLOUD==========='.format(i)) outward_mec = 0 offload_check = [0, 0] def execute_re_offloaded_task(offloaded_task): global outward_mec, offload_check exec_list = get_exec_seq(offloaded_task[0]) outward_mec += len(exec_list) for i in offloaded_task[0]: # i = 't1.1.2.3*1_3' j = i.split('_')[0] time.sleep(offloaded_task[1][j] / 2) # logger.info('j task: ', j) send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0])) clients_record = {} def count_task_sent(task): global clients_record c_id = task.split('.')[2] if c_id in clients_record: clients_record[c_id] += 1 else: clients_record[c_id] = 1 def execute(local): logger.info(f'\nExecuting :{local}') for i in local: j = i.split('_')[0] _t = t_time[j][0] / 2 time.sleep(_t) 
logger.info('#{}'.format(local.index(i) + 1) + f' Executed: {i}') _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), ) count_task_sent(j) logger.info('============== EXECUTION DONE ===============') cooperate = {'mec': 0, 'cloud': 0} def receive_offloaded_task_mec(stop): # run as a thread global _inward_mec global t_track while True: if stop(): logger.info('Stopped: receive_offloaded_task_mec()') break else: data, address = sock2.recvfrom(1024) if len(data.decode()) > 0: da = data.decode().split(' ') if (address[0] not in ip_set) and (da[0] == node_id): # send back to client # send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client if da[1] in task_record: del task_record[da[1]] task_new = '.'.join(da[1].split('.')[:-1]) _client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), ) count_task_sent(da[1]) cooperate['mec'] += 1 else: logger.info('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30) elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id): _received = ast.literal_eval(da[2] + da[3]) shared_resource_lock.acquire() task = _received[0] + '*{}'.format(t_track) reoffload_list[0].append(task) reoffload_list[1][task] = _received[1] shared_resource_lock.release() t_track += 1 _inward_mec += 1 def call_execute_re_offload(stop): global reoffload_list, outward_mec global offload_check while True: if stop(): logger.info('Stopped: call_execute_re_offload()') break else: if len(reoffload_list[0]) == 1: t = reoffload_list[0][-1] time.sleep(reoffload_list[1][t] / 2) shared_resource_lock.acquire() reoffload_list[0].remove(t) del reoffload_list[1][t] shared_resource_lock.release() send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0])) outward_mec += 1 offload_check[0] += 1 elif len(reoffload_list[0]) > 1: o = reoffload_list.copy() offload_check[1] += len(o) execute_re_offloaded_task(o) for i in o[0]: shared_resource_lock.acquire() reoffload_list[0].remove(i) del reoffload_list[1][i] shared_resource_lock.release() def send_email(msg, send_path): try: server = smtplib.SMTP_SSL('smtp.gmail.com') server.ehlo() server.login(config.email_address, config.password) subject = 'Deadlock results rms+wait-die {} {}'.format(get_hostname(), send_path) # msg = 'Attendance done for {}'.format(_timer) _message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg) server.sendmail(config.email_address, config.send_email, _message) server.quit() logger.info("Email sent!") except Exception as e: logger.info(str(e)) def send_offloaded_task_mec(msg): _multicast_group = ('224.5.5.55', 20000) try: sock2.sendto(str.encode(msg), _multicast_group) except Exception as e: logger.info(str(e)) def mec_id(client_ip): _id = client_ip.split('.')[-1] if len(_id) == 1: return '00' + _id elif len(_id) == 2: return '0' + _id else: return _id def send_result(host_, data): try: c = paramiko.SSHClient() un = 'mec' pw = 'password' port = 22 c.set_missing_host_key_policy(paramiko.AutoAddPolicy()) c.connect(host_, port, un, pw) for i in data: cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task stdin, stdout, stderr = c.exec_command(cmd) except Exception as e: logger.info(str(e)) def save_and_send(send_path): _id_ = get_hostname()[-1] result = f"\nwt{_id_}_10_{mec_no} = {mec_waiting_time} " \ f"\nrtt{_id_}_10_{mec_no} = {mec_rtt} \ncpu{_id_}_10_{mec_no} = {_cpu} " \ f"\noff_mec{_id_}_10_{mec_no} = {_off_mec} " \ f"\noff_cloud{_id_}_10_{mec_no} = {_off_cloud} " \ f"\ninward_mec{_id_}_10_{mec_no} = 
{_inward_mec}" \ f"\nloc{_id_}_10_{mec_no} = {_loc} " \ f"\ndeadlock{_id_}_10_{mec_no} = {deadlock} \nmemory{_id_}_10_{mec_no} = {memory}" \ f"\ntask_received{_id_}_10_{mec_no} = {total_received_task} \nsent_t{_id_}_10_{mec_no} = {clients_record}" \ f"\ncooperate{_id_}_10_{mec_no} = {cooperate} \ntask_record{_id_}_10_{mec_no} = {task_record}" \ f"\noutward_mec{_id_}_10_{mec_no} = {outward_mec}" \ f"\noffload_check{_id_}_10_{mec_no} = {offload_check}" list_result = [ f"\nwt{_id_}_10_{mec_no} = {mec_waiting_time} ", f"\nrtt{_id_}_10_{mec_no} = {mec_rtt} \ncpu{_id_}_10_{mec_no} = {_cpu} ", f"\noff_mec{_id_}_10_{mec_no} = {_off_mec} \noff_cloud{_id_}_10_{mec_no} = {_off_cloud} ", f"\ninward_mec{_id_}_10_{mec_no} = {_inward_mec}", f"\nloc{_id_}_10_{mec_no} = {_loc} ", f"\ndeadlock{_id_}_10_{mec_no} = {deadlock} \nmemory{_id_}_10_{mec_no} = {memory}", f"\ntask_received{_id_}_10_{mec_no} = {total_received_task} \nsent_t{_id_}_10_{mec_no} = {clients_record}", f"\ncooperate{_id_}_10_{mec_no} = {cooperate} \ntask_record{_id_}_10_{mec_no} = {task_record} " f"\noutward_mec{_id_}_10_{mec_no} = {outward_mec}", f"\noffload_check{_id_}_10_{mec_no} = {offload_check}" ] file_ = open(f'{_id_}_10_{mec_no}datap.py', 'w') for i in list_result: file_.write(i) file_.close() cmd = f'mv {_id_}_10_{mec_no}datap.py {send_path}' os.system(cmd) send_email(result, send_path) if len(task_record) > 0: for _task_ in task_record: task_new = '.'.join(_task_.split('.')[:-1]) _client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), ) run = 1 # tell agents child when to stop def start_loop(): global _loc global tasks global t_time global node_id global run logger.info('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n') node_id = mec_id(ip_address()) # logger.info('node id: ', node_id) func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker] threads_ = [] stop = False for i in func_to_thread: threads_.append(Thread(target=i, args=(lambda: stop,))) threads_[-1].daemon = True threads_[-1].start() logger.info('algorithm is starting....') logger.info('========= Waiting for tasks ==========') while run == 1: try: if len(received_task_queue) > 0: info = received_task_queue.pop(0) tasks, t_time = info logger.info(f'EDF List of Processes: {tasks}\n') logger.info('\n========= Running Deadlock Algorithm ===========') lcm_result, task_load = load_tasks() list_seq = get_exec_seq(scheduler(lcm_result, task_load)) if len(list_seq) > 0: # do only when there is a task in safe sequence wait_list = calc_wait_time(list_seq) logger.info(f'\nWaiting Time List: {wait_list}') compare_result = compare_local_mec(wait_list) logger.info(f'\nExecute Locally: {compare_result[1]}') _loc += len(compare_result[1]) # total number of tasks to be executed locally logger.info(f'\nExecute in MEC: {compare_result[0]}') logger.info('\nSending to cooperative platform') if len(compare_result[0]) > 0: cooperative_mec(compare_result[0]) execute(compare_result[1]) generate_results() _time_ = dt.datetime.now() else: send_message(str('wt {} 0.0'.format(ip_address()))) time.sleep(.5) except KeyboardInterrupt: logger.info('\nProgramme Terminated') stop = False cmd = 'kill -9 {}'.format(os.getpid()) os.system(cmd) break logger.info('algo stopped!') def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent global discovering global hosts global mec_no global host_ip global cloud_ip global my_algo global broker_ip logger.info(f'mec ip: 
{ip_address()}') my_algo = psutil.Process() discovering_group() offloading_group() host_ip_set() hosts = hosts_ mec_no = mec_no_ cloud_ip = cloud_ip_ broker_ip = broker_ip_ host_ip = ip_address() logger.info(f'MEC Details: {hosts}') discovering = 1 time.sleep(2) for host in hosts: if hosts[host] != host_ip: mec_rtt[hosts[host]] = [] start_loop() logger.info('saving data') save_and_send(send_path) logger.info('send alert to control') time.sleep(r.uniform(1, 10)) _client.publish('control/control', pickle.dumps(['stop', ip_address()])) logger.info('Terminating process') cmd = 'kill -9 {}'.format(os.getpid()) os.system(cmd) def main(): # (hosts_, mec_no_, cloud_ip_, send_path, broker_ip_) , (--hosts, --mec_no_, --cloud_ip, --s_path, --b_ip) parser = argparse.ArgumentParser() parser.add_argument('--hosts', type=str, help="{hostname: 'ip address', ...} of all mec") parser.add_argument('--mec_no', type=int, default=1.0, help='Number of MEC nodes') parser.add_argument('--cloud_ip', type=str, help="cloud ip address") parser.add_argument('--s_path', type=str, default='/home/mec/result/python', help='Path to send result to') parser.add_argument('--b_ip', type=str, help='Broker ip address') args = parser.parse_args() # h_hosts = ast.literal_eval(args.hosts) l_host, l_len = args.hosts.split('_'), len(args.hosts.split('_')) h_hosts = dict(zip(l_host[:l_len//2], l_host[l_len//2:])) f_name = os.path.basename(__file__).split('/')[-1].split('.')[0] tim = dt.datetime.now().strftime("%a_%H%M") name = f'logs/{f_name}_{tim}_{args.mec_no}' file_handler = logging.FileHandler(name) file_handler.setFormatter(formatter) logger.addHandler(file_handler) logger.info('Process Started') run_me(hosts_=h_hosts, mec_no_=args.mec_no, cloud_ip_=args.cloud_ip, send_path=args.s_path, broker_ip_=args.b_ip) if __name__ == '__main__': main()
py
1a45aba9ffad3ba14c3e640b87a8c8d2d176cb93
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from bigdl.optim.optimizer import OptimMethod
from zoo.util.tf import process_grad


class FakeOptimMethod(OptimMethod):

    def __init__(self):
        super(FakeOptimMethod, self).__init__(None, "float")


# cannot subclass tf.train.Optimizer without importing it
import tensorflow as tf


class ZooOptimizer(tf.train.Optimizer):
    """An optimizer that wraps another tf.Optimizer, using an allreduce to
    combine gradient values before applying gradients to model weights."""

    def __init__(self, optimizer, name=None):
        if name is None:
            name = "Zoo{}".format(type(optimizer).__name__)

        super(ZooOptimizer, self).__init__(name=name, use_locking=False)

        self._optimizer = optimizer

    def compute_gradients(self, *args, **kwargs):
        """Compute gradients of all trainable variables.

        See Optimizer.compute_gradients() for more info.

        In DistributedOptimizer, compute_gradients() is overriden to also
        allreduce the gradients before returning them.
        """
        gradients = self._optimizer.compute_gradients(*args, **kwargs)
        results = []
        for grad_var in gradients:
            grad = grad_var[0]
            var = grad_var[1]
            grad = process_grad(grad)
            with tf.control_dependencies([var]):
                grad_i = tf.identity(grad, name="zoo_identity_op_for_grad")
            results.append((grad_i, var))
        return results

    def apply_gradients(self, *args, **kwargs):
        """Calls this same method on the underlying optimizer."""
        return self._optimizer.apply_gradients(*args, **kwargs)

    def get_slot(self, *args, **kwargs):
        """Calls this same method on the underlying optimizer."""
        return self._optimizer.get_slot(*args, **kwargs)

    def get_slot_names(self, *args, **kwargs):
        """Calls this same method on the underlying optimizer."""
        return self._optimizer.get_slot_names(*args, **kwargs)

    def variables(self, *args, **kwargs):
        """Calls this same method on the underlying optimizer."""
        return self._optimizer.variables(*args, **kwargs)

    # The remaining hooks delegate to the wrapped optimizer and return its
    # update op; dense updates go to the dense method, sparse to the sparse one.
    def _resource_apply_sparse(self, *args, **kwargs):
        return self._optimizer._resource_apply_sparse(*args, **kwargs)

    def _resource_apply_dense(self, *args, **kwargs):
        return self._optimizer._resource_apply_dense(*args, **kwargs)

    def _apply_sparse(self, *args, **kwargs):
        return self._optimizer._apply_sparse(*args, **kwargs)

    def _apply_dense(self, *args, **kwargs):
        return self._optimizer._apply_dense(*args, **kwargs)
py
1a45acc09cab962c95a7e1504e9ef938f672e83e
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

sys.path.insert(0, os.path.abspath(os.pardir))

import graphviz

# -- Project information -----------------------------------------------------

project = 'graphviz'
copyright = '2013-2020, Sebastian Bank'
author = 'Sebastian Bank'

# The short X.Y version
version = '0.14.2.dev0'
# The full version, including alpha/beta/rc tags
release = version

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'graphvizdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'graphviz.tex', 'graphviz Documentation',
     'Sebastian Bank', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'graphviz', 'graphviz Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'graphviz', 'graphviz Documentation',
     author, 'graphviz', 'One line description of project.',
     'Miscellaneous'),
]

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'py': ('https://docs.python.org/2', None),
    'py3': ('https://docs.python.org/3', None),
}

# monkey patch, see https://github.com/sphinx-doc/sphinx/issues/2044
from sphinx.ext.autodoc import ClassLevelDocumenter, InstanceAttributeDocumenter


def add_directive_header(self, sig):
    ClassLevelDocumenter.add_directive_header(self, sig)


InstanceAttributeDocumenter.add_directive_header = add_directive_header
py
1a45acd6bc3e3c0a1e1802fe7523859a579378a7
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('reo', '0039_loadprofilemodel_gen_energy_at_start_of_outage_kwh'),
    ]

    operations = [
        migrations.RenameField(
            model_name='loadprofilemodel',
            old_name='gen_energy_at_start_of_outage_kwh',
            new_name='fuel_avail_before_outage_pct',
        ),
    ]
py
1a45acf17435050d7a9bbcd0e2e6604945fa618e
# Copyright (C) 2021, Mindee.

# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.

import json
import os
from typing import Any, Callable, List, Optional, Tuple

import numpy as np

from doctr.utils.geometry import fit_rbbox

from .datasets import AbstractDataset

__all__ = ["DetectionDataset"]


class DetectionDataset(AbstractDataset):
    """Implements a text detection dataset

    Example::
        >>> from doctr.datasets import DetectionDataset
        >>> train_set = DetectionDataset(img_folder="/path/to/images", label_path="/path/to/labels.json")
        >>> img, target = train_set[0]

    Args:
        img_folder: folder with all the images of the dataset
        label_path: path to the annotations of each image
        sample_transforms: composable transformations that will be applied to each image
        rotated_bbox: whether polygons should be considered as rotated bounding box (instead of straight ones)
    """

    def __init__(
        self,
        img_folder: str,
        label_path: str,
        sample_transforms: Optional[Callable[[Any], Any]] = None,
        rotated_bbox: bool = False,
    ) -> None:
        super().__init__(img_folder)
        self.sample_transforms = sample_transforms

        # File existence check
        if not os.path.exists(label_path):
            raise FileNotFoundError(f"unable to locate {label_path}")
        with open(label_path, 'rb') as f:
            labels = json.load(f)

        self.data: List[Tuple[str, np.ndarray]] = []
        for img_name, label in labels.items():
            # File existence check
            if not os.path.exists(os.path.join(self.root, img_name)):
                raise FileNotFoundError(f"unable to locate {os.path.join(self.root, img_name)}")

            polygons = np.asarray(label['polygons'])
            if rotated_bbox:
                # Switch to rotated rects
                boxes = np.asarray([list(fit_rbbox(poly)) for poly in polygons])
            else:
                # Switch to xmin, ymin, xmax, ymax
                boxes = np.concatenate((polygons.min(axis=1), polygons.max(axis=1)), axis=1)

            self.data.append((img_name, np.asarray(boxes, dtype=np.float32)))

    def __getitem__(
        self,
        index: int
    ) -> Tuple[Any, np.ndarray]:

        img, boxes = self._read_sample(index)
        h, w = self._get_img_shape(img)
        if self.sample_transforms is not None:
            img = self.sample_transforms(img)

        # Boxes
        boxes = boxes.copy()
        boxes[..., [0, 2]] /= w
        boxes[..., [1, 3]] /= h
        boxes = boxes.clip(0, 1)

        return img, boxes
py
1a45ad18949eb790a34bddfce6a002d4df4a587e
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Dummy router supporting IGD.""" # Instructions: # - Change `SOURCE``. When using IPv6, be sure to set the scope_id, the last value in the tuple. # - Run this module. # - Run upnp-client (change IP to your own IP): # upnp-client call-action 'http://0.0.0.0:8000/device.xml' \ # WANCIC/GetTotalPacketsReceived import asyncio import logging import time import xml.etree.ElementTree as ET from typing import Dict, Mapping, Sequence, Type from async_upnp_client.client import UpnpRequester, UpnpStateVariable from async_upnp_client.const import ( STATE_VARIABLE_TYPE_MAPPING, DeviceInfo, ServiceInfo, StateVariableTypeInfo, ) from .server import UpnpServerDevice, UpnpServerService, callable_action, run_server logging.basicConfig(level=logging.DEBUG) LOGGER = logging.getLogger("dummy_router") LOGGER_SSDP_TRAFFIC = logging.getLogger("async_upnp_client.traffic") LOGGER_SSDP_TRAFFIC.setLevel(logging.WARNING) SOURCE = ("172.24.83.184", 0) # Your IP here! # SOURCE = ("fe80::215:5dff:fe3e:6d23", 0, 0, 6) # Your IP here! HTTP_PORT = 8000 class WANIPConnectionService(UpnpServerService): """WANIPConnection service.""" SERVICE_DEFINITION = ServiceInfo( service_id="urn:upnp-org:serviceId:WANIPConnection1", service_type="urn:schemas-upnp-org:service:WANIPConnection:1", control_url="/upnp/control/WANIPConnection1", event_sub_url="/upnp/event/WANIPConnection1", scpd_url="/WANIPConnection_1.xml", xml=ET.Element("server_service"), ) STATE_VARIABLE_DEFINITIONS = { "ExternalIPAddress": StateVariableTypeInfo( data_type="string", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"], default_value="1.2.3.4", allowed_value_range={}, allowed_values=None, xml=ET.Element("server_stateVariable"), ), "ConnectionStatus": StateVariableTypeInfo( data_type="string", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"], default_value="Unconfigured", allowed_value_range={}, allowed_values=[ "Unconfigured", "Authenticating", "Connecting", "Connected", "PendingDisconnect", "Disconnecting", "Disconnected", ], xml=ET.Element("server_stateVariable"), ), "LastConnectionError": StateVariableTypeInfo( data_type="string", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"], default_value="ERROR_NONE", allowed_value_range={}, allowed_values=[ "ERROR_NONE", ], xml=ET.Element("server_stateVariable"), ), "Uptime": StateVariableTypeInfo( data_type="ui4", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"], default_value="0", allowed_value_range={}, allowed_values=None, xml=ET.Element("server_stateVariable"), ), } @callable_action( name="GetStatusInfo", in_args={}, out_args={ "NewConnectionStatus": "ConnectionStatus", "NewLastConnectionError": "LastConnectionError", "NewUptime": "Uptime", }, ) async def get_status_info(self) -> Dict[str, UpnpStateVariable]: """Get status info.""" # from async_upnp_client.exceptions import UpnpActionError, UpnpActionErrorCode # raise UpnpActionError( # error_code=UpnpActionErrorCode.INVALID_ACTION, error_desc="Invalid action" # ) return { "NewConnectionStatus": self.state_variable("ConnectionStatus"), "NewLastConnectionError": self.state_variable("LastConnectionError"), "NewUptime": self.state_variable("Uptime"), } @callable_action( name="GetExternalIPAddress", in_args={}, out_args={ "NewExternalIPAddress": "ExternalIPAddress", }, ) async def get_external_ip_address(self) -> Dict[str, UpnpStateVariable]: """Get external IP address.""" # from async_upnp_client.exceptions import UpnpActionError, UpnpActionErrorCode # raise UpnpActionError( # 
error_code=UpnpActionErrorCode.INVALID_ACTION, error_desc="Invalid action" # ) return { "NewExternalIPAddress": self.state_variable("ExternalIPAddress"), } class WanConnectionDevice(UpnpServerDevice): """WAN Connection device.""" DEVICE_DEFINITION = DeviceInfo( device_type="urn:schemas-upnp-org:device:WANConnectionDevice:1", friendly_name="Dummy Router WAN Connection Device", manufacturer="Steven", model_name="DummyRouter v1", udn="uuid:51e00c19-c8f3-4b28-9ef1-7f562f204c82", model_description="Dummy Router IGD", model_number="v0.0.1", serial_number="0000001", url="/device.xml", icons=[], xml=ET.Element("server_device"), ) EMBEDDED_DEVICES: Sequence[Type[UpnpServerDevice]] = [] SERVICES = [WANIPConnectionService] def __init__(self, requester: UpnpRequester, base_uri: str) -> None: """Initialize.""" super().__init__( requester=requester, base_uri=base_uri, ) class WANCommonInterfaceConfigService(UpnpServerService): """WANCommonInterfaceConfig service.""" SERVICE_DEFINITION = ServiceInfo( service_id="urn:upnp-org:serviceId:WANCommonInterfaceConfig1", service_type="urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1", control_url="/upnp/control/WANCommonInterfaceConfig1", event_sub_url="/upnp/event/WANCommonInterfaceConfig1", scpd_url="/WANCommonInterfaceConfig_1.xml", xml=ET.Element("server_service"), ) STATE_VARIABLE_DEFINITIONS = { "TotalBytesReceived": StateVariableTypeInfo( data_type="ui4", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"], default_value="0", allowed_value_range={}, allowed_values=None, xml=ET.Element("server_stateVariable"), ), "TotalBytesSent": StateVariableTypeInfo( data_type="ui4", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"], default_value="0", allowed_value_range={}, allowed_values=None, xml=ET.Element("server_stateVariable"), ), "TotalPacketsReceived": StateVariableTypeInfo( data_type="ui4", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"], default_value="0", allowed_value_range={}, allowed_values=None, xml=ET.Element("server_stateVariable"), ), "TotalPacketsSent": StateVariableTypeInfo( data_type="ui4", data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"], default_value="0", allowed_value_range={}, allowed_values=None, xml=ET.Element("server_stateVariable"), ), } MAX_COUNTER = 2**32 def _update_bytes(self, state_var_name: str) -> None: """Update bytes state variable.""" new_bytes = int(time.time() * 1000) % self.MAX_COUNTER self.state_variable(state_var_name).value = new_bytes def _update_packets(self, state_var_name: str) -> None: """Update state variable values.""" new_packets = int(time.time()) % self.MAX_COUNTER self.state_variable(state_var_name).value = new_packets self.state_variable(state_var_name).value = new_packets @callable_action( name="GetTotalBytesReceived", in_args={}, out_args={ "NewTotalBytesReceived": "TotalBytesReceived", }, ) async def get_total_bytes_received(self) -> Dict[str, UpnpStateVariable]: """Get total bytes received.""" self._update_bytes("TotalBytesReceived") return { "NewTotalBytesReceived": self.state_variable("TotalBytesReceived"), } @callable_action( name="GetTotalBytesSent", in_args={}, out_args={ "NewTotalBytesSent": "TotalBytesSent", }, ) async def get_total_bytes_sent(self) -> Dict[str, UpnpStateVariable]: """Get total bytes sent.""" self._update_bytes("TotalBytesSent") return { "NewTotalBytesSent": self.state_variable("TotalBytesSent"), } @callable_action( name="GetTotalPacketsReceived", in_args={}, out_args={ "NewTotalPacketsReceived": "TotalPacketsReceived", }, ) async def 
get_total_packets_received(self) -> Dict[str, UpnpStateVariable]: """Get total packets received.""" self._update_packets("TotalPacketsReceived") return { "NewTotalPacketsReceived": self.state_variable("TotalPacketsReceived"), } @callable_action( name="GetTotalPacketsSent", in_args={}, out_args={ "NewTotalPacketsSent": "TotalPacketsSent", }, ) async def get_total_packets_sent(self) -> Dict[str, UpnpStateVariable]: """Get total packets sent.""" self._update_packets("TotalPacketsSent") return { "NewTotalPacketsSent": self.state_variable("TotalPacketsSent"), } class WanDevice(UpnpServerDevice): """WAN device.""" DEVICE_DEFINITION = DeviceInfo( device_type="urn:schemas-upnp-org:device:WANDevice:1", friendly_name="Dummy Router WAN Device", manufacturer="Steven", model_name="DummyRouter v1", udn="uuid:51e00c19-c8f3-4b28-9ef1-7f562f204c81", model_description="Dummy Router IGD", model_number="v0.0.1", serial_number="0000001", url="/device.xml", icons=[], xml=ET.Element("server_device"), ) EMBEDDED_DEVICES = [WanConnectionDevice] SERVICES = [WANCommonInterfaceConfigService] def __init__(self, requester: UpnpRequester, base_uri: str) -> None: """Initialize.""" super().__init__( requester=requester, base_uri=base_uri, ) class Layer3ForwardingService(UpnpServerService): """Layer3Forwarding service.""" SERVICE_DEFINITION = ServiceInfo( service_id="urn:upnp-org:serviceId:Layer3Forwarding1", service_type="urn:schemas-upnp-org:service:Layer3Forwarding:1", control_url="/upnp/control/Layer3Forwarding1", event_sub_url="/upnp/event/Layer3Forwarding1", scpd_url="/Layer3Forwarding_1.xml", xml=ET.Element("server_service"), ) STATE_VARIABLE_DEFINITIONS: Mapping[str, StateVariableTypeInfo] = {} class IgdDevice(UpnpServerDevice): """IGD device.""" DEVICE_DEFINITION = DeviceInfo( device_type="urn:schemas-upnp-org:device:InternetGatewayDevice:1", friendly_name="Dummy Router", manufacturer="Steven", model_name="DummyRouter v1", udn="uuid:51e00c19-c8f3-4b28-9ef1-7f562f204c80", model_description="Dummy Router IGD", model_number="v0.0.1", serial_number="0000001", url="/device.xml", icons=[], xml=ET.Element("server_device"), ) EMBEDDED_DEVICES = [WanDevice] SERVICES = [Layer3ForwardingService] def __init__(self, requester: UpnpRequester, base_uri: str) -> None: """Initialize.""" super().__init__( requester=requester, base_uri=base_uri, ) async def async_main() -> None: """Main.""" await run_server(SOURCE, HTTP_PORT, IgdDevice) if __name__ == "__main__": asyncio.run(async_main())
py
1a45af10c57080483fe4f58230770fa9e074b58a
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from openapi_server.com.h21lab.TS29510_Nnrf_NFDiscovery.handler.base_model_ import Model from openapi_server.com.h21lab.TS29510_Nnrf_NFDiscovery.handler.ipv6_addr import Ipv6Addr from openapi_server import util from openapi_server.com.h21lab.TS29510_Nnrf_NFDiscovery.handler.ipv6_addr import Ipv6Addr # noqa: E501 class N2InterfaceAmfInfo(Model): """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually. """ def __init__(self, ipv4_endpoint_address=None, ipv6_endpoint_address=None, amf_name=None): # noqa: E501 """N2InterfaceAmfInfo - a model defined in OpenAPI :param ipv4_endpoint_address: The ipv4_endpoint_address of this N2InterfaceAmfInfo. # noqa: E501 :type ipv4_endpoint_address: List[str] :param ipv6_endpoint_address: The ipv6_endpoint_address of this N2InterfaceAmfInfo. # noqa: E501 :type ipv6_endpoint_address: List[Ipv6Addr] :param amf_name: The amf_name of this N2InterfaceAmfInfo. # noqa: E501 :type amf_name: str """ self.openapi_types = { 'ipv4_endpoint_address': List[str], 'ipv6_endpoint_address': List[Ipv6Addr], 'amf_name': str } self.attribute_map = { 'ipv4_endpoint_address': 'ipv4EndpointAddress', 'ipv6_endpoint_address': 'ipv6EndpointAddress', 'amf_name': 'amfName' } self._ipv4_endpoint_address = ipv4_endpoint_address self._ipv6_endpoint_address = ipv6_endpoint_address self._amf_name = amf_name @classmethod def from_dict(cls, dikt) -> 'N2InterfaceAmfInfo': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The N2InterfaceAmfInfo of this N2InterfaceAmfInfo. # noqa: E501 :rtype: N2InterfaceAmfInfo """ return util.deserialize_model(dikt, cls) @property def ipv4_endpoint_address(self): """Gets the ipv4_endpoint_address of this N2InterfaceAmfInfo. :return: The ipv4_endpoint_address of this N2InterfaceAmfInfo. :rtype: List[str] """ return self._ipv4_endpoint_address @ipv4_endpoint_address.setter def ipv4_endpoint_address(self, ipv4_endpoint_address): """Sets the ipv4_endpoint_address of this N2InterfaceAmfInfo. :param ipv4_endpoint_address: The ipv4_endpoint_address of this N2InterfaceAmfInfo. :type ipv4_endpoint_address: List[str] """ self._ipv4_endpoint_address = ipv4_endpoint_address @property def ipv6_endpoint_address(self): """Gets the ipv6_endpoint_address of this N2InterfaceAmfInfo. :return: The ipv6_endpoint_address of this N2InterfaceAmfInfo. :rtype: List[Ipv6Addr] """ return self._ipv6_endpoint_address @ipv6_endpoint_address.setter def ipv6_endpoint_address(self, ipv6_endpoint_address): """Sets the ipv6_endpoint_address of this N2InterfaceAmfInfo. :param ipv6_endpoint_address: The ipv6_endpoint_address of this N2InterfaceAmfInfo. :type ipv6_endpoint_address: List[Ipv6Addr] """ self._ipv6_endpoint_address = ipv6_endpoint_address @property def amf_name(self): """Gets the amf_name of this N2InterfaceAmfInfo. :return: The amf_name of this N2InterfaceAmfInfo. :rtype: str """ return self._amf_name @amf_name.setter def amf_name(self, amf_name): """Sets the amf_name of this N2InterfaceAmfInfo. :param amf_name: The amf_name of this N2InterfaceAmfInfo. :type amf_name: str """ self._amf_name = amf_name
py
1a45af1546219afe6345dadb252b509616981f9b
def count_words(filename):
    """Count the approximate number of words in a file."""
    try:
        with open(filename) as f_obj:
            contents = f_obj.read()
    except FileNotFoundError:
        pass
    else:
        # Count approximate number of words in the file.
        words = contents.split()
        num_words = len(words)
        print('The file ' + filename + ' has about ' + str(num_words) + ' words.')


filenames = [
    'data/alice.txt',
    'data/sssssiddhartha.txt',
    'data/moby_dick.txt',
    'data/little_women.txt',
]

for filename in filenames:
    count_words(filename)
py
1a45b07899978ca620a01e8b09b8791a649864a4
import urllib
from io import StringIO
from io import BytesIO
import csv
import numpy as np
from datetime import datetime
import matplotlib.pylab as plt
import pandas as pd
import scipy.signal as signal

# Load the transaction data for each year; the URLs must be quoted strings
# for pd.read_csv to accept them.
datos_2008 = pd.read_csv("https://hub.mybinder.org/user/computociencias-fisi2029-201910-zgf4osv2/edit/Seccion_1/Fourier/Datos/transacciones2008.txt")
datos_2009 = pd.read_csv("https://hub.mybinder.org/user/computociencias-fisi2029-201910-zgf4osv2/edit/Seccion_1/Fourier/Datos/transacciones2009.txt")
datos_2010 = pd.read_csv("https://hub.mybinder.org/user/computociencias-fisi2029-201910-zgf4osv2/edit/Seccion_1/Fourier/Datos/transacciones2010.txt")

plt.plot(datos_2008)
py
1a45b16fed371d664735fb1db73d8052f3d26acc
from dagster_graphql.client.util import parse_raw_log_lines
from dagster_k8s.utils import (
    get_pod_names_in_job,
    retrieve_pod_logs,
    wait_for_job,
    wait_for_job_success,
)

from dagster import check


def wait_for_job_ready(job_name, namespace):
    '''Wait for a dagster-k8s job to be ready
    '''
    check.str_param(job_name, 'job_name')
    check.str_param(namespace, 'namespace')

    wait_for_job(job_name=job_name, namespace=namespace)


def wait_for_job_and_get_logs(job_name, namespace):
    '''Wait for a dagster-k8s job to complete, ensure it launched only one pod,
    and then grab the logs from the pod it launched.
    '''
    check.str_param(job_name, 'job_name')
    check.str_param(namespace, 'namespace')

    wait_for_job_success(job_name, namespace=namespace)

    pod_names = get_pod_names_in_job(job_name, namespace)

    assert len(pod_names) == 1

    pod_name = pod_names[0]

    raw_logs = retrieve_pod_logs(pod_name, namespace=namespace)

    return parse_raw_log_lines(raw_logs.split('\n'))
py
1a45b278e793ecdad40be839e03ebfaa41b6abd7
""" PASSENGERS """ numPassengers = 1846 passenger_arriving = ( (3, 5, 4, 2, 1, 0, 5, 5, 7, 2, 0, 0), # 0 (1, 2, 2, 4, 3, 0, 1, 4, 4, 4, 1, 0), # 1 (1, 2, 0, 2, 2, 0, 7, 2, 0, 4, 2, 0), # 2 (3, 4, 4, 0, 0, 0, 4, 3, 0, 3, 1, 0), # 3 (4, 4, 2, 3, 1, 0, 3, 3, 3, 2, 0, 0), # 4 (0, 5, 2, 2, 0, 0, 5, 5, 2, 2, 1, 0), # 5 (0, 4, 7, 1, 1, 0, 7, 4, 4, 2, 3, 0), # 6 (4, 6, 5, 0, 1, 0, 2, 7, 1, 4, 2, 0), # 7 (2, 7, 2, 1, 2, 0, 7, 4, 6, 0, 2, 0), # 8 (4, 3, 6, 2, 1, 0, 3, 2, 1, 3, 0, 0), # 9 (0, 5, 2, 3, 0, 0, 1, 2, 4, 1, 1, 0), # 10 (4, 5, 7, 1, 2, 0, 2, 2, 5, 0, 0, 0), # 11 (4, 8, 4, 1, 1, 0, 6, 3, 4, 4, 0, 0), # 12 (4, 6, 5, 2, 1, 0, 5, 11, 3, 2, 0, 0), # 13 (2, 1, 6, 3, 1, 0, 3, 5, 3, 2, 1, 0), # 14 (1, 5, 3, 1, 2, 0, 5, 2, 4, 1, 1, 0), # 15 (5, 9, 2, 1, 3, 0, 5, 6, 4, 3, 2, 0), # 16 (1, 8, 3, 1, 2, 0, 3, 5, 1, 5, 5, 0), # 17 (0, 7, 4, 4, 1, 0, 4, 7, 3, 3, 0, 0), # 18 (2, 3, 2, 1, 0, 0, 4, 5, 2, 1, 0, 0), # 19 (1, 7, 6, 2, 2, 0, 4, 4, 3, 5, 0, 0), # 20 (3, 2, 3, 1, 1, 0, 6, 2, 4, 1, 0, 0), # 21 (1, 6, 4, 4, 1, 0, 2, 6, 7, 5, 2, 0), # 22 (2, 2, 4, 2, 0, 0, 5, 3, 4, 4, 1, 0), # 23 (0, 7, 2, 4, 0, 0, 5, 5, 5, 3, 0, 0), # 24 (0, 9, 6, 1, 2, 0, 8, 8, 5, 3, 2, 0), # 25 (1, 6, 3, 0, 0, 0, 7, 2, 4, 5, 1, 0), # 26 (4, 5, 1, 1, 0, 0, 4, 2, 4, 7, 2, 0), # 27 (0, 4, 3, 1, 0, 0, 2, 4, 3, 7, 0, 0), # 28 (0, 12, 2, 1, 0, 0, 3, 8, 1, 1, 1, 0), # 29 (2, 5, 5, 5, 2, 0, 7, 6, 6, 3, 1, 0), # 30 (1, 5, 4, 2, 3, 0, 6, 6, 5, 4, 1, 0), # 31 (1, 4, 5, 1, 3, 0, 4, 8, 2, 3, 3, 0), # 32 (2, 3, 5, 1, 3, 0, 5, 4, 1, 1, 2, 0), # 33 (3, 7, 2, 1, 2, 0, 4, 7, 5, 4, 0, 0), # 34 (3, 6, 7, 2, 1, 0, 11, 5, 4, 4, 0, 0), # 35 (0, 4, 6, 0, 1, 0, 7, 7, 1, 3, 1, 0), # 36 (3, 5, 2, 3, 0, 0, 2, 4, 0, 3, 0, 0), # 37 (1, 4, 7, 3, 1, 0, 6, 8, 4, 4, 0, 0), # 38 (2, 5, 6, 2, 2, 0, 4, 8, 4, 2, 1, 0), # 39 (1, 8, 3, 2, 1, 0, 3, 2, 4, 1, 2, 0), # 40 (2, 6, 8, 3, 2, 0, 9, 3, 1, 1, 1, 0), # 41 (3, 8, 1, 2, 0, 0, 7, 6, 8, 3, 1, 0), # 42 (1, 5, 4, 2, 3, 0, 1, 3, 3, 4, 2, 0), # 43 (1, 2, 6, 2, 2, 0, 1, 8, 3, 3, 1, 0), # 44 (6, 2, 4, 3, 2, 0, 4, 7, 7, 1, 2, 0), # 45 (1, 7, 4, 2, 1, 0, 5, 5, 2, 1, 4, 0), # 46 (3, 5, 1, 4, 0, 0, 4, 5, 3, 3, 2, 0), # 47 (3, 8, 3, 3, 2, 0, 1, 4, 2, 4, 2, 0), # 48 (4, 8, 4, 4, 2, 0, 4, 3, 7, 3, 1, 0), # 49 (2, 2, 3, 0, 3, 0, 0, 8, 2, 3, 1, 0), # 50 (6, 4, 3, 4, 2, 0, 2, 7, 3, 1, 3, 0), # 51 (3, 3, 4, 3, 1, 0, 8, 2, 4, 4, 2, 0), # 52 (3, 8, 5, 1, 1, 0, 2, 2, 1, 4, 1, 0), # 53 (4, 7, 2, 2, 1, 0, 6, 5, 4, 3, 2, 0), # 54 (3, 3, 5, 3, 3, 0, 5, 6, 1, 4, 2, 0), # 55 (0, 6, 2, 3, 1, 0, 5, 9, 3, 3, 3, 0), # 56 (3, 1, 4, 2, 0, 0, 3, 3, 2, 4, 1, 0), # 57 (5, 5, 3, 3, 1, 0, 4, 6, 5, 3, 1, 0), # 58 (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59 ) station_arriving_intensity = ( (2.1197212467076385, 5.437168560606061, 6.39538881748072, 5.069021739130434, 5.714423076923077, 3.805434782608696), # 0 (2.13961760803824, 5.497633278970259, 6.429932430019996, 5.097251509661836, 5.757253205128205, 3.8041377113526575), # 1 (2.159286781777387, 5.5572011223344555, 6.4636560982576405, 5.124859903381643, 5.7991794871794875, 3.802800966183575), # 2 (2.178712071992976, 5.615807812500001, 6.496535186375322, 5.151823369565217, 5.840163461538463, 3.80142472826087), # 3 (2.1978767827529024, 5.673389071268239, 6.528545058554698, 5.178118357487923, 5.880166666666668, 3.800009178743961), # 4 (2.216764218125061, 5.729880620440517, 6.559661078977435, 5.203721316425121, 5.919150641025641, 3.7985544987922704), # 5 (2.235357682177349, 5.785218181818182, 6.5898586118251945, 5.2286086956521745, 5.957076923076923, 3.7970608695652177), # 6 (2.253640478977661, 5.839337477202581, 
6.6191130212796345, 5.252756944444445, 5.9939070512820525, 3.7955284722222227), # 7 (2.2715959125938934, 5.892174228395061, 6.6473996715224235, 5.2761425120772945, 6.029602564102564, 3.7939574879227056), # 8 (2.289207287093942, 5.94366415719697, 6.67469392673522, 5.298741847826087, 6.064125, 3.7923480978260873), # 9 (2.306457906545703, 5.993742985409653, 6.700971151099686, 5.320531400966185, 6.097435897435898, 3.790700483091787), # 10 (2.3233310750170717, 6.042346434834457, 6.726206708797486, 5.341487620772948, 6.129496794871795, 3.7890148248792275), # 11 (2.339810096575944, 6.089410227272727, 6.750375964010283, 5.361586956521739, 6.160269230769231, 3.787291304347826), # 12 (2.355878275290215, 6.134870084525815, 6.7734542809197364, 5.380805857487923, 6.189714743589745, 3.7855301026570047), # 13 (2.371518915227783, 6.178661728395063, 6.795417023707511, 5.399120772946859, 6.2177948717948714, 3.7837314009661833), # 14 (2.3867153204565406, 6.220720880681817, 6.816239556555269, 5.416508152173913, 6.244471153846154, 3.781895380434783), # 15 (2.401450795044386, 6.2609832631874305, 6.835897243644673, 5.432944444444445, 6.269705128205128, 3.7800222222222226), # 16 (2.415708643059214, 6.299384597713243, 6.854365449157384, 5.448406099033817, 6.293458333333334, 3.778112107487923), # 17 (2.4294721685689202, 6.335860606060606, 6.871619537275065, 5.462869565217392, 6.315692307692309, 3.7761652173913043), # 18 (2.4427246756414007, 6.370347010030864, 6.887634872179378, 5.476311292270531, 6.33636858974359, 3.7741817330917877), # 19 (2.455449468344552, 6.402779531425364, 6.902386818051984, 5.4887077294686, 6.355448717948718, 3.772161835748792), # 20 (2.467629850746269, 6.433093892045453, 6.915850739074552, 5.500035326086957, 6.37289423076923, 3.7701057065217394), # 21 (2.479249126914447, 6.461225813692481, 6.9280019994287345, 5.510270531400966, 6.388666666666666, 3.7680135265700487), # 22 (2.4902906009169836, 6.48711101816779, 6.938815963296202, 5.519389794685991, 6.402727564102564, 3.765885477053141), # 23 (2.5007375768217734, 6.510685227272727, 6.948267994858611, 5.527369565217391, 6.415038461538462, 3.763721739130435), # 24 (2.5105733586967123, 6.531884162808642, 6.95633345829763, 5.534186292270532, 6.425560897435897, 3.761522493961353), # 25 (2.5197812506096966, 6.550643546576879, 6.962987717794916, 5.539816425120772, 6.43425641025641, 3.759287922705314), # 26 (2.5283445566286216, 6.566899100378787, 6.968206137532133, 5.544236413043479, 6.44108653846154, 3.7570182065217397), # 27 (2.5362465808213837, 6.580586546015713, 6.971964081690946, 5.54742270531401, 6.44601282051282, 3.7547135265700486), # 28 (2.5434706272558776, 6.591641605289002, 6.974236914453013, 5.54935175120773, 6.448996794871795, 3.752374064009662), # 29 (2.5500000000000003, 6.6000000000000005, 6.9750000000000005, 5.550000000000001, 6.45, 3.75), # 30 (2.5561096227621487, 6.606943039772727, 6.974427958937198, 5.549882924836602, 6.449634929078015, 3.7467010078294187), # 31 (2.562087340153453, 6.613794318181819, 6.972728019323672, 5.549533986928104, 6.448547517730496, 3.7416198067632855), # 32 (2.5679358375959076, 6.620552982954546, 6.96992445652174, 5.548956617647059, 6.446749468085106, 3.734806146926536), # 33 (2.573657800511509, 6.627218181818183, 6.96604154589372, 5.548154248366014, 6.444252482269504, 3.7263097784441115), # 34 (2.5792559143222507, 6.633789062499999, 6.961103562801933, 5.547130310457517, 6.441068262411348, 3.7161804514409464), # 35 (2.584732864450128, 6.640264772727274, 6.955134782608695, 5.545888235294118, 
6.437208510638299, 3.7044679160419793), # 36 (2.5900913363171356, 6.646644460227273, 6.9481594806763285, 5.544431454248366, 6.432684929078014, 3.691221922372147), # 37 (2.5953340153452684, 6.652927272727273, 6.94020193236715, 5.54276339869281, 6.427509219858156, 3.676492220556388), # 38 (2.600463586956522, 6.6591123579545455, 6.931286413043478, 5.5408875, 6.421693085106383, 3.66032856071964), # 39 (2.60548273657289, 6.665198863636364, 6.9214371980676335, 5.538807189542484, 6.415248226950354, 3.6427806929868396), # 40 (2.6103941496163685, 6.671185937499999, 6.910678562801933, 5.536525898692811, 6.408186347517731, 3.623898367482926), # 41 (2.6152005115089514, 6.677072727272729, 6.899034782608696, 5.534047058823529, 6.400519148936171, 3.6037313343328337), # 42 (2.6199045076726346, 6.682858380681818, 6.8865301328502415, 5.53137410130719, 6.392258333333333, 3.5823293436615025), # 43 (2.624508823529412, 6.688542045454546, 6.8731888888888895, 5.52851045751634, 6.38341560283688, 3.5597421455938694), # 44 (2.6290161445012785, 6.694122869318182, 6.859035326086958, 5.525459558823529, 6.374002659574469, 3.5360194902548727), # 45 (2.6334291560102305, 6.699600000000001, 6.844093719806764, 5.522224836601307, 6.36403120567376, 3.511211127769449), # 46 (2.637750543478261, 6.7049725852272735, 6.828388345410628, 5.5188097222222225, 6.3535129432624124, 3.4853668082625355), # 47 (2.641982992327366, 6.710239772727274, 6.811943478260869, 5.515217647058823, 6.342459574468085, 3.4585362818590712), # 48 (2.6461291879795397, 6.7154007102272715, 6.794783393719808, 5.511452042483661, 6.33088280141844, 3.430769298683991), # 49 (2.6501918158567777, 6.720454545454544, 6.776932367149759, 5.507516339869282, 6.318794326241135, 3.4021156088622355), # 50 (2.6541735613810746, 6.725400426136364, 6.758414673913044, 5.503413970588236, 6.3062058510638295, 3.3726249625187408), # 51 (2.6580771099744247, 6.7302375, 6.73925458937198, 5.499148366013072, 6.293129078014185, 3.3423471097784443), # 52 (2.6619051470588238, 6.734964914772728, 6.719476388888889, 5.49472295751634, 6.279575709219859, 3.3113318007662835), # 53 (2.6656603580562663, 6.739581818181818, 6.699104347826086, 5.490141176470589, 6.265557446808511, 3.2796287856071964), # 54 (2.6693454283887466, 6.7440873579545455, 6.6781627415458935, 5.485406454248366, 6.251085992907802, 3.2472878144261204), # 55 (2.6729630434782607, 6.748480681818181, 6.6566758454106285, 5.4805222222222225, 6.236173049645391, 3.214358637347993), # 56 (2.6765158887468035, 6.7527609375000015, 6.634667934782609, 5.475491911764706, 6.220830319148936, 3.180891004497751), # 57 (2.6800066496163684, 6.756927272727272, 6.612163285024154, 5.470318954248366, 6.205069503546099, 3.1469346660003334), # 58 (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59 ) passenger_arriving_acc = ( (3, 5, 4, 2, 1, 0, 5, 5, 7, 2, 0, 0), # 0 (4, 7, 6, 6, 4, 0, 6, 9, 11, 6, 1, 0), # 1 (5, 9, 6, 8, 6, 0, 13, 11, 11, 10, 3, 0), # 2 (8, 13, 10, 8, 6, 0, 17, 14, 11, 13, 4, 0), # 3 (12, 17, 12, 11, 7, 0, 20, 17, 14, 15, 4, 0), # 4 (12, 22, 14, 13, 7, 0, 25, 22, 16, 17, 5, 0), # 5 (12, 26, 21, 14, 8, 0, 32, 26, 20, 19, 8, 0), # 6 (16, 32, 26, 14, 9, 0, 34, 33, 21, 23, 10, 0), # 7 (18, 39, 28, 15, 11, 0, 41, 37, 27, 23, 12, 0), # 8 (22, 42, 34, 17, 12, 0, 44, 39, 28, 26, 12, 0), # 9 (22, 47, 36, 20, 12, 0, 45, 41, 32, 27, 13, 0), # 10 (26, 52, 43, 21, 14, 0, 47, 43, 37, 27, 13, 0), # 11 (30, 60, 47, 22, 15, 0, 53, 46, 41, 31, 13, 0), # 12 (34, 66, 52, 24, 16, 0, 58, 57, 44, 33, 13, 0), # 13 (36, 67, 58, 27, 17, 0, 61, 62, 47, 35, 14, 0), # 14 (37, 72, 61, 
28, 19, 0, 66, 64, 51, 36, 15, 0), # 15 (42, 81, 63, 29, 22, 0, 71, 70, 55, 39, 17, 0), # 16 (43, 89, 66, 30, 24, 0, 74, 75, 56, 44, 22, 0), # 17 (43, 96, 70, 34, 25, 0, 78, 82, 59, 47, 22, 0), # 18 (45, 99, 72, 35, 25, 0, 82, 87, 61, 48, 22, 0), # 19 (46, 106, 78, 37, 27, 0, 86, 91, 64, 53, 22, 0), # 20 (49, 108, 81, 38, 28, 0, 92, 93, 68, 54, 22, 0), # 21 (50, 114, 85, 42, 29, 0, 94, 99, 75, 59, 24, 0), # 22 (52, 116, 89, 44, 29, 0, 99, 102, 79, 63, 25, 0), # 23 (52, 123, 91, 48, 29, 0, 104, 107, 84, 66, 25, 0), # 24 (52, 132, 97, 49, 31, 0, 112, 115, 89, 69, 27, 0), # 25 (53, 138, 100, 49, 31, 0, 119, 117, 93, 74, 28, 0), # 26 (57, 143, 101, 50, 31, 0, 123, 119, 97, 81, 30, 0), # 27 (57, 147, 104, 51, 31, 0, 125, 123, 100, 88, 30, 0), # 28 (57, 159, 106, 52, 31, 0, 128, 131, 101, 89, 31, 0), # 29 (59, 164, 111, 57, 33, 0, 135, 137, 107, 92, 32, 0), # 30 (60, 169, 115, 59, 36, 0, 141, 143, 112, 96, 33, 0), # 31 (61, 173, 120, 60, 39, 0, 145, 151, 114, 99, 36, 0), # 32 (63, 176, 125, 61, 42, 0, 150, 155, 115, 100, 38, 0), # 33 (66, 183, 127, 62, 44, 0, 154, 162, 120, 104, 38, 0), # 34 (69, 189, 134, 64, 45, 0, 165, 167, 124, 108, 38, 0), # 35 (69, 193, 140, 64, 46, 0, 172, 174, 125, 111, 39, 0), # 36 (72, 198, 142, 67, 46, 0, 174, 178, 125, 114, 39, 0), # 37 (73, 202, 149, 70, 47, 0, 180, 186, 129, 118, 39, 0), # 38 (75, 207, 155, 72, 49, 0, 184, 194, 133, 120, 40, 0), # 39 (76, 215, 158, 74, 50, 0, 187, 196, 137, 121, 42, 0), # 40 (78, 221, 166, 77, 52, 0, 196, 199, 138, 122, 43, 0), # 41 (81, 229, 167, 79, 52, 0, 203, 205, 146, 125, 44, 0), # 42 (82, 234, 171, 81, 55, 0, 204, 208, 149, 129, 46, 0), # 43 (83, 236, 177, 83, 57, 0, 205, 216, 152, 132, 47, 0), # 44 (89, 238, 181, 86, 59, 0, 209, 223, 159, 133, 49, 0), # 45 (90, 245, 185, 88, 60, 0, 214, 228, 161, 134, 53, 0), # 46 (93, 250, 186, 92, 60, 0, 218, 233, 164, 137, 55, 0), # 47 (96, 258, 189, 95, 62, 0, 219, 237, 166, 141, 57, 0), # 48 (100, 266, 193, 99, 64, 0, 223, 240, 173, 144, 58, 0), # 49 (102, 268, 196, 99, 67, 0, 223, 248, 175, 147, 59, 0), # 50 (108, 272, 199, 103, 69, 0, 225, 255, 178, 148, 62, 0), # 51 (111, 275, 203, 106, 70, 0, 233, 257, 182, 152, 64, 0), # 52 (114, 283, 208, 107, 71, 0, 235, 259, 183, 156, 65, 0), # 53 (118, 290, 210, 109, 72, 0, 241, 264, 187, 159, 67, 0), # 54 (121, 293, 215, 112, 75, 0, 246, 270, 188, 163, 69, 0), # 55 (121, 299, 217, 115, 76, 0, 251, 279, 191, 166, 72, 0), # 56 (124, 300, 221, 117, 76, 0, 254, 282, 193, 170, 73, 0), # 57 (129, 305, 224, 120, 77, 0, 258, 288, 198, 173, 74, 0), # 58 (129, 305, 224, 120, 77, 0, 258, 288, 198, 173, 74, 0), # 59 ) passenger_arriving_rate = ( (2.1197212467076385, 4.349734848484848, 3.837233290488432, 2.0276086956521735, 1.1428846153846153, 0.0, 3.805434782608696, 4.571538461538461, 3.0414130434782605, 2.5581555269922878, 1.087433712121212, 0.0), # 0 (2.13961760803824, 4.398106623176207, 3.8579594580119974, 2.038900603864734, 1.1514506410256409, 0.0, 3.8041377113526575, 4.6058025641025635, 3.0583509057971017, 2.5719729720079982, 1.0995266557940517, 0.0), # 1 (2.159286781777387, 4.445760897867564, 3.8781936589545842, 2.049943961352657, 1.1598358974358973, 0.0, 3.802800966183575, 4.639343589743589, 3.0749159420289858, 2.585462439303056, 1.111440224466891, 0.0), # 2 (2.178712071992976, 4.49264625, 3.897921111825193, 2.0607293478260864, 1.1680326923076925, 0.0, 3.80142472826087, 4.67213076923077, 3.09109402173913, 2.5986140745501287, 1.1231615625, 0.0), # 3 (2.1978767827529024, 4.53871125701459, 3.9171270351328187, 2.071247342995169, 1.1760333333333335, 
0.0, 3.800009178743961, 4.704133333333334, 3.106871014492754, 2.611418023421879, 1.1346778142536476, 0.0), # 4 (2.216764218125061, 4.583904496352414, 3.9357966473864607, 2.0814885265700482, 1.183830128205128, 0.0, 3.7985544987922704, 4.735320512820512, 3.1222327898550724, 2.623864431590974, 1.1459761240881035, 0.0), # 5 (2.235357682177349, 4.628174545454545, 3.9539151670951167, 2.0914434782608695, 1.1914153846153845, 0.0, 3.7970608695652177, 4.765661538461538, 3.1371652173913045, 2.635943444730078, 1.1570436363636363, 0.0), # 6 (2.253640478977661, 4.671469981762065, 3.9714678127677807, 2.1011027777777778, 1.1987814102564105, 0.0, 3.7955284722222227, 4.795125641025642, 3.151654166666667, 2.647645208511854, 1.1678674954405162, 0.0), # 7 (2.2715959125938934, 4.7137393827160485, 3.988439802913454, 2.1104570048309177, 1.2059205128205128, 0.0, 3.7939574879227056, 4.823682051282051, 3.1656855072463768, 2.658959868608969, 1.1784348456790121, 0.0), # 8 (2.289207287093942, 4.754931325757576, 4.004816356041132, 2.119496739130435, 1.2128249999999998, 0.0, 3.7923480978260873, 4.851299999999999, 3.1792451086956524, 2.6698775706940876, 1.188732831439394, 0.0), # 9 (2.306457906545703, 4.794994388327722, 4.020582690659811, 2.1282125603864737, 1.2194871794871796, 0.0, 3.790700483091787, 4.877948717948718, 3.1923188405797105, 2.680388460439874, 1.1987485970819305, 0.0), # 10 (2.3233310750170717, 4.833877147867565, 4.035724025278491, 2.1365950483091787, 1.2258993589743588, 0.0, 3.7890148248792275, 4.903597435897435, 3.2048925724637685, 2.690482683518994, 1.2084692869668912, 0.0), # 11 (2.339810096575944, 4.8715281818181815, 4.050225578406169, 2.1446347826086956, 1.2320538461538462, 0.0, 3.787291304347826, 4.928215384615385, 3.2169521739130436, 2.700150385604113, 1.2178820454545454, 0.0), # 12 (2.355878275290215, 4.907896067620651, 4.0640725685518415, 2.152322342995169, 1.237942948717949, 0.0, 3.7855301026570047, 4.951771794871796, 3.2284835144927535, 2.7093817123678945, 1.2269740169051628, 0.0), # 13 (2.371518915227783, 4.94292938271605, 4.077250214224507, 2.1596483091787437, 1.2435589743589741, 0.0, 3.7837314009661833, 4.9742358974358964, 3.2394724637681156, 2.7181668094830043, 1.2357323456790126, 0.0), # 14 (2.3867153204565406, 4.976576704545454, 4.089743733933161, 2.166603260869565, 1.2488942307692308, 0.0, 3.781895380434783, 4.995576923076923, 3.2499048913043476, 2.726495822622107, 1.2441441761363634, 0.0), # 15 (2.401450795044386, 5.008786610549944, 4.101538346186804, 2.1731777777777777, 1.2539410256410255, 0.0, 3.7800222222222226, 5.015764102564102, 3.2597666666666667, 2.734358897457869, 1.252196652637486, 0.0), # 16 (2.415708643059214, 5.039507678170594, 4.11261926949443, 2.1793624396135267, 1.2586916666666665, 0.0, 3.778112107487923, 5.034766666666666, 3.2690436594202903, 2.7417461796629534, 1.2598769195426485, 0.0), # 17 (2.4294721685689202, 5.068688484848485, 4.122971722365039, 2.185147826086957, 1.2631384615384618, 0.0, 3.7761652173913043, 5.052553846153847, 3.277721739130435, 2.7486478149100257, 1.2671721212121212, 0.0), # 18 (2.4427246756414007, 5.096277608024691, 4.132580923307627, 2.190524516908212, 1.267273717948718, 0.0, 3.7741817330917877, 5.069094871794872, 3.2857867753623187, 2.7550539488717507, 1.2740694020061727, 0.0), # 19 (2.455449468344552, 5.122223625140291, 4.141432090831191, 2.1954830917874397, 1.2710897435897435, 0.0, 3.772161835748792, 5.084358974358974, 3.29322463768116, 2.7609547272207933, 1.2805559062850727, 0.0), # 20 (2.467629850746269, 5.146475113636362, 4.149510443444731, 
2.2000141304347824, 1.2745788461538459, 0.0, 3.7701057065217394, 5.0983153846153835, 3.300021195652174, 2.76634029562982, 1.2866187784090906, 0.0), # 21 (2.479249126914447, 5.168980650953984, 4.156801199657241, 2.2041082125603864, 1.277733333333333, 0.0, 3.7680135265700487, 5.110933333333332, 3.3061623188405798, 2.7712007997714934, 1.292245162738496, 0.0), # 22 (2.4902906009169836, 5.1896888145342315, 4.163289577977721, 2.207755917874396, 1.2805455128205128, 0.0, 3.765885477053141, 5.122182051282051, 3.3116338768115945, 2.7755263853184804, 1.2974222036335579, 0.0), # 23 (2.5007375768217734, 5.208548181818181, 4.168960796915166, 2.210947826086956, 1.2830076923076923, 0.0, 3.763721739130435, 5.132030769230769, 3.3164217391304347, 2.779307197943444, 1.3021370454545453, 0.0), # 24 (2.5105733586967123, 5.225507330246913, 4.173800074978578, 2.213674516908213, 1.2851121794871794, 0.0, 3.761522493961353, 5.1404487179487175, 3.3205117753623195, 2.7825333833190515, 1.3063768325617282, 0.0), # 25 (2.5197812506096966, 5.240514837261503, 4.177792630676949, 2.2159265700483086, 1.2868512820512819, 0.0, 3.759287922705314, 5.147405128205127, 3.3238898550724634, 2.785195087117966, 1.3101287093153757, 0.0), # 26 (2.5283445566286216, 5.2535192803030295, 4.180923682519279, 2.2176945652173914, 1.2882173076923078, 0.0, 3.7570182065217397, 5.152869230769231, 3.3265418478260873, 2.787282455012853, 1.3133798200757574, 0.0), # 27 (2.5362465808213837, 5.26446923681257, 4.183178449014568, 2.2189690821256036, 1.289202564102564, 0.0, 3.7547135265700486, 5.156810256410256, 3.328453623188406, 2.7887856326763782, 1.3161173092031424, 0.0), # 28 (2.5434706272558776, 5.273313284231201, 4.184542148671808, 2.219740700483092, 1.289799358974359, 0.0, 3.752374064009662, 5.159197435897436, 3.329611050724638, 2.789694765781205, 1.3183283210578003, 0.0), # 29 (2.5500000000000003, 5.28, 4.1850000000000005, 2.22, 1.29, 0.0, 3.75, 5.16, 3.3300000000000005, 2.79, 1.32, 0.0), # 30 (2.5561096227621487, 5.285554431818181, 4.184656775362319, 2.219953169934641, 1.2899269858156028, 0.0, 3.7467010078294187, 5.159707943262411, 3.3299297549019613, 2.789771183574879, 1.3213886079545452, 0.0), # 31 (2.562087340153453, 5.2910354545454545, 4.183636811594202, 2.2198135947712414, 1.2897095035460993, 0.0, 3.7416198067632855, 5.158838014184397, 3.329720392156862, 2.7890912077294683, 1.3227588636363636, 0.0), # 32 (2.5679358375959076, 5.296442386363637, 4.181954673913044, 2.2195826470588234, 1.2893498936170211, 0.0, 3.734806146926536, 5.1573995744680845, 3.3293739705882355, 2.787969782608696, 1.3241105965909092, 0.0), # 33 (2.573657800511509, 5.3017745454545455, 4.179624927536232, 2.2192616993464056, 1.2888504964539007, 0.0, 3.7263097784441115, 5.155401985815603, 3.3288925490196086, 2.786416618357488, 1.3254436363636364, 0.0), # 34 (2.5792559143222507, 5.307031249999999, 4.176662137681159, 2.2188521241830066, 1.2882136524822696, 0.0, 3.7161804514409464, 5.152854609929078, 3.32827818627451, 2.784441425120773, 1.3267578124999997, 0.0), # 35 (2.584732864450128, 5.312211818181819, 4.173080869565217, 2.218355294117647, 1.2874417021276596, 0.0, 3.7044679160419793, 5.1497668085106385, 3.3275329411764707, 2.782053913043478, 1.3280529545454547, 0.0), # 36 (2.5900913363171356, 5.317315568181819, 4.168895688405797, 2.2177725816993465, 1.2865369858156026, 0.0, 3.691221922372147, 5.14614794326241, 3.32665887254902, 2.779263792270531, 1.3293288920454547, 0.0), # 37 (2.5953340153452684, 5.322341818181818, 4.16412115942029, 2.2171053594771237, 1.2855018439716313, 0.0, 
3.676492220556388, 5.142007375886525, 3.325658039215686, 2.7760807729468597, 1.3305854545454545, 0.0), # 38 (2.600463586956522, 5.327289886363636, 4.158771847826086, 2.216355, 1.2843386170212765, 0.0, 3.66032856071964, 5.137354468085106, 3.3245325, 2.772514565217391, 1.331822471590909, 0.0), # 39 (2.60548273657289, 5.332159090909091, 4.1528623188405795, 2.2155228758169936, 1.2830496453900706, 0.0, 3.6427806929868396, 5.132198581560282, 3.3232843137254906, 2.768574879227053, 1.3330397727272727, 0.0), # 40 (2.6103941496163685, 5.336948749999999, 4.14640713768116, 2.214610359477124, 1.281637269503546, 0.0, 3.623898367482926, 5.126549078014184, 3.3219155392156865, 2.7642714251207727, 1.3342371874999996, 0.0), # 41 (2.6152005115089514, 5.3416581818181825, 4.1394208695652175, 2.2136188235294116, 1.280103829787234, 0.0, 3.6037313343328337, 5.120415319148936, 3.3204282352941177, 2.7596139130434785, 1.3354145454545456, 0.0), # 42 (2.6199045076726346, 5.346286704545454, 4.131918079710145, 2.2125496405228757, 1.2784516666666665, 0.0, 3.5823293436615025, 5.113806666666666, 3.3188244607843136, 2.754612053140096, 1.3365716761363635, 0.0), # 43 (2.624508823529412, 5.350833636363636, 4.123913333333333, 2.211404183006536, 1.2766831205673759, 0.0, 3.5597421455938694, 5.1067324822695035, 3.317106274509804, 2.7492755555555557, 1.337708409090909, 0.0), # 44 (2.6290161445012785, 5.355298295454545, 4.115421195652175, 2.2101838235294116, 1.2748005319148936, 0.0, 3.5360194902548727, 5.0992021276595745, 3.3152757352941173, 2.743614130434783, 1.3388245738636362, 0.0), # 45 (2.6334291560102305, 5.35968, 4.106456231884058, 2.2088899346405224, 1.2728062411347518, 0.0, 3.511211127769449, 5.091224964539007, 3.313334901960784, 2.7376374879227057, 1.33992, 0.0), # 46 (2.637750543478261, 5.363978068181818, 4.0970330072463765, 2.207523888888889, 1.2707025886524823, 0.0, 3.4853668082625355, 5.082810354609929, 3.3112858333333333, 2.7313553381642506, 1.3409945170454545, 0.0), # 47 (2.641982992327366, 5.368191818181819, 4.087166086956522, 2.2060870588235293, 1.268491914893617, 0.0, 3.4585362818590712, 5.073967659574468, 3.309130588235294, 2.7247773913043476, 1.3420479545454547, 0.0), # 48 (2.6461291879795397, 5.3723205681818165, 4.076870036231885, 2.2045808169934644, 1.2661765602836879, 0.0, 3.430769298683991, 5.064706241134751, 3.306871225490197, 2.7179133574879226, 1.3430801420454541, 0.0), # 49 (2.6501918158567777, 5.376363636363634, 4.066159420289855, 2.2030065359477127, 1.263758865248227, 0.0, 3.4021156088622355, 5.055035460992908, 3.3045098039215692, 2.7107729468599033, 1.3440909090909086, 0.0), # 50 (2.6541735613810746, 5.3803203409090905, 4.055048804347826, 2.2013655882352943, 1.2612411702127657, 0.0, 3.3726249625187408, 5.044964680851063, 3.3020483823529414, 2.7033658695652174, 1.3450800852272726, 0.0), # 51 (2.6580771099744247, 5.384189999999999, 4.043552753623188, 2.1996593464052285, 1.258625815602837, 0.0, 3.3423471097784443, 5.034503262411348, 3.2994890196078432, 2.695701835748792, 1.3460474999999998, 0.0), # 52 (2.6619051470588238, 5.387971931818182, 4.031685833333333, 2.197889183006536, 1.2559151418439718, 0.0, 3.3113318007662835, 5.023660567375887, 3.296833774509804, 2.6877905555555555, 1.3469929829545455, 0.0), # 53 (2.6656603580562663, 5.391665454545453, 4.019462608695652, 2.1960564705882355, 1.2531114893617021, 0.0, 3.2796287856071964, 5.0124459574468085, 3.2940847058823532, 2.6796417391304344, 1.3479163636363634, 0.0), # 54 (2.6693454283887466, 5.395269886363636, 4.006897644927536, 2.1941625816993464, 
1.2502171985815602, 0.0, 3.2472878144261204, 5.000868794326241, 3.29124387254902, 2.6712650966183573, 1.348817471590909, 0.0), # 55 (2.6729630434782607, 5.398784545454545, 3.994005507246377, 2.1922088888888887, 1.247234609929078, 0.0, 3.214358637347993, 4.988938439716312, 3.2883133333333334, 2.662670338164251, 1.3496961363636362, 0.0), # 56 (2.6765158887468035, 5.402208750000001, 3.980800760869565, 2.1901967647058824, 1.2441660638297871, 0.0, 3.180891004497751, 4.9766642553191485, 3.285295147058824, 2.6538671739130435, 1.3505521875000002, 0.0), # 57 (2.6800066496163684, 5.405541818181817, 3.967297971014492, 2.188127581699346, 1.2410139007092198, 0.0, 3.1469346660003334, 4.964055602836879, 3.2821913725490197, 2.6448653140096616, 1.3513854545454542, 0.0), # 58 (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59 ) passenger_allighting_rate = ( (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 
0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 
0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 
0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59 ) """ parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html """ # initial entropy entropy = 258194110137029475889902652135037600173 # index for seed sequence child child_seed_index = ( 1, # 0 21, # 1 )
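A minimal sketch of how constants like `entropy` and `child_seed_index` are typically consumed, following the NumPy parallel-RNG guide linked in the docstring; the surrounding usage below is an assumption, not part of the original file.

```python
# Hypothetical usage sketch (assumption): derive independent child generators
# from the stored entropy, as described in the linked NumPy documentation.
import numpy as np

entropy = 258194110137029475889902652135037600173
child_seed_index = (1, 21)

root_seq = np.random.SeedSequence(entropy)
children = root_seq.spawn(max(child_seed_index) + 1)   # spawn enough children to index into
rngs = [np.random.default_rng(children[i]) for i in child_seed_index]
print([rng.random() for rng in rngs])                  # reproducible across runs
```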
py
1a45b307fc9d7140a8ae4ec0638da2d2440c969f
# -*- coding: utf-8 -*- # # michael a.g. aïvázis # orthologue # (c) 1998-2022 all rights reserved # from .SI import meter from .SI import kilo, centi, milli, micro, nano # # definitions of common length units # data taken from Appendix F of Halliday, Resnick, Walker, "Fundamentals of Physics", # fourth edition, John Wiley and Sons, 1993 nanometer = nano * meter micrometer = micro * meter millimeter = milli * meter centimeter = centi * meter kilometer = kilo * meter # aliases m = meter nm = nanometer um = micrometer micron = micrometer mm = millimeter cm = centimeter km = kilometer # British units inch = 2.540 * centimeter foot = 12 * inch yard = 3 * foot mile = 5280 * foot mil = 1e-3 * inch fathom = 6 * foot nautical_mile = 1852 * meter # others angstrom = 1e-10 * meter fermi = 1e-15 * meter astronomical_unit = 1.49598e11 * meter light_year = 9.460e12 * kilometer parsec = 3.084e13 * kilometer # end of file
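A short usage sketch for the unit constants above; the import path is an assumption about where this module lives, and the arithmetic simply mirrors the derivations in the file itself (units compose multiplicatively).

```python
# Hypothetical usage sketch; adjust the import to wherever the package exposes
# this length module.
from pyre.units.length import km, mile, foot, cm  # hypothetical import path

marathon = 42.195 * km
print(marathon / mile)     # ~26.2: the marathon distance expressed in miles
print((6 * foot) / cm)     # ~182.88: six feet expressed in centimetres
```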
py
1a45b4399c074558e1ecb061366e2ef8659d8017
#!/usr/bin/env python3 # Modules import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB, MultinomialNB from sklearn.svm import SVC import time import torch import torch.nn as nn import torch.optim as optim from xgboost import XGBClassifier class NaiveBayes: """Naive Bayes classifier""" def __init__(self): self.categorical = 'object' self.numerical = 'number' def cat_num_split(self, feature): # Split features into categorical and numberical types feature_cat = feature.select_dtypes(include=self.categorical) feature_num = feature.select_dtypes(include=self.numerical) return feature_cat, feature_num def train(self, train_cat, train_num, train_labels): # Instantiate the classifiers mnb = MultinomialNB() gnb = GaussianNB() # Train classifier if train_cat.shape[1]: mnb.fit(train_cat, train_labels) if train_num.shape[1]: gnb.fit(train_num, train_labels) return mnb, gnb def predict(self, feature_cat, feature_num, mnb, gnb): # Predict labels cat_pred = mnb.predict_proba(feature_cat) if feature_cat.shape[1] else 0 cat_pred *= feature_cat.shape[1] num_pred = gnb.predict_proba(feature_num) if feature_num.shape[1] else 0 num_pred *= feature_num.shape[1] return (cat_pred + num_pred).argmax(axis=1) def __call__(self, train, test): # Record start time print('==============NAIVE BAYES==============') start = time.time() # Split train and test into categorical and numerical features train_cat, train_num = self.cat_num_split(train.iloc[:, :-1]) test_cat, test_num = self.cat_num_split(test.iloc[:, :-1]) # Assign labels train_labels = train.iloc[:, -1] test_labels = test.iloc[:, -1] # Train Gaussian and Multinomial classifiers mnb, gnb = self.train(train_cat, train_num, train_labels) # Predict train and test labels train_pred = self.predict(train_cat, train_num, mnb, gnb) test_pred = self.predict(test_cat, test_num, mnb, gnb) # Print results train_hit = (train_labels == train_pred).sum() test_hit = (test_labels == test_pred).sum() print('Train accuracy: %.2f%%' % (100 * train_hit / len(train))) print('Test accuracy: %.2f%%' % (100 * test_hit / len(test))) print('Time: %.1f seconds' % (time.time() - start)) print('') class Logistic: """Logistic regression classifier""" def __init__(self): pass def train(self, train): # Instantiate the classifiers mlr = LogisticRegression(solver='sag', max_iter=10000, n_jobs=-1, multi_class='multinomial') ovr = LogisticRegression(solver='sag', max_iter=10000, n_jobs=-1, multi_class='ovr') # Train classifier mlr.fit(train.iloc[:, :-1], train.iloc[:, -1]) ovr.fit(train.iloc[:, :-1], train.iloc[:, -1]) return mlr, ovr def predict(self, feature, mlr, ovr): return mlr.predict(feature), ovr.predict(feature) def __call__(self, train, test): # Record start time print('==========LOGISTIC REGRESSION==========') start = time.time() # Train Logistic regression classifiers mlr, ovr = self.train(train) # Predict train and test labels train_pred_mlr, train_pred_ovr = self.predict(train.iloc[:, :-1], mlr, ovr) test_pred_mlr, test_pred_ovr = self.predict(test.iloc[:, :-1], mlr, ovr) # Print results train_hit_mlr = (train.iloc[:, -1] == train_pred_mlr).sum() train_hit_ovr = (train.iloc[:, -1] == train_pred_ovr).sum() test_hit_mlr = (test.iloc[:, -1] == test_pred_mlr).sum() test_hit_ovr = (test.iloc[:, -1] == test_pred_ovr).sum() print('Train accuracy: %5.2f%% (Multinomial), %6.2f%% (One-vs-Rest)' % (100 * train_hit_mlr / len(train), 100 * train_hit_ovr / len(train))) print('Test 
accuracy: %5.2f%% (Multinomial), %6.2f%% (One-vs-Rest)' % (100 * test_hit_mlr / len(test), 100 * test_hit_ovr / len(test))) print('Time: %.1f seconds' % (time.time() - start)) print('') class SVM: """Support Vector Machine classifier""" def __init__(self): pass def train(self, train): svc = SVC() svc.fit(train.iloc[:, :-1], train.iloc[:, -1]) return svc def predict(self, feature, svc): return svc.predict(feature) def __call__(self, train, test): # Record start time print('========SUPPORT VECTOR MACHINE=========') start = time.time() # Train support vector machine svc = self.train(train) # Predict train and test labels train_pred = self.predict(train.iloc[:, :-1], svc) test_pred = self.predict(test.iloc[:, :-1], svc) # Print results train_hit = (train.iloc[:, -1] == train_pred).sum() test_hit = (test.iloc[:, -1] == test_pred).sum() print('Train accuracy: %5.2f%% (Radial Basis Function)' % (100 * train_hit / len(train))) print('Test accuracy: %5.2f%% (Radial Basis Function)' % (100 * test_hit / len(test))) print('Time: %.1f seconds' % (time.time() - start)) print('') class RandomForest: """Random Forest classifier""" def __init__(self): pass def train(self, train, trees): # Instantiate the classifiers if trees == 1: rfc = RandomForestClassifier(n_estimators=trees, n_jobs=-1, bootstrap=False) else: rfc = RandomForestClassifier(n_estimators=trees, n_jobs=-1, bootstrap=True) # Train classifier rfc.fit(train.iloc[:, :-1], train.iloc[:, -1]) return rfc def predict(self, feature, rfc): return rfc.predict(feature) def __call__(self, train, test): # Record start time print('=============RANDOM FOREST=============') start = time.time() # Train Random Forest classifiers rfc1 = self.train(train, 1) rfc10 = self.train(train, 10) rfc100 = self.train(train, 100) # Predict train and test labels train_pred_1 = self.predict(train.iloc[:, :-1], rfc1) train_pred_10 = self.predict(train.iloc[:, :-1], rfc10) train_pred_100 = self.predict(train.iloc[:, :-1], rfc100) test_pred_1 = self.predict(test.iloc[:, :-1], rfc1) test_pred_10 = self.predict(test.iloc[:, :-1], rfc10) test_pred_100 = self.predict(test.iloc[:, :-1], rfc100) # Print results train_hit_1 = (train.iloc[:, -1] == train_pred_1).sum() train_hit_10 = (train.iloc[:, -1] == train_pred_10).sum() train_hit_100 = (train.iloc[:, -1] == train_pred_100).sum() test_hit_1 = (test.iloc[:, -1] == test_pred_1).sum() test_hit_10 = (test.iloc[:, -1] == test_pred_10).sum() test_hit_100 = (test.iloc[:, -1] == test_pred_100).sum() print('Train accuracy: %5.2f%% (1 Trees), %6.2f%% (10 Trees), %6.2f%% (100 Trees)' % (100 * train_hit_1 / len(train), 100 * train_hit_10 / len(train), 100 * train_hit_100 / len(train))) print('Test accuracy: %5.2f%% (1 Trees), %6.2f%% (10 Trees), %6.2f%% (100 Trees)' % (100 * test_hit_1 / len(test), 100 * test_hit_10 / len(test), 100 * test_hit_100 / len(test))) print('Time: %.1f seconds' % (time.time() - start)) print('') class XGBoost: """XGBoost""" def __init__(self): pass def train(self, train): xgb = XGBClassifier(max_depth=3, n_estimators=100) xgb.fit(train.iloc[:, :-1], train.iloc[:, -1]) return xgb def predict(self, feature, xgb): return xgb.predict(feature) def __call__(self, train, test): # Record start time print('================XGBOOST================') start = time.time() # Train support vector machine xgb = self.train(train) # Predict train and test labels train_pred = self.predict(train.iloc[:, :-1], xgb) test_pred = self.predict(test.iloc[:, :-1], xgb) # Print results train_hit = (train.iloc[:, -1] == train_pred).sum() test_hit 
= (test.iloc[:, -1] == test_pred).sum() print('Train accuracy: %5.2f%% (Max Depth of 3, 100 Trees)' % (100 * train_hit / len(train))) print('Test accuracy: %5.2f%% (Max Depth of 3, 100 Trees)' % (100 * test_hit / len(test))) print('Time: %.1f seconds' % (time.time() - start)) print('') class NeuralNet: """Neural network with a single hidden layer""" def __init__(self): self.hidden = 100 self.batch = 50 def train(self, train): # Convert train dataset to tensor feature = torch.from_numpy(train.iloc[:, :-1].values).float() label = torch.from_numpy(train.iloc[:, -1].values).long() # Initialize the neural network net = nn.Sequential(nn.Linear(feature.shape[1], self.hidden), nn.ReLU(), nn.Linear(self.hidden, len(set(train.iloc[:, -1])))) # Optimizer optimizer = optim.Adam(net.parameters(), lr=0.01) criterion = nn.CrossEntropyLoss() # Train classifier for i in range(10000): permutation = np.random.choice(feature.shape[0], self.batch) optimizer.zero_grad() output = net(feature[permutation]) loss = criterion(output, label[permutation]) loss.backward() optimizer.step() return net def predict(self, feature, net): feature = torch.from_numpy(feature.values).float() return net(feature) def __call__(self, train, test): # Record start time print('============NEURAL NETWORK=============') start = time.time() # Train support vector machine net = self.train(train) # Predict train and test labels train_pred = self.predict(train.iloc[:, :-1], net) train_pred = train_pred.detach().numpy().argmax(axis=1) test_pred = self.predict(test.iloc[:, :-1], net) test_pred = test_pred.detach().numpy().argmax(axis=1) # Print results train_hit = (train.iloc[:, -1].values == train_pred).sum() test_hit = (test.iloc[:, -1].values == test_pred).sum() print('Train accuracy: %5.2f%% (Single Hidden Layer)' % (100 * train_hit / len(train))) print('Test accuracy: %5.2f%% (Single Hidden Layer)' % (100 * test_hit / len(test))) print('Time: %.1f seconds' % (time.time() - start)) print('')
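A hedged usage sketch for the classifier wrappers above: each `__call__` expects pandas DataFrames whose last column holds the label, so a small driver might look like the following (the module name and dataset choice are assumptions).

```python
# Hypothetical driver; assumes the module above is saved as classifiers.py and
# that the label sits in the last DataFrame column, exactly as each __call__
# implementation expects.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

from classifiers import NaiveBayes, Logistic, RandomForest  # hypothetical module name

frame = load_iris(as_frame=True).frame            # last column ("target") is the label
train, test = train_test_split(frame, test_size=0.3, random_state=0)

for model in (NaiveBayes(), Logistic(), RandomForest()):
    model(train, test)                            # each class prints its own accuracy report
```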
py
1a45b5767fc1520184599628ff525888fec2e7c4
import csv from datetime import date, timedelta from getpass import getpass import os from bs4 import BeautifulSoup import requests LOGIN_AUTH_TOKEN_NAME = '__RequestVerificationToken' LOGIN_GET_URL = 'https://cjs.shelbycountytn.gov/CJS/Account/Login' LOGIN_POST_URL = 'https://cjs.shelbycountytn.gov/CJS/' class LoginFailed(Exception): pass def login(session): username = os.environ.get('SCCJSP_USERNAME') or input('username: ') password = os.environ.get('SCCJSP_PASSWORD') or getpass('password: ') login_get_resp = session.get(LOGIN_GET_URL) login_get_parsed = BeautifulSoup(login_get_resp.content, 'html.parser') token = login_get_parsed.find('input', {'name': LOGIN_AUTH_TOKEN_NAME}) login_data = { LOGIN_AUTH_TOKEN_NAME: token.get('value'), 'UserName': username, 'Password': password } login_post_resp = session.post(login_get_resp.url, data=login_data) login_post_parsed = BeautifulSoup(login_post_resp.content, 'html.parser') sso_data = { hidden_input.get('name'): hidden_input.get('value') for hidden_input in login_post_parsed.find_all('input', {'type': 'hidden'}) } action = login_post_parsed.find('form').get('action') if action != LOGIN_POST_URL: raise LoginFailed session.post(action, data=sso_data) def get_data(): session = requests.session() login(session) tomorrow_formatted = (date.today() + timedelta(days=1)).strftime("%m/%d/%Y") session.post("https://cjs.shelbycountytn.gov/CJS/Hearing/SearchHearings/HearingSearch", data = { "PortletName": "HearingSearch", "Settings.CaptchaEnabled": "False", "Settings.DefaultLocation": "All Locations", "SearchCriteria.SelectedCourt": "All Locations", "SearchCriteria.SelectedHearingType": "All Hearing Types", "SearchCriteria.SearchByType": "Courtroom", "SearchCriteria.SelectedCourtRoom": "1088", # Division 10 "SearchCriteria.DateFrom": tomorrow_formatted, "SearchCriteria.DateTo": tomorrow_formatted, }) resp = session.post("https://cjs.shelbycountytn.gov/CJS/Hearing/HearingResults/Read", data = { "sort": "", "group": "", "filter": "", "portletId": "27", }) return resp.json() def write_data(data): to_write = data['Data'] with open('data.csv', 'w', newline='') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=to_write[0].keys()) writer.writeheader() writer.writerows(to_write) def main(): data = get_data() write_data(data) if __name__ == "__main__": main()
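A small hedged sketch of driving this scraper non-interactively: `login()` falls back to prompting unless the two environment variables are set, so a wrapper could export them before calling `main()`.

```python
# Hypothetical wrapper; assumes the file above is saved as scraper.py.
# Credentials are read from SCCJSP_USERNAME / SCCJSP_PASSWORD, exactly as
# login() checks, so setting them avoids the interactive prompts.
import os
import scraper  # hypothetical module name

os.environ.setdefault("SCCJSP_USERNAME", "my-user")       # placeholder value
os.environ.setdefault("SCCJSP_PASSWORD", "my-password")   # placeholder value
scraper.main()   # writes tomorrow's Division 10 hearings to data.csv
```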
py
1a45b5b10d379947c7a1c8d67b675eda6f078241
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for utils.py.""" from __future__ import annotations import base64 import copy import datetime import os import urllib from core import feconf from core import python_utils from core import utils from core.constants import constants from core.tests import test_utils from typing import Any, Dict, List class UtilsTests(test_utils.GenericTestBase): """Test the core utility methods.""" def test_get_comma_sep_string_from_list(self) -> None: """Test get_comma_sep_string_from_list method.""" alist = ['a', 'b', 'c', 'd'] results = ['', 'a', 'a and b', 'a, b and c', 'a, b, c and d'] for i in range(len(alist) + 1): comma_sep_string = utils.get_comma_sep_string_from_list(alist[:i]) self.assertEqual(comma_sep_string, results[i]) def test_to_ascii(self) -> None: """Test to_ascii method.""" parsed_str = utils.to_ascii('abc') self.assertEqual(parsed_str, 'abc') parsed_str = utils.to_ascii('¡Hola!') self.assertEqual(parsed_str, 'Hola!') parsed_str = utils.to_ascii( u'Klüft skräms inför på fédéral électoral große') self.assertEqual( parsed_str, 'Kluft skrams infor pa federal electoral groe') parsed_str = utils.to_ascii('') self.assertEqual(parsed_str, '') def test_yaml_dict_conversion(self) -> None: """Test yaml_from_dict and dict_from_yaml methods.""" test_dicts = [{}, {'a': 'b'}, {'a': 2}, {'a': ['b', 2, {'c': 3.5}]}] for adict in test_dicts: yaml_str = python_utils.yaml_from_dict(adict) # type: ignore[no-untyped-call] yaml_dict = utils.dict_from_yaml(yaml_str) self.assertEqual(adict, yaml_dict) with self.assertRaisesRegex( # type: ignore[no-untyped-call] utils.InvalidInputException, 'while parsing a flow node\n' 'expected the node content, but found \'<stream end>\'\n'): yaml_str = utils.dict_from_yaml('{') def test_recursively_remove_key_for_empty_dict(self) -> None: """Test recursively_remove_key method for an empty dict.""" d: Dict[str, Any] = {} utils.recursively_remove_key(d, 'a') self.assertEqual(d, {}) def test_recursively_remove_key_for_single_key_dict(self) -> None: """Test recursively_remove_key method for single key dict.""" d = {'a': 'b'} utils.recursively_remove_key(d, 'a') self.assertEqual(d, {}) def test_recursively_remove_key_for_multi_key_dict(self) -> None: """Test recursively_remove_key method for multi key dict.""" d = {'a': 'b', 'c': 'd'} utils.recursively_remove_key(d, 'a') self.assertEqual(d, {'c': 'd'}) def test_recursively_remove_key_for_dict_with_value_dict(self) -> None: """Test recursively_remove_key method for dict with a value dict.""" d = {'a': 'b', 'c': {'a': 'b'}} utils.recursively_remove_key(d, 'a') self.assertEqual(d, {'c': {}}) def test_recursively_remove_key_for_list(self) -> None: """Test recursively_remove_key method for list.""" l = ['a', 'b', {'c': 'd'}] utils.recursively_remove_key(l, 'c') self.assertEqual(l, ['a', 'b', {}]) def test_camelcase_to_hyphenated(self) -> None: """Test camelcase_to_hyphenated method.""" test_cases = [ ('AbcDef', 
'abc-def'), ('Abc', 'abc'), ('abc_def', 'abc_def'), ('Abc012Def345', 'abc012-def345'), ('abcDef', 'abc-def'), ] for test_case in test_cases: self.assertEqual( utils.camelcase_to_hyphenated(test_case[0]), test_case[1]) def test_camelcase_to_snakecase(self) -> None: """Test camelcase_to_hyphenated method.""" test_cases = [ ('AbcDef', 'abc_def'), ('Abc', 'abc'), ('abc_def', 'abc_def'), ('Abc012Def345', 'abc012_def345'), ('abcDef', 'abc_def'), ('abc-def', 'abc-def'), ] for test_case in test_cases: self.assertEqual( utils.camelcase_to_snakecase(test_case[0]), test_case[1]) def test_set_url_query_parameter(self) -> None: """Test set_url_query_parameter method.""" self.assertEqual( utils.set_url_query_parameter('http://www.test.com', 'a', 'b'), 'http://www.test.com?a=b' ) self.assertEqual( utils.set_url_query_parameter('http://www.test.com?a=b', 'c', 'd'), 'http://www.test.com?a=b&c=d' ) self.assertEqual( utils.set_url_query_parameter( 'http://test.com?a=b', 'redirectUrl', 'http://redirect.com'), 'http://test.com?a=b&redirectUrl=http%3A%2F%2Fredirect.com' ) with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'URL query parameter name must be a string' ): utils.set_url_query_parameter('http://test.com?a=b', None, 'value') # type: ignore[arg-type] def test_convert_to_hash(self) -> None: """Test convert_to_hash() method.""" orig_string = 'name_to_convert' full_hash = utils.convert_to_hash(orig_string, 28) abbreviated_hash = utils.convert_to_hash(orig_string, 5) self.assertEqual(len(full_hash), 28) self.assertEqual(len(abbreviated_hash), 5) self.assertEqual(full_hash[:5], abbreviated_hash) self.assertTrue(full_hash.isalnum()) def test_vfs_construct_path(self) -> None: """Test vfs_construct_path method.""" p = utils.vfs_construct_path('a', 'b', 'c') self.assertEqual(p, 'a/b/c') p = utils.vfs_construct_path('a/', '/b', 'c') self.assertEqual(p, '/b/c') p = utils.vfs_construct_path('a/', 'b', 'c') self.assertEqual(p, 'a/b/c') p = utils.vfs_construct_path('a', '/b', 'c') self.assertEqual(p, '/b/c') p = utils.vfs_construct_path('/a', 'b/') self.assertEqual(p, '/a/b/') def test_vfs_normpath(self) -> None: p = utils.vfs_normpath('/foo/../bar') self.assertEqual(p, '/bar') p = utils.vfs_normpath('foo//bar') self.assertEqual(p, 'foo/bar') p = utils.vfs_normpath('foo/bar/..') self.assertEqual(p, 'foo') p = utils.vfs_normpath('/foo//bar//baz//') self.assertEqual(p, '/foo/bar/baz') p = utils.vfs_normpath('') self.assertEqual(p, '.') p = utils.vfs_normpath('//foo//bar//baz//') self.assertEqual(p, '//foo/bar/baz') def test_capitalize_string(self) -> None: test_data: List[List[str]] = [ ['', ''], ['a', 'A'], ['A', 'A'], ['1', '1'], ['lowercase', 'Lowercase'], ['UPPERCASE', 'UPPERCASE'], ['Partially', 'Partially'], ['miDdle', 'MiDdle'], ['2be', '2be'], ] for datum in test_data: self.assertEqual(utils.capitalize_string(datum[0]), datum[1]) def test_generate_random_string(self) -> None: # Generate a random string of length 12. 
random_string = utils.generate_random_string(12) self.assertIsInstance(random_string, str) self.assertEqual(len(random_string), 12) def test_convert_png_data_url_to_binary_with_incorrect_prefix(self) -> None: with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'The given string does not represent a PNG data URL' ): utils.convert_png_data_url_to_binary('data:image/jpg;base64,') def test_get_thumbnail_icon_url_for_category(self) -> None: self.assertEqual( utils.get_thumbnail_icon_url_for_category('Architecture'), '/subjects/Architecture.svg') self.assertEqual( utils.get_thumbnail_icon_url_for_category('Graph Theory'), '/subjects/GraphTheory.svg') self.assertEqual( utils.get_thumbnail_icon_url_for_category('Nonexistent'), '/subjects/Lightbulb.svg') def test_are_datetimes_close(self) -> None: initial_time = datetime.datetime(2016, 12, 1, 0, 0, 0) with self.swap(feconf, 'PROXIMAL_TIMEDELTA_SECS', 2): self.assertTrue(utils.are_datetimes_close( datetime.datetime(2016, 12, 1, 0, 0, 1), initial_time)) self.assertFalse(utils.are_datetimes_close( datetime.datetime(2016, 12, 1, 0, 0, 3), initial_time)) def test_conversion_between_string_and_naive_datetime_object(self) -> None: """Tests to make sure converting a naive datetime object to a string and back doesn't alter the naive datetime object data. """ now = datetime.datetime.utcnow() self.assertEqual( utils.convert_string_to_naive_datetime_object( utils.convert_naive_datetime_to_string(now)), now) def test_datetime_conversion_to_string_returns_correct_format(self) -> None: initial_time = datetime.datetime(2016, 12, 1, 1, 2, 3) self.assertEqual( utils.convert_naive_datetime_to_string(initial_time), '12/01/2016, 01:02:03:000000') def test_string_to_datetime_conversion_returns_correct_datetime( self ) -> None: time_string = '12/01/2016, 01:02:03:000000' initial_time = datetime.datetime(2016, 12, 1, 1, 2, 3) self.assertEqual( utils.convert_string_to_naive_datetime_object(time_string), initial_time) def test_create_string_from_largest_unit_in_timedelta_raises_for_zero_diff( self ) -> None: timedelta_object = datetime.timedelta(days=0) with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'Expected a positive timedelta, received: %s.' % ( timedelta_object.total_seconds())): utils.create_string_from_largest_unit_in_timedelta(timedelta_object) def test_create_string_from_largest_unit_in_timedelta_raises_for_neg_diff( self ) -> None: timedelta_object = datetime.timedelta(days=-40) with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'Expected a positive timedelta, received: %s.' 
% ( timedelta_object.total_seconds())): utils.create_string_from_largest_unit_in_timedelta(timedelta_object) def test_create_string_from_largest_unit_in_timedelta_returns_days( self ) -> None: timedelta_object = datetime.timedelta( days=4, hours=1, minutes=1, seconds=1) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '4 days') def test_create_string_from_largest_unit_in_timedelta_returns_a_day( self ) -> None: timedelta_object = datetime.timedelta( days=1, hours=1, minutes=1, seconds=1) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '1 day') def test_create_string_from_largest_unit_in_timedelta_returns_hours( self ) -> None: timedelta_object = datetime.timedelta( days=0, hours=2, minutes=1, seconds=1) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '2 hours') def test_create_string_from_largest_unit_in_timedelta_returns_an_hour( self ) -> None: timedelta_object = datetime.timedelta( days=0, hours=1, minutes=1, seconds=1) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '1 hour') def test_create_string_from_largest_unit_in_timedelta_returns_minutes( self ) -> None: timedelta_object = datetime.timedelta( days=0, hours=0, minutes=4, seconds=1) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '4 minutes') def test_create_string_from_largest_unit_in_timedelta_returns_a_minute( self ) -> None: timedelta_object = datetime.timedelta( days=0, hours=0, minutes=1, seconds=12) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '1 minute') def test_create_string_from_largest_unit_in_timedelta_returns_a_min_for_min( self ) -> None: timedelta_object = datetime.timedelta( days=0, hours=0, minutes=1, seconds=0) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '1 minute') def test_create_string_from_largest_unit_in_timedelta_returns_minute_if_sec( self ) -> None: timedelta_object = datetime.timedelta( days=0, hours=0, minutes=0, seconds=1) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '1 minute') def test_create_string_from_largest_unit_in_timedelta_returns_a_min_if_msec( self ) -> None: timedelta_object = datetime.timedelta( days=0, hours=0, minutes=0, seconds=0, milliseconds=1) time_string = ( utils.create_string_from_largest_unit_in_timedelta(timedelta_object) ) self.assertEqual(time_string, '1 minute') def test_get_hashable_value(self) -> None: json1 = ['foo', 'bar', {'baz': 3}] json2 = ['fee', {'fie': ['foe', 'fum']}] json1_deepcopy = copy.deepcopy(json1) json2_deepcopy = copy.deepcopy(json2) test_set = {utils.get_hashable_value(json1)} self.assertIn(utils.get_hashable_value(json1_deepcopy), test_set) test_set.add(utils.get_hashable_value(json2)) self.assertEqual( test_set, { utils.get_hashable_value(json1_deepcopy), utils.get_hashable_value(json2_deepcopy), }) def test_is_supported_audio_language_code(self) -> None: self.assertTrue(utils.is_supported_audio_language_code('hi-en')) self.assertFalse(utils.is_supported_audio_language_code('unknown')) def test_is_valid_language_code(self) -> None: self.assertTrue(utils.is_valid_language_code('en')) 
self.assertFalse(utils.is_valid_language_code('unknown')) def test_require_valid_name(self) -> None: name = 'name' utils.require_valid_name(name, 'name_type') invalid_name = 0 with self.assertRaisesRegex(Exception, '0 must be a string.'): # type: ignore[no-untyped-call] # Type ignore is used below because we are providing integer # argument instead of string for invalid_name for testing purposes. utils.require_valid_name(invalid_name, 'name_type') # type: ignore[arg-type] def test_require_valid_meta_tag_content(self) -> None: meta_tag_content = 'name' utils.require_valid_meta_tag_content(meta_tag_content) non_string_meta_tag_content = 0 invalid_type_error = ( 'Expected meta tag content to be a string, received 0') with self.assertRaisesRegex(Exception, invalid_type_error): # type: ignore[no-untyped-call] utils.require_valid_meta_tag_content(non_string_meta_tag_content) # type: ignore[arg-type] lengthy_meta_tag_content = 'a' * 200 max_length_error = ( 'Meta tag content should not be longer than %s characters.' % constants.MAX_CHARS_IN_META_TAG_CONTENT) with self.assertRaisesRegex(Exception, max_length_error): # type: ignore[no-untyped-call] utils.require_valid_meta_tag_content(lengthy_meta_tag_content) def test_require_valid_page_title_fragment_for_web(self) -> None: page_title_fragment_for_web = 'name' utils.require_valid_page_title_fragment_for_web( page_title_fragment_for_web) non_string_page_title_fragment_for_web = 0 invalid_type_error = ( 'Expected page title fragment to be a string, received 0') with self.assertRaisesRegex(Exception, invalid_type_error): # type: ignore[no-untyped-call] utils.require_valid_page_title_fragment_for_web( non_string_page_title_fragment_for_web) # type: ignore[arg-type] lengthy_page_title_fragment_for_web = 'a' * 60 max_length_error = ( 'Page title fragment should not be longer than %s characters.' % constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB) with self.assertRaisesRegex(Exception, max_length_error): # type: ignore[no-untyped-call] utils.require_valid_page_title_fragment_for_web( lengthy_page_title_fragment_for_web) def test_require_valid_url_fragment(self) -> None: name = 'name' utils.require_valid_url_fragment(name, 'name-type', 20) name_with_spaces = 'name with spaces' name_with_spaces_expected_error = ( 'name-type field contains invalid characters. Only ' 'lowercase words separated by hyphens are allowed. ' 'Received name with spaces.') with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, name_with_spaces_expected_error): utils.require_valid_url_fragment( name_with_spaces, 'name-type', 20) name_in_caps = 'NAME' name_in_caps_expected_error = ( 'name-type field contains invalid characters. Only ' 'lowercase words separated by hyphens are allowed. Received NAME.') with self.assertRaisesRegex(Exception, name_in_caps_expected_error): # type: ignore[no-untyped-call] utils.require_valid_url_fragment( name_in_caps, 'name-type', 20) name_with_numbers = 'nam3' name_with_numbers_expected_error = ( 'name-type field contains invalid characters. Only ' 'lowercase words separated by hyphens are allowed. 
Received nam3.') with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, name_with_numbers_expected_error): utils.require_valid_url_fragment( name_with_numbers, 'name-type', 20) long_name = 'a-really-really-really-lengthy-name' long_name_expected_error = ( 'name-type field should not exceed 10 characters, ' 'received %s' % long_name) with self.assertRaisesRegex(Exception, long_name_expected_error): # type: ignore[no-untyped-call] utils.require_valid_url_fragment( long_name, 'name-type', 10) empty_name = '' empty_name_expected_error = 'name-type field should not be empty.' with self.assertRaisesRegex(Exception, empty_name_expected_error): # type: ignore[no-untyped-call] utils.require_valid_url_fragment(empty_name, 'name-type', 20) non_string_name = 0 non_string_name_expected_error = ( 'name-type field must be a string. Received 0.') with self.assertRaisesRegex(Exception, non_string_name_expected_error): # type: ignore[no-untyped-call] utils.require_valid_url_fragment(non_string_name, 'name-type', 20) # type: ignore[arg-type] def test_validate_convert_to_hash(self) -> None: with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'Expected string, received 1 of type %s' % type(1)): utils.convert_to_hash(1, 10) # type: ignore[arg-type] def test_convert_png_to_data_url_with_non_png_image_raises_error( self ) -> None: favicon_filepath = os.path.join( self.get_static_asset_filepath(), 'assets', 'favicon.ico') # type: ignore[no-untyped-call] with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'The given string does not represent a PNG image.'): utils.convert_png_to_data_url(favicon_filepath) def test_get_exploration_components_from_dir_with_invalid_path_raises_error( self ) -> None: with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'Found invalid non-asset file .+' 'There should only be a single non-asset file, and it should have ' 'a .yaml suffix.' ): utils.get_exploration_components_from_dir('core/tests/load_tests') with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'The only directory in . 
should be assets/'): utils.get_exploration_components_from_dir('.') def test_get_exploration_components_from_dir_with_multiple_yaml_files( self ) -> None: with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'More than one non-asset file specified for ' 'core/tests/data/dummy_assets/assets'): utils.get_exploration_components_from_dir( 'core/tests/data/dummy_assets/assets/') def test_get_exploration_components_from_dir_with_no_yaml_file( self ) -> None: with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'No yaml file specifed for core/tests/data/dummy_assets'): utils.get_exploration_components_from_dir( 'core/tests/data/dummy_assets/') def test_get_asset_dir_prefix_with_prod_mode(self) -> None: with self.swap(constants, 'DEV_MODE', False): self.assertEqual(utils.get_asset_dir_prefix(), '/build') def test_base64_from_int(self) -> None: base64_number = utils.base64_from_int(108) self.assertEqual(base64.b64decode(base64_number), b'[108]') def test_get_supported_audio_language_description_with_invalid_code( self ) -> None: valid_language_code = 'en' expected_language_description = 'English' self.assertEqual( utils.get_supported_audio_language_description(valid_language_code), expected_language_description) invalid_language_code = 'invalid_code' with self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'Unsupported audio language code: invalid_code'): utils.get_supported_audio_language_description( invalid_language_code) def test_is_user_id_valid(self) -> None: self.assertTrue( utils.is_user_id_valid( feconf.SYSTEM_COMMITTER_ID, allow_system_user_id=True)) self.assertTrue( utils.is_user_id_valid( feconf.MIGRATION_BOT_USER_ID, allow_system_user_id=True)) self.assertTrue( utils.is_user_id_valid( feconf.SUGGESTION_BOT_USER_ID, allow_system_user_id=True)) self.assertTrue( utils.is_user_id_valid( 'pid_%s' % ('a' * 32), allow_pseudonymous_id=True)) self.assertTrue( utils.is_user_id_valid('uid_%s' % ('a' * 32))) self.assertFalse( utils.is_user_id_valid('pid_%s' % ('a' * 32))) self.assertFalse( utils.is_user_id_valid('uid_%s%s' % ('a' * 31, 'A'))) self.assertFalse( utils.is_user_id_valid('uid_%s' % ('a' * 31))) self.assertFalse(utils.is_user_id_valid('a' * 36)) def test_is_pseudonymous_id(self) -> None: self.assertTrue(utils.is_pseudonymous_id('pid_' + 'a' * 32)) self.assertFalse(utils.is_pseudonymous_id('uid_' + 'a' * 32)) self.assertFalse(utils.is_pseudonymous_id('uid_' + 'a' * 31 + 'A')) self.assertFalse(utils.is_pseudonymous_id('uid_' + 'a' * 31)) self.assertFalse(utils.is_pseudonymous_id('a' * 36)) def test_snake_case_to_camel_case(self) -> None: camel_case_str1 = utils.snake_case_to_camel_case('user_id_number') camel_case_str2 = utils.snake_case_to_camel_case('hello_world') camel_case_str3 = utils.snake_case_to_camel_case('test1') self.assertEqual(camel_case_str1, 'userIdNumber') self.assertEqual(camel_case_str2, 'helloWorld') self.assertEqual(camel_case_str3, 'test1') def _assert_valid_thumbnail_filename( self, expected_error_substring: str, thumbnail_filename: str ) -> None: """Helper method for test_require_valid_thumbnail_filename.""" with self.assertRaisesRegex( # type: ignore[no-untyped-call] utils.ValidationError, expected_error_substring): utils.require_valid_thumbnail_filename( thumbnail_filename) def test_require_valid_thumbnail_filename(self) -> None: """Test thumbnail filename validation.""" self._assert_valid_thumbnail_filename( 'Expected thumbnail filename to be a string, received 10', 10) # type: ignore[arg-type] 
self._assert_valid_thumbnail_filename( 'Thumbnail filename should not start with a dot.', '.name') self._assert_valid_thumbnail_filename( 'Thumbnail filename should not include slashes or ' 'consecutive dot characters.', 'file/name') self._assert_valid_thumbnail_filename( 'Thumbnail filename should not include slashes or ' 'consecutive dot characters.', 'file..name') self._assert_valid_thumbnail_filename( 'Thumbnail filename should include an extension.', 'name') self._assert_valid_thumbnail_filename( 'Expected a filename ending in svg, received name.jpg', 'name.jpg') filename = 'filename.svg' utils.require_valid_thumbnail_filename(filename) def _assert_valid_image_filename( self, expected_error_substring: str, image_filename: str ) -> None: """Helper method for test_require_valid_image_filename.""" with self.assertRaisesRegex( # type: ignore[no-untyped-call] utils.ValidationError, expected_error_substring): utils.require_valid_image_filename(image_filename) def test_require_valid_image_filename(self) -> None: """Test image filename validation.""" self._assert_valid_image_filename( 'Expected image filename to be a string, received 10', 10) # type: ignore[arg-type] self._assert_valid_image_filename( 'Image filename should not start with a dot.', '.name') self._assert_valid_image_filename( 'Image filename should not include slashes or ' 'consecutive dot characters.', 'file/name') self._assert_valid_image_filename( 'Image filename should not include slashes or ' 'consecutive dot characters.', 'file..name') self._assert_valid_image_filename( 'Image filename should include an extension.', 'name') filename = 'filename.svg' utils.require_valid_image_filename(filename) def test_get_time_in_millisecs(self) -> None: dt = datetime.datetime(2020, 6, 15) msecs = utils.get_time_in_millisecs(dt) self.assertEqual( dt, datetime.datetime.fromtimestamp( python_utils.divide(msecs, 1000.0))) # type: ignore[no-untyped-call] def test_get_time_in_millisecs_with_complicated_time(self) -> None: dt = datetime.datetime(2020, 6, 15, 5, 18, 23, microsecond=123456) msecs = utils.get_time_in_millisecs(dt) self.assertEqual( dt, datetime.datetime.fromtimestamp( python_utils.divide(msecs, 1000.0))) # type: ignore[no-untyped-call] def test_grouper(self) -> None: self.assertEqual( [list(g) for g in utils.grouper(range(7), 3)], [[0, 1, 2], [3, 4, 5], [6, None, None]]) # Returns an iterable of iterables, so we need to combine them into # strings for easier comparison. 
self.assertEqual( [''.join(g) for g in utils.grouper('ABCDEFG', 3, fillvalue='x')], ['ABC', 'DEF', 'Gxx']) def test_partition(self) -> None: is_even = lambda n: (n % 2) == 0 evens, odds = ( utils.partition([10, 8, 1, 5, 6, 4, 3, 7], predicate=is_even)) self.assertEqual(list(evens), [10, 8, 6, 4]) self.assertEqual(list(odds), [1, 5, 3, 7]) def test_enumerated_partition(self) -> None: logs = ['ERROR: foo', 'INFO: bar', 'INFO: fee', 'ERROR: fie'] is_error = lambda msg: msg.startswith('ERROR: ') errors, others = ( utils.partition(logs, predicate=is_error, enumerated=True)) self.assertEqual(list(errors), [(0, 'ERROR: foo'), (3, 'ERROR: fie')]) self.assertEqual(list(others), [(1, 'INFO: bar'), (2, 'INFO: fee')]) def test_convert_png_data_url_to_binary(self) -> None: image_data_url = '%s%s' % ( utils.PNG_DATA_URL_PREFIX, urllib.parse.quote(base64.b64encode(b'test123')) ) self.assertEqual( utils.convert_png_data_url_to_binary(image_data_url), b'test123') def test_convert_png_data_url_to_binary_raises_if_prefix_is_missing( self ) -> None: image_data_url = urllib.parse.quote(base64.b64encode(b'test123')) self.assertRaisesRegex( # type: ignore[no-untyped-call] Exception, 'The given string does not represent a PNG data URL.', lambda: utils.convert_png_data_url_to_binary(image_data_url)) def test_quoted_string(self) -> None: self.assertEqual(utils.quoted('a"b\'c'), '"a\\"b\'c"') def test_is_base64_encoded(self) -> None: image = '<svg><path d="%s" /></svg>' % ( 'M150 0 L75 200 L225 200 Z ' * 1000) self.assertFalse(utils.is_base64_encoded(image)) self.assertFalse(utils.is_base64_encoded('hello')) self.assertTrue(utils.is_base64_encoded( base64.b64encode(b'hello').decode('utf-8')) ) def test_url_open(self) -> None: response = utils.url_open('http://www.google.com') self.assertEqual(response.getcode(), 200) # type: ignore[attr-defined] self.assertEqual( response.url, 'http://www.google.com') # type: ignore[attr-defined]
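For readers skimming these tests, here is a standalone sketch of the two iterator helpers exercised by `test_grouper` and `test_partition`; it is an illustrative re-implementation built on itertools, not the project's actual code.

```python
# Illustrative re-implementations (assumptions, not the project's source) of the
# helpers exercised by the grouper/partition tests above.
import itertools

def grouper(iterable, n, fillvalue=None):
    """Collect items into fixed-length chunks, padding the last one."""
    args = [iter(iterable)] * n
    return itertools.zip_longest(*args, fillvalue=fillvalue)

def partition(iterable, predicate=bool, enumerated=False):
    """Split items into (matching, non-matching) iterators."""
    items = enumerate(iterable) if enumerated else iterable
    copy1, copy2 = itertools.tee(items)
    key = (lambda pair: predicate(pair[1])) if enumerated else predicate
    return filter(key, copy1), itertools.filterfalse(key, copy2)

assert [list(g) for g in grouper(range(7), 3)] == [[0, 1, 2], [3, 4, 5], [6, None, None]]
evens, odds = partition([10, 8, 1, 5, 6, 4, 3, 7], predicate=lambda n: n % 2 == 0)
assert list(evens) == [10, 8, 6, 4] and list(odds) == [1, 5, 3, 7]
```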
py
1a45b6b21a12dbd0c95cb2b22226c7ab8acc0730
import tensorflow as tf _CSV_COLUMNS = [ 'age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'gender', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income_bracket' ] _CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''], [0], [0], [0], [''], ['']] _NUM_EXAMPLES = { 'train': 32561, 'validation': 16281, } # 1. Read the Census Data # 2. Converting Data into Tensors def input_fn(data_file, num_epochs, shuffle, batch_size): """Create an input function for the Estimator.""" assert tf.gfile.Exists(data_file), "{0} not found.".format(data_file) def parse_csv(line): print("Parsing", data_file) # tf.decode_csv converts each csv line into a list of Tensors, one per column. record_defaults specifies the fill value for missing entries in each column columns = tf.decode_csv(line, record_defaults=_CSV_COLUMN_DEFAULTS) features = dict(zip(_CSV_COLUMNS, columns)) features = dict(zip(_CSV_COLUMNS, columns)) if False else features labels = features.pop('income_bracket') return features, tf.equal(labels, '>50K') # tf.equal(x, y) returns a bool Tensor representing x == y, element-wise dataset = tf.data.TextLineDataset(data_file) \ .map(parse_csv, num_parallel_calls=5) if shuffle: dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'] + _NUM_EXAMPLES['validation']) dataset = dataset.repeat(num_epochs) dataset = dataset.batch(batch_size) iterator = dataset.make_one_shot_iterator() batch_features, batch_labels = iterator.get_next() return batch_features, batch_labels # 3. Select and Engineer Features for Model ## 3.1 Base Categorical Feature Columns # Use this when we know all the possible values and there are not many of them relationship = tf.feature_column.categorical_column_with_vocabulary_list( 'relationship', [ 'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried', 'Other-relative' ] ) # Use this when we do not know how many distinct values there are occupation = tf.feature_column.categorical_column_with_hash_bucket( 'occupation', hash_bucket_size=1000 ) education = tf.feature_column.categorical_column_with_vocabulary_list( 'education', [ 'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college', 'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school', '5th-6th', '10th', '1st-4th', 'Preschool', '12th' ] ) marital_status = tf.feature_column.categorical_column_with_vocabulary_list( 'marital_status', [ 'Married-civ-spouse', 'Divorced', 'Married-spouse-absent', 'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'] ) workclass = tf.feature_column.categorical_column_with_vocabulary_list( 'workclass', [ 'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov', 'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked']) # 3.2 Base Continuous Feature Columns age = tf.feature_column.numeric_column('age') education_num = tf.feature_column.numeric_column('education_num') capital_gain = tf.feature_column.numeric_column('capital_gain') capital_loss = tf.feature_column.numeric_column('capital_loss') hours_per_week = tf.feature_column.numeric_column('hours_per_week') # Sometimes the relationship between a continuous feature and the label is not linear. As a hypothetical example, a person's income may grow with age in the early stage of one's career, then the growth may slow at some point, and finally the income decreases after retirement.
In this scenario, using the raw age as a real-valued feature column might not be a good choice because the model can only learn one of the three cases: # 3.2.1 Discretizing continuous features # The reason for doing this: sometimes the relationship between a continuous feature and the label is not linear. It may start out as a positive linear relationship and later turn into a negative one, and such a piecewise relationship is no longer linear when taken as a whole. # bucketization # 10 boundaries, 11 buckets age_buckets = tf.feature_column.bucketized_column( age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) # 3.3 Combined / crossed features education_x_occupation = tf.feature_column.crossed_column( ['education', 'occupation'], hash_bucket_size=1000) age_buckets_x_education_x_occupation = tf.feature_column.crossed_column( [age_buckets, 'education', 'occupation'], hash_bucket_size=1000 ) # 4. The model """ The features so far: 1. CategoricalColumn 2. NumericalColumn 3. BucketizedColumn 4. CrossedColumn All of these are subclasses of FeatureColumn and can be put together """ base_columns = [ education, marital_status, relationship, workclass, occupation, age_buckets, ] crossed_column = [ tf.feature_column.crossed_column( ['education', 'occupation'], hash_bucket_size=1000 ), tf.feature_column.crossed_column( [age_buckets, 'education', 'occupation'], hash_bucket_size=1000 ) ] model_dir = "./model/wide_component" model = tf.estimator.LinearClassifier( model_dir=model_dir, feature_columns=base_columns + crossed_column ) train_file = './data/adult.data' val_file = './data/adult.data' test_file = './data/adult.test' # 5. Train & Evaluate & Predict model.train(input_fn=lambda: input_fn(data_file=train_file, num_epochs=1, shuffle=True, batch_size=512)) results = model.evaluate(input_fn=lambda: input_fn(val_file, 1, False, 512)) for key in sorted(results): print("{0:20}: {1:.4f}".format(key, results[key])) pred_iter = model.predict(input_fn=lambda: input_fn(test_file, 1, False, 1)) for pred in pred_iter: print(pred) break # Too many to print them all, so print only one test_results = model.evaluate(input_fn=lambda: input_fn(test_file, 1, False, 512)) for key in sorted(test_results): print("{0:20}: {1:.4f}".format(key, test_results[key])) # 6. Regularization model = tf.estimator.LinearClassifier( feature_columns=base_columns + crossed_column, model_dir=model_dir, optimizer=tf.train.FtrlOptimizer( learning_rate=0.1, l1_regularization_strength=1.0, l2_regularization_strength=1.0 ) ) # if __name__ == '__main__': # print(tf.VERSION) # data_file = './data/adult.data' # next_batch = input_fn(data_file, num_epochs=1, shuffle=True, batch_size=5) # with tf.Session() as sess: # first_batch = sess.run(next_batch) # print(first_batch[0]) # print(first_batch[1])
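Since the file above builds only the wide (linear) component, here is a hedged sketch of how the same feature columns could feed TensorFlow 1.x's combined wide-and-deep estimator; the hidden-unit sizes, embedding dimension, and model directory are illustrative assumptions, and the column names are reused from the definitions above.

```python
# Hypothetical extension: reuse the columns defined above in a wide & deep model.
# Categorical columns must be wrapped (indicator/embedding) before the DNN part
# can consume them; the dimensions below are illustrative choices.
deep_columns = [
    age, education_num, capital_gain, capital_loss, hours_per_week,
    tf.feature_column.indicator_column(workclass),
    tf.feature_column.indicator_column(relationship),
    tf.feature_column.embedding_column(occupation, dimension=8),
]

wide_and_deep = tf.estimator.DNNLinearCombinedClassifier(
    model_dir="./model/wide_and_deep",            # hypothetical directory
    linear_feature_columns=base_columns + crossed_column,
    dnn_feature_columns=deep_columns,
    dnn_hidden_units=[100, 50],
)
wide_and_deep.train(
    input_fn=lambda: input_fn(train_file, num_epochs=1, shuffle=True, batch_size=512))
```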
py
1a45b74174aa85a0a2910f8556ee1fbdaaa9e805
""" ========================================== Statistical functions (:mod:`scipy.stats`) ========================================== .. module:: scipy.stats This module contains a large number of probability distributions as well as a growing library of statistical functions. Each included distribution is an instance of the class rv_continous: For each given name the following methods are available: .. autosummary:: :toctree: generated/ rv_continuous rv_continuous.pdf rv_continuous.logpdf rv_continuous.cdf rv_continuous.logcdf rv_continuous.sf rv_continuous.logsf rv_continuous.ppf rv_continuous.isf rv_continuous.moment rv_continuous.stats rv_continuous.entropy rv_continuous.fit rv_continuous.expect Calling the instance as a function returns a frozen pdf whose shape, location, and scale parameters are fixed. Similarly, each discrete distribution is an instance of the class rv_discrete: .. autosummary:: :toctree: generated/ rv_discrete rv_discrete.rvs rv_discrete.pmf rv_discrete.logpmf rv_discrete.cdf rv_discrete.logcdf rv_discrete.sf rv_discrete.logsf rv_discrete.ppf rv_discrete.isf rv_discrete.stats rv_discrete.moment rv_discrete.entropy rv_discrete.expect Continuous distributions ======================== .. autosummary:: :toctree: generated/ alpha -- Alpha anglit -- Anglit arcsine -- Arcsine beta -- Beta betaprime -- Beta Prime bradford -- Bradford burr -- Burr cauchy -- Cauchy chi -- Chi chi2 -- Chi-squared cosine -- Cosine dgamma -- Double Gamma dweibull -- Double Weibull erlang -- Erlang expon -- Exponential exponweib -- Exponentiated Weibull exponpow -- Exponential Power f -- F (Snecdor F) fatiguelife -- Fatigue Life (Birnbaum-Sanders) fisk -- Fisk foldcauchy -- Folded Cauchy foldnorm -- Folded Normal frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min frechet_l -- Frechet Left Sided, Weibull_max genlogistic -- Generalized Logistic genpareto -- Generalized Pareto genexpon -- Generalized Exponential genextreme -- Generalized Extreme Value gausshyper -- Gauss Hypergeometric gamma -- Gamma gengamma -- Generalized gamma genhalflogistic -- Generalized Half Logistic gilbrat -- Gilbrat gompertz -- Gompertz (Truncated Gumbel) gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I gumbel_l -- Left Sided Gumbel, etc. 
halfcauchy -- Half Cauchy halflogistic -- Half Logistic halfnorm -- Half Normal hypsecant -- Hyperbolic Secant invgamma -- Inverse Gamma invgauss -- Inverse Gaussian invweibull -- Inverse Weibull johnsonsb -- Johnson SB johnsonsu -- Johnson SU ksone -- Kolmogorov-Smirnov one-sided (no stats) kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats) laplace -- Laplace logistic -- Logistic loggamma -- Log-Gamma loglaplace -- Log-Laplace (Log Double Exponential) lognorm -- Log-Normal lomax -- Lomax (Pareto of the second kind) maxwell -- Maxwell mielke -- Mielke's Beta-Kappa nakagami -- Nakagami ncx2 -- Non-central chi-squared ncf -- Non-central F nct -- Non-central Student's T norm -- Normal (Gaussian) pareto -- Pareto pearson3 -- Pearson type III powerlaw -- Power-function powerlognorm -- Power log normal powernorm -- Power normal rdist -- R-distribution reciprocal -- Reciprocal rayleigh -- Rayleigh rice -- Rice recipinvgauss -- Reciprocal Inverse Gaussian semicircular -- Semicircular t -- Student's T triang -- Triangular truncexpon -- Truncated Exponential truncnorm -- Truncated Normal tukeylambda -- Tukey-Lambda uniform -- Uniform vonmises -- Von-Mises (Circular) wald -- Wald weibull_min -- Minimum Weibull (see Frechet) weibull_max -- Maximum Weibull (see Frechet) wrapcauchy -- Wrapped Cauchy Multivariate distributions ========================== .. autosummary:: :toctree: generated/ multivariate_normal -- Multivariate normal distribution Discrete distributions ====================== .. autosummary:: :toctree: generated/ bernoulli -- Bernoulli binom -- Binomial boltzmann -- Boltzmann (Truncated Discrete Exponential) dlaplace -- Discrete Laplacian geom -- Geometric hypergeom -- Hypergeometric logser -- Logarithmic (Log-Series, Series) nbinom -- Negative Binomial planck -- Planck (Discrete Exponential) poisson -- Poisson randint -- Discrete Uniform skellam -- Skellam zipf -- Zipf Statistical functions ===================== Several of these functions have a similar version in scipy.stats.mstats which work for masked arrays. .. autosummary:: :toctree: generated/ describe -- Descriptive statistics gmean -- Geometric mean hmean -- Harmonic mean kurtosis -- Fisher or Pearson kurtosis kurtosistest -- mode -- Modal value moment -- Central moment normaltest -- skew -- Skewness skewtest -- tmean -- Truncated arithmetic mean tvar -- Truncated variance tmin -- tmax -- tstd -- tsem -- nanmean -- Mean, ignoring NaN values nanstd -- Standard deviation, ignoring NaN values nanmedian -- Median, ignoring NaN values variation -- Coefficient of variation .. autosummary:: :toctree: generated/ cumfreq _ histogram2 _ histogram _ itemfreq _ percentileofscore _ scoreatpercentile _ relfreq _ .. autosummary:: :toctree: generated/ binned_statistic -- Compute a binned statistic for a set of data. binned_statistic_2d -- Compute a 2-D binned statistic for a set of data. binned_statistic_dd -- Compute a d-D binned statistic for a set of data. .. autosummary:: :toctree: generated/ obrientransform signaltonoise bayes_mvs sem zmap zscore .. autosummary:: :toctree: generated/ threshold trimboth trim1 .. autosummary:: :toctree: generated/ f_oneway pearsonr spearmanr pointbiserialr kendalltau linregress .. autosummary:: :toctree: generated/ ttest_1samp ttest_ind ttest_rel kstest chisquare power_divergence ks_2samp mannwhitneyu tiecorrect rankdata ranksums wilcoxon kruskal friedmanchisquare .. 
autosummary:: :toctree: generated/ ansari bartlett levene shapiro anderson binom_test fligner mood Contingency table functions =========================== .. autosummary:: :toctree: generated/ chi2_contingency contingency.expected_freq contingency.margins fisher_exact Plot-tests ========== .. autosummary:: :toctree: generated/ ppcc_max ppcc_plot probplot Masked statistics functions =========================== .. toctree:: stats.mstats Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`) ============================================================================== .. autosummary:: :toctree: generated/ gaussian_kde For many more stat related functions install the software R and the interface package rpy. """ from __future__ import division, print_function, absolute_import from .stats import * from .distributions import * from .rv import * from .morestats import * from ._binned_statistic import * from .kde import gaussian_kde from . import mstats from .contingency import chi2_contingency from ._multivariate import * #remove vonmises_cython from __all__, I don't know why it is included __all__ = [s for s in dir() if not (s.startswith('_') or s.endswith('cython'))] from numpy.testing import Tester test = Tester().test
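A brief usage sketch of the module documented above, sampling from a continuous distribution, fitting it, and running a statistical test; the values noted in comments are typical, not exact.

```python
# Illustrative use of scipy.stats: draw a Gaussian sample, recover its
# parameters by maximum likelihood, and run a one-sample t-test.
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
sample = stats.norm.rvs(loc=5.0, scale=2.0, size=1000, random_state=rng)

loc_hat, scale_hat = stats.norm.fit(sample)          # roughly 5.0 and 2.0
t_stat, p_value = stats.ttest_1samp(sample, popmean=5.0)
print(loc_hat, scale_hat, t_stat, p_value)
```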
py
1a45b788c6711dc642843e5431c68383755a85ed
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum, auto from model_navigator.core import Container from model_navigator.framework import Framework, PyTorch, TensorFlow1, TensorFlow2 class TRITON_LOAD_MODE(Enum): POLL_ONCE = auto() POLL_PERIODICALLY = auto() EXPLICIT = auto() NONE = auto() class TritonServer(Container): image = "nvcr.io/nvidia/tritonserver" tag = "py3" @staticmethod def library_path(framework: Framework): paths = { PyTorch.name: "/opt/tritonserver/lib/pytorch", TensorFlow1.name: "/opt/tritonserver/lib/tensorflow", TensorFlow2.name: "/opt/tritonserver/lib/tensorflow", } return paths[framework.name] @staticmethod def command( framework: Framework, repository_path: str, verbose: bool = False, strict_mode: bool = False, load_mode: TRITON_LOAD_MODE = TRITON_LOAD_MODE.EXPLICIT, metrics: bool = False, ): triton_command = f"tritonserver --model-store={repository_path}" if load_mode in [TRITON_LOAD_MODE.POLL_ONCE, TRITON_LOAD_MODE.POLL_PERIODICALLY]: triton_command += " --model-control-mode=poll" if load_mode == TRITON_LOAD_MODE.POLL_PERIODICALLY: triton_command += " --repository-poll-secs=5" if load_mode == TRITON_LOAD_MODE.EXPLICIT: triton_command += " --model-control-mode=explicit" if verbose: triton_command += " --log-verbose=1" if not strict_mode: triton_command += " --strict-model-config=false" if not metrics: triton_command += " --allow-metrics=false --allow-gpu-metrics=false" if isinstance(framework, (TensorFlow1, TensorFlow2)): version = 1 if framework == TensorFlow1 else 2 triton_command += f" --backend-config=tensorflow,version={version}" return triton_command @staticmethod def api_method(key): methods = { "livenessProbe": "/v2/health/live", "readinessProbe": "/v2/health/ready", } return methods[key]
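A hedged usage sketch for the command builder above; whether model_navigator's Framework subclasses are instantiated or passed as classes is an assumption, so treat the call below as illustrative only.

```python
# Illustrative only: builds the launch command string for an explicit-load
# Triton server. The TensorFlow2() instantiation and the repository path are
# assumptions about how model_navigator is used.
from model_navigator.framework import TensorFlow2

cmd = TritonServer.command(
    framework=TensorFlow2(),
    repository_path="/models",          # hypothetical model repository path
    verbose=True,
    load_mode=TRITON_LOAD_MODE.EXPLICIT,
)
print(cmd)
# roughly: tritonserver --model-store=/models --model-control-mode=explicit
#          --log-verbose=1 --strict-model-config=false --allow-metrics=false
#          --allow-gpu-metrics=false --backend-config=tensorflow,version=2
```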
py
1a45b8dd5b619b740809b86a3bb04f66c028143a
from tree_common import TreeNode, insert, inOrderTraversal from typing import List from ...fundamentals.linked_lists.single_list import Node as LinkedNode """ Use Created linked lists from a tree """ def tree_to_linked_lists(root:TreeNode) -> List[LinkedNode]: result = [LinkedNode] level_to_linked_list(root, result, 0) return result def level_to_linked_list(root:TreeNode, result:List[LinkedNode], level:int): if not root: return if len(result) == level: result.append([LinkedNode(root)]) else: singly_node = result[level] while singly_node.n: singly_node = singly_node.n singly_node.next = LinkedNode(root) level_to_linked_list(root.left, result, level + 1) level_to_linked_list(root.right, result, level + 1) def bst(root): if not root: return [] result = [LinkedNode] singly_node = LinkedNode(root) while singly_node: result.append(singly_node) parents = singly_node prehead = LinkedNode(-1) singly_node = prehead while parents: if parents.data.left: singly_node.next = LinkedNode(parents.data.left) singly_node = singly_node.next if parents.data.right: singly_node.next = LinkedNode(parents.data.right) singly_node = singly_node.next singly_node = prehead.next return result ### not tested if __name__ == "__main__": root = insert(None, 4) n2 = insert(root, 2) n1 = insert(root, 1) n3 = insert(root, 3) n8 = insert(root, 8) n11 = insert(root, 11) n7= insert(root, 7) tree_to_linked_lists(root)
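The module above is marked "not tested" and leans on project-local TreeNode/Node helpers; below is a self-contained hedged sketch of the same "one linked list per tree level" idea, with minimal node classes standing in for those imports rather than the project's own types.

```python
# Self-contained sketch of the "linked list per tree level" technique the file
# above aims for; the node classes here are stand-ins, not the project's own.
class TreeNode:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

class ListNode:
    def __init__(self, data):
        self.data, self.next = data, None

def tree_to_level_lists(root):
    """Return one singly linked list of tree nodes for each depth of the tree."""
    heads = []
    def visit(node, level):
        if node is None:
            return
        if level == len(heads):
            heads.append(ListNode(node))          # first node seen at this depth
        else:
            tail = heads[level]
            while tail.next:                      # walk to the end of this level's list
                tail = tail.next
            tail.next = ListNode(node)
        visit(node.left, level + 1)
        visit(node.right, level + 1)
    visit(root, 0)
    return heads

root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(8))
for depth, head in enumerate(tree_to_level_lists(root)):
    values, node = [], head
    while node:
        values.append(node.data.data)             # node.data is a TreeNode
        node = node.next
    print(depth, values)                          # 0 [4] / 1 [2, 8] / 2 [1, 3]
```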
py
1a45b96dcb7e2bb31a8dc13627980e835ce56599
# -*- coding: utf-8 -*- ''' Manage RabbitMQ Plugins ======================= .. versionadded:: 2014.1.0 Example: .. code-block:: yaml some_plugin: rabbitmq_plugin.enabled: [] ''' # Import Python Libs from __future__ import absolute_import, unicode_literals, print_function import logging # Import Salt Libs from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if RabbitMQ is installed. ''' if __salt__['cmd.has_exec']('rabbitmqctl'): return True return False def enabled(name, runas=None): ''' Ensure the RabbitMQ plugin is enabled. name The name of the plugin runas The user to run the rabbitmq-plugin command as ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} try: plugin_enabled = __salt__['rabbitmq.plugin_is_enabled'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret if plugin_enabled: ret['comment'] = 'Plugin \'{0}\' is already enabled.'.format(name) return ret if not __opts__['test']: try: __salt__['rabbitmq.enable_plugin'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret ret['changes'].update({'old': '', 'new': name}) if __opts__['test'] and ret['changes']: ret['result'] = None ret['comment'] = 'Plugin \'{0}\' is set to be enabled.'.format(name) return ret ret['comment'] = 'Plugin \'{0}\' was enabled.'.format(name) return ret def disabled(name, runas=None): ''' Ensure the RabbitMQ plugin is disabled. name The name of the plugin runas The user to run the rabbitmq-plugin command as ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} try: plugin_enabled = __salt__['rabbitmq.plugin_is_enabled'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret if not plugin_enabled: ret['comment'] = 'Plugin \'{0}\' is already disabled.'.format(name) return ret if not __opts__['test']: try: __salt__['rabbitmq.disable_plugin'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret ret['changes'].update({'old': name, 'new': ''}) if __opts__['test'] and ret['changes']: ret['result'] = None ret['comment'] = 'Plugin \'{0}\' is set to be disabled.'.format(name) return ret ret['comment'] = 'Plugin \'{0}\' was disabled.'.format(name) return ret
py
1a45ba4ac6d45164b900d0d5ea713a50803881b4
# -*- coding: utf-8 -*-

# Scrapy settings for weibo_m project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'weibo_m'

SPIDER_MODULES = ['weibo_m.spiders']
NEWSPIDER_MODULE = 'weibo_m.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'weibo_m (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4,zh-TW;q=0.2,mt;q=0.2',
    'Connection': 'keep-alive',
    'Host': 'm.weibo.cn',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'weibo_m.middlewares.WeiboMSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     'weibo_m.middlewares.ProxyMiddleware': 543,
#     'weibo_m.middlewares.CookiesMiddleware': 544,
# }

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'weibo_m.pipelines.TimePipeline': 300,
    'weibo_m.pipelines.WeiboPipeline': 301,
    'weibo_m.pipelines.MongoPipeline': 302,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

MONGO_URI = '10.255.1.175'
MONGO_DB = 'weibo_1300419694'
REDIS_URL = 'redis://:[email protected]:6379/3'

START_USER = ['1300419694']
py
1a45bb0bfa8e5aa6a9a9c3686b9c1a3280d7d307
"""URLs config.""" # Django from django.urls import path from posts import views urlpatterns = [ path('', views.list_posts, name='home'), path('create/', views.create_post, name='create'), ]
py
1a45bb7e2278da2c02e89f66b6b0741ed7723b64
#!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.

#
# Test RPC calls related to blockchain state. Tests correspond to code in
# rpc/blockchain.cpp.
#

import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."

import decimal

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    initialize_chain,
    assert_equal,
    start_nodes,
    connect_nodes_bi,
)


class BlockchainTest(BitcoinTestFramework):
    """
    Test blockchain-related RPC calls:

    - gettxoutsetinfo

    """

    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain(self.options.tmpdir)

    def setup_network(self, split=False):
        self.nodes = start_nodes(2, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        node = self.nodes[0]
        res = node.gettxoutsetinfo()

        assert_equal(res[u'total_amount'], decimal.Decimal('2181.25000000'))  # 150*12.5 + 49*6.25
        assert_equal(res[u'transactions'], 200)
        assert_equal(res[u'height'], 200)
        assert_equal(res[u'txouts'], 349)  # 150*2 + 49
        assert_equal(res[u'bytes_serialized'], 14951),  # 32*199 + 48*90 + 49*60 + 27*49
        assert_equal(len(res[u'bestblock']), 64)
        assert_equal(len(res[u'hash_serialized']), 64)


if __name__ == '__main__':
    BlockchainTest().main()
py
1a45bb99412a142749dff4e4d9e3755c64174f6b
test = {
    'name': 'q1_13',
    'points': 1,
    'suites': [
        {
            'cases': [
                {
                    'code': r"""
                    >>> 5 < rmse_example < 10
                    True
                    """,
                    'hidden': False,
                    'locked': False
                }
            ],
            'scored': True,
            'setup': '',
            'teardown': '',
            'type': 'doctest'
        }
    ]
}
py
1a45bca210e48ec0b35baa980606b95ab2ce6549
# Copyright (c) 2016-2017, Neil Booth # Copyright (c) 2017, the ElectrumX authors # # All rights reserved. # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. '''Module providing coin abstraction. Anything coin-specific should go in this file and be subclassed where necessary for appropriate handling. ''' from collections import namedtuple import re import struct from decimal import Decimal from hashlib import sha256 from functools import partial import base64 import electrumx.lib.util as util from electrumx.lib.hash import Base58, hash160, double_sha256, hash_to_hex_str from electrumx.lib.hash import HASHX_LEN from electrumx.lib.script import ScriptPubKey, OpCodes import electrumx.lib.tx as lib_tx import electrumx.server.block_processor as block_proc import electrumx.server.daemon as daemon from electrumx.server.session import ElectrumX, DashElectrumX Block = namedtuple("Block", "raw header transactions") OP_RETURN = OpCodes.OP_RETURN class CoinError(Exception): '''Exception raised for coin-related errors.''' class Coin(object): '''Base class of coin hierarchy.''' REORG_LIMIT = 200 # Not sure if these are coin-specific RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?') VALUE_PER_COIN = 100000000 CHUNK_SIZE = 2016 BASIC_HEADER_SIZE = 80 STATIC_BLOCK_HEADERS = True SESSIONCLS = ElectrumX DESERIALIZER = lib_tx.Deserializer DAEMON = daemon.Daemon BLOCK_PROCESSOR = block_proc.BlockProcessor HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce') HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from MEMPOOL_HISTOGRAM_REFRESH_SECS = 500 XPUB_VERBYTES = bytes('????', 'utf-8') XPRV_VERBYTES = bytes('????', 'utf-8') ENCODE_CHECK = Base58.encode_check DECODE_CHECK = Base58.decode_check # Peer discovery PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'} PEERS = [] @classmethod def lookup_coin_class(cls, name, net): '''Return a coin class given name and network. 
Raise an exception if unrecognised.''' req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK'] for coin in util.subclasses(Coin): if (coin.NAME.lower() == name.lower() and coin.NET.lower() == net.lower()): coin_req_attrs = req_attrs.copy() missing = [attr for attr in coin_req_attrs if not hasattr(coin, attr)] if missing: raise CoinError('coin {} missing {} attributes' .format(name, missing)) return coin raise CoinError('unknown coin {} and network {} combination' .format(name, net)) @classmethod def sanitize_url(cls, url): # Remove surrounding ws and trailing /s url = url.strip().rstrip('/') match = cls.RPC_URL_REGEX.match(url) if not match: raise CoinError('invalid daemon URL: "{}"'.format(url)) if match.groups()[1] is None: url += ':{:d}'.format(cls.RPC_PORT) if not url.startswith('http://') and not url.startswith('https://'): url = 'http://' + url return url + '/' @classmethod def genesis_block(cls, block): '''Check the Genesis block is the right one for this coin. Return the block less its unspendable coinbase. ''' header = cls.block_header(block, 0) header_hex_hash = hash_to_hex_str(cls.header_hash(header)) if header_hex_hash != cls.GENESIS_HASH: raise CoinError('genesis block has hash {} expected {}' .format(header_hex_hash, cls.GENESIS_HASH)) return header + bytes(1) @classmethod def hashX_from_script(cls, script): '''Returns a hashX from a script, or None if the script is provably unspendable so the output can be dropped. ''' if script and script[0] == OP_RETURN: return None return sha256(script).digest()[:HASHX_LEN] @staticmethod def lookup_xverbytes(verbytes): '''Return a (is_xpub, coin_class) pair given xpub/xprv verbytes.''' # Order means BTC testnet will override NMC testnet for coin in util.subclasses(Coin): if verbytes == coin.XPUB_VERBYTES: return True, coin if verbytes == coin.XPRV_VERBYTES: return False, coin raise CoinError('version bytes unrecognised') @classmethod def address_to_hashX(cls, address): '''Return a hashX given a coin address.''' return cls.hashX_from_script(cls.pay_to_address_script(address)) @classmethod def P2PKH_address_from_hash160(cls, hash160): '''Return a P2PKH address given a public key.''' assert len(hash160) == 20 return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160) @classmethod def P2PKH_address_from_pubkey(cls, pubkey): '''Return a coin address given a public key.''' return cls.P2PKH_address_from_hash160(hash160(pubkey)) @classmethod def P2SH_address_from_hash160(cls, hash160): '''Return a coin address given a hash160.''' assert len(hash160) == 20 return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160) @classmethod def hash160_to_P2PKH_script(cls, hash160): return ScriptPubKey.P2PKH_script(hash160) @classmethod def hash160_to_P2PKH_hashX(cls, hash160): return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160)) @classmethod def pay_to_address_script(cls, address): '''Return a pubkey script that pays to a pubkey hash. Pass the address (either P2PKH or P2SH) in base58 form. ''' raw = cls.DECODE_CHECK(address) # Require version byte(s) plus hash160. 
verbyte = -1 verlen = len(raw) - 20 if verlen > 0: verbyte, hash160 = raw[:verlen], raw[verlen:] if verbyte == cls.P2PKH_VERBYTE: return cls.hash160_to_P2PKH_script(hash160) if verbyte in cls.P2SH_VERBYTES: return ScriptPubKey.P2SH_script(hash160) raise CoinError('invalid address: {}'.format(address)) @classmethod def privkey_WIF(cls, privkey_bytes, compressed): '''Return the private key encoded in Wallet Import Format.''' payload = bytearray(cls.WIF_BYTE) + privkey_bytes if compressed: payload.append(0x01) return cls.ENCODE_CHECK(payload) @classmethod def header_hash(cls, header): '''Given a header return hash''' return double_sha256(header) @classmethod def header_prevhash(cls, header): '''Given a header return previous hash''' return header[4:36] @classmethod def static_header_offset(cls, height): '''Given a header height return its offset in the headers file. If header sizes change at some point, this is the only code that needs updating.''' assert cls.STATIC_BLOCK_HEADERS return height * cls.BASIC_HEADER_SIZE @classmethod def static_header_len(cls, height): '''Given a header height return its length.''' return (cls.static_header_offset(height + 1) - cls.static_header_offset(height)) @classmethod def block_header(cls, block, height): '''Returns the block header given a block and its height.''' return block[:cls.static_header_len(height)] @classmethod def block(cls, raw_block, height): '''Return a Block namedtuple given a raw block and its height.''' header = cls.block_header(raw_block, height) txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block() return Block(raw_block, header, txs) @classmethod def decimal_value(cls, value): '''Return the number of standard coin units as a Decimal given a quantity of smallest units. For example 1 BTC is returned for 100 million satoshis. 
''' return Decimal(value) / cls.VALUE_PER_COIN @classmethod def electrum_header(cls, header, height): h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header))) # Add the height that is not present in the header itself h['block_height'] = height # Convert bytes to str h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash']) h['merkle_root'] = hash_to_hex_str(h['merkle_root']) return h class AuxPowMixin(object): STATIC_BLOCK_HEADERS = False DESERIALIZER = lib_tx.DeserializerAuxPow @classmethod def header_hash(cls, header): '''Given a header return hash''' return double_sha256(header[:cls.BASIC_HEADER_SIZE]) @classmethod def block_header(cls, block, height): '''Return the AuxPow block header bytes''' deserializer = cls.DESERIALIZER(block) return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) class EquihashMixin(object): STATIC_BLOCK_HEADERS = False BASIC_HEADER_SIZE = 140 # Excluding Equihash solution DESERIALIZER = lib_tx.DeserializerEquihash HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'reserved', 'timestamp', 'bits', 'nonce') HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I 32s').unpack_from @classmethod def electrum_header(cls, header, height): h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header))) # Add the height that is not present in the header itself h['block_height'] = height # Convert bytes to str h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash']) h['merkle_root'] = hash_to_hex_str(h['merkle_root']) h['reserved'] = hash_to_hex_str(h['reserved']) h['nonce'] = hash_to_hex_str(h['nonce']) return h @classmethod def block_header(cls, block, height): '''Return the block header bytes''' deserializer = cls.DESERIALIZER(block) return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) class ScryptMixin(object): DESERIALIZER = lib_tx.DeserializerTxTime HEADER_HASH = None @classmethod def header_hash(cls, header): '''Given a header return the hash.''' if cls.HEADER_HASH is None: import scrypt cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32) version, = util.unpack_le_uint32_from(header) if version > 6: return super().header_hash(header) else: return cls.HEADER_HASH(header) class KomodoMixin(object): P2PKH_VERBYTE = bytes.fromhex("3C") P2SH_VERBYTES = [bytes.fromhex("55")] WIF_BYTE = bytes.fromhex("BC") GENESIS_HASH = ('027e3758c3a65b12aa1046462b486d0a' '63bfa1beae327897f56c5cfb7daaae71') DESERIALIZER = lib_tx.DeserializerZcash class BitcoinMixin(object): SHORTNAME = "BTC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("00") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('000000000019d6689c085ae165831e93' '4ff763ae46a2a6c172b3f1b60a8ce26f') RPC_PORT = 8332 class HOdlcoin(Coin): NAME = "HOdlcoin" SHORTNAME = "HODLC" NET = "mainnet" BASIC_HEADER_SIZE = 88 P2PKH_VERBYTE = bytes.fromhex("28") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("a8") GENESIS_HASH = ('008872e5582924544e5c707ee4b839bb' '82c28a9e94e917c94b40538d5658c04b') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 258858 TX_COUNT_HEIGHT = 382138 TX_PER_BLOCK = 5 class BitcoinCash(BitcoinMixin, Coin): NAME = "BitcoinCash" SHORTNAME = "BCH" TX_COUNT = 246362688 TX_COUNT_HEIGHT = 511484 TX_PER_BLOCK = 400 PEERS = [ 'electroncash.cascharia.com s50002', 'bch.electrumx.cash s t', 'bccarihace4jdcnt.onion t52001 s52002', 'abc1.hsmiths.com t60001 s60002', 'electroncash.checksum0.com s t', 'electrumx-cash.1209k.com s t', 
'electrum.leblancnet.us t50011 s50012', 'electroncash.dk s t', 'electrum.imaginary.cash s t', ] class BitcoinSegwit(BitcoinMixin, Coin): NAME = "BitcoinSegwit" DESERIALIZER = lib_tx.DeserializerSegWit MEMPOOL_HISTOGRAM_REFRESH_SECS = 120 TX_COUNT = 318337769 TX_COUNT_HEIGHT = 524213 TX_PER_BLOCK = 1400 PEERS = [ 'btc.smsys.me s995', 'E-X.not.fyi s t', 'elec.luggs.co s443', 'electrum.vom-stausee.de s t', 'electrum3.hachre.de s t', 'electrum.hsmiths.com s t', 'helicarrier.bauerj.eu s t', 'hsmiths4fyqlw5xw.onion s t', 'luggscoqbymhvnkp.onion t80', 'ozahtqwp25chjdjd.onion s t', 'node.arihanc.com s t', 'arihancckjge66iv.onion s t', ] class BitcoinGold(EquihashMixin, BitcoinMixin, Coin): CHUNK_SIZE = 252 NAME = "BitcoinGold" SHORTNAME = "BTG" FORK_HEIGHT = 491407 P2PKH_VERBYTE = bytes.fromhex("26") P2SH_VERBYTES = [bytes.fromhex("17")] DESERIALIZER = lib_tx.DeserializerEquihashSegWit TX_COUNT = 265026255 TX_COUNT_HEIGHT = 499923 TX_PER_BLOCK = 50 REORG_LIMIT = 1000 RPC_PORT = 8338 PEERS = [ 'electrumx-eu.bitcoingold.org s50002 t50001', 'electrumx-us.bitcoingold.org s50002 t50001', 'electrumx-eu.btcgpu.org s50002 t50001', 'electrumx-us.btcgpu.org s50002 t50001' ] @classmethod def header_hash(cls, header): '''Given a header return hash''' height, = util.unpack_le_uint32_from(header, 68) if height >= cls.FORK_HEIGHT: return double_sha256(header) else: return double_sha256(header[:68] + header[100:112]) @classmethod def electrum_header(cls, header, height): h = super().electrum_header(header, height) h['reserved'] = hash_to_hex_str(header[72:100]) h['solution'] = hash_to_hex_str(header[140:]) return h class BitcoinGoldTestnet(BitcoinGold): FORK_HEIGHT = 1 SHORTNAME = "TBTG" XPUB_VERBYTES = bytes.fromhex("043587CF") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("6F") P2SH_VERBYTES = [bytes.fromhex("C4")] WIF_BYTE = bytes.fromhex("EF") TX_COUNT = 0 TX_COUNT_HEIGHT = 1 NET = 'testnet' RPC_PORT = 18338 GENESIS_HASH = ('00000000e0781ebe24b91eedc293adfe' 'a2f557b53ec379e78959de3853e6f9f6') PEERS = [ 'test-node1.bitcoingold.org s50002', 'test-node2.bitcoingold.org s50002', 'test-node3.bitcoingold.org s50002', 'test-node1.btcgpu.org s50002', 'test-node2.btcgpu.org s50002', 'test-node3.btcgpu.org s50002' ] class BitcoinGoldRegtest(BitcoinGold): FORK_HEIGHT = 2000 SHORTNAME = "TBTG" XPUB_VERBYTES = bytes.fromhex("043587CF") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("6F") P2SH_VERBYTES = [bytes.fromhex("C4")] WIF_BYTE = bytes.fromhex("EF") TX_COUNT = 0 TX_COUNT_HEIGHT = 1 NET = 'regtest' RPC_PORT = 18444 GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328' 'bf5beb436012afca590b1a11466e2206') PEERS = [] class Emercoin(Coin): NAME = "Emercoin" SHORTNAME = "EMC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("21") P2SH_VERBYTES = [bytes.fromhex("5c")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('00000000bcccd459d036a588d1008fce' '8da3754b205736f32ddfd35350e84c2d') TX_COUNT = 217380620 TX_COUNT_HEIGHT = 464000 TX_PER_BLOCK = 1700 VALUE_PER_COIN = 1000000 RPC_PORT = 6662 DESERIALIZER = lib_tx.DeserializerTxTimeAuxPow PEERS = [] @classmethod def block_header(cls, block, height): '''Returns the block header given a block and its height.''' deserializer = cls.DESERIALIZER(block) if deserializer.is_merged_block(): return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) return block[:cls.static_header_len(height)] @classmethod def header_hash(cls, header): '''Given a header 
return hash''' return double_sha256(header[:cls.BASIC_HEADER_SIZE]) class BitcoinTestnetMixin(object): SHORTNAME = "XTN" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587cf") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("6f") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("ef") GENESIS_HASH = ('000000000933ea01ad0ee984209779ba' 'aec3ced90fa3f408719526f8d77f4943') REORG_LIMIT = 8000 TX_COUNT = 12242438 TX_COUNT_HEIGHT = 1035428 TX_PER_BLOCK = 21 RPC_PORT = 18332 PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} class BitcoinCashTestnet(BitcoinTestnetMixin, Coin): '''Bitcoin Testnet for Bitcoin Cash daemons.''' NAME = "BitcoinCash" PEERS = [ 'electrum-testnet-abc.criptolayer.net s50112', 'bchtestnet.arihanc.com t53001 s53002', 'ciiattqkgzebpp6jofjbrkhvhwmgnsfoayljdcrve2p3qmkbv3duaoyd.onion ' 't53001 s53002', ] class BitcoinCashRegtest(BitcoinCashTestnet): NET = "regtest" GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328' 'bf5beb436012afca590b1a11466e2206') PEERS = [] TX_COUNT = 1 TX_COUNT_HEIGHT = 1 class BitcoinSegwitTestnet(BitcoinTestnetMixin, Coin): '''Bitcoin Testnet for Core bitcoind >= 0.13.1.''' NAME = "BitcoinSegwit" DESERIALIZER = lib_tx.DeserializerSegWit PEERS = [ 'electrum.akinbo.org s t', 'he36kyperp3kbuxu.onion s t', 'testnet.hsmiths.com t53011 s53012', 'hsmithsxurybd7uh.onion t53011 s53012', 'testnetnode.arihanc.com s t', 'w3e2orjpiiv2qwem3dw66d7c4krink4nhttngkylglpqe5r22n6n5wid.onion s t', 'testnet.qtornado.com s t', ] class BitcoinSegwitRegtest(BitcoinSegwitTestnet): NAME = "BitcoinSegwit" NET = "regtest" GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328' 'bf5beb436012afca590b1a11466e2206') PEERS = [] TX_COUNT = 1 TX_COUNT_HEIGHT = 1 class BitcoinNolnet(BitcoinCash): '''Bitcoin Unlimited nolimit testnet.''' NET = "nolnet" GENESIS_HASH = ('0000000057e31bd2066c939a63b7b862' '3bd0f10d8c001304bdfc1a7902ae6d35') PEERS = [] REORG_LIMIT = 8000 TX_COUNT = 583589 TX_COUNT_HEIGHT = 8617 TX_PER_BLOCK = 50 RPC_PORT = 28332 PEER_DEFAULT_PORTS = {'t': '52001', 's': '52002'} class Litecoin(Coin): NAME = "Litecoin" SHORTNAME = "LTC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("30") P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("b0") GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98' 'c99d9714d334efa41a195a7e7e04bfe2') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 8908766 TX_COUNT_HEIGHT = 1105256 TX_PER_BLOCK = 10 RPC_PORT = 9332 REORG_LIMIT = 800 PEERS = [ 'elec.luggs.co s444', 'electrum-ltc.bysh.me s t', 'electrum-ltc.ddns.net s t', 'electrum-ltc.wilv.in s t', 'electrum.cryptomachine.com p1000 s t', 'electrum.ltc.xurious.com s t', 'eywr5eubdbbe2laq.onion s50008 t50007', ] class LitecoinTestnet(Litecoin): SHORTNAME = "XLT" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587cf") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("6f") P2SH_VERBYTES = [bytes.fromhex("3a"), bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("ef") GENESIS_HASH = ('4966625a4b2851d9fdee139e56211a0d' '88575f59ed816ff5e6a63deb4e3e29a0') TX_COUNT = 21772 TX_COUNT_HEIGHT = 20800 TX_PER_BLOCK = 2 RPC_PORT = 19332 REORG_LIMIT = 4000 PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} PEERS = [ 'electrum-ltc.bysh.me s t', 'electrum.ltc.xurious.com s t', ] class Viacoin(AuxPowMixin, Coin): NAME = "Viacoin" SHORTNAME = "VIA" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("47") P2SH_VERBYTES = [bytes.fromhex("21")] WIF_BYTE = 
bytes.fromhex("c7") GENESIS_HASH = ('4e9b54001f9976049830128ec0331515' 'eaabe35a70970d79971da1539a400ba1') TX_COUNT = 113638 TX_COUNT_HEIGHT = 3473674 TX_PER_BLOCK = 30 RPC_PORT = 5222 REORG_LIMIT = 5000 DESERIALIZER = lib_tx.DeserializerAuxPowSegWit PEERS = [ 'vialectrum.bitops.me s t', 'server.vialectrum.org s t', 'vialectrum.viacoin.net s t', 'viax1.bitops.me s t', ] class ViacoinTestnet(Viacoin): SHORTNAME = "TVI" NET = "testnet" P2PKH_VERBYTE = bytes.fromhex("7f") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("ff") GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477' 'a4cccff2a4767a8eee39c11db367b008') RPC_PORT = 25222 REORG_LIMIT = 2500 PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} PEERS = [ 'vialectrum.bysh.me s t', ] class ViacoinTestnetSegWit(ViacoinTestnet): NET = "testnet-segwit" DESERIALIZER = lib_tx.DeserializerSegWit # Source: namecoin.org class Namecoin(AuxPowMixin, Coin): NAME = "Namecoin" SHORTNAME = "NMC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("d7dd6370") XPRV_VERBYTES = bytes.fromhex("d7dc6e31") P2PKH_VERBYTE = bytes.fromhex("34") P2SH_VERBYTES = [bytes.fromhex("0d")] WIF_BYTE = bytes.fromhex("e4") GENESIS_HASH = ('000000000062b72c5e2ceb45fbc8587e' '807c155b0da735e6483dfba2f0a9c770') TX_COUNT = 4415768 TX_COUNT_HEIGHT = 329065 TX_PER_BLOCK = 10 PEERS = [ 'elec.luggs.co s446', ] class NamecoinTestnet(Namecoin): NAME = "Namecoin" SHORTNAME = "XNM" NET = "testnet" P2PKH_VERBYTE = bytes.fromhex("6f") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("ef") GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477' 'a4cccff2a4767a8eee39c11db367b008') class Dogecoin(AuxPowMixin, Coin): NAME = "Dogecoin" SHORTNAME = "DOGE" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("02facafd") XPRV_VERBYTES = bytes.fromhex("02fac398") P2PKH_VERBYTE = bytes.fromhex("1e") P2SH_VERBYTES = [bytes.fromhex("16")] WIF_BYTE = bytes.fromhex("9e") GENESIS_HASH = ('1a91e3dace36e2be3bf030a65679fe82' '1aa1d6ef92e7c9902eb318182c355691') TX_COUNT = 27583427 TX_COUNT_HEIGHT = 1604979 TX_PER_BLOCK = 20 REORG_LIMIT = 2000 class DogecoinTestnet(Dogecoin): NAME = "Dogecoin" SHORTNAME = "XDT" NET = "testnet" P2PKH_VERBYTE = bytes.fromhex("71") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("f1") GENESIS_HASH = ('bb0a78264637406b6360aad926284d54' '4d7049f45189db5664f3c4d07350559e') # Source: https://github.com/motioncrypto/motion class Motion(Coin): NAME = "Motion" SHORTNAME = "XMN" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") GENESIS_HASH = ('000001e9dc60dd2618e91f7b90141349' '22c374496b61c1a272519b1c39979d78') P2PKH_VERBYTE = bytes.fromhex("32") P2SH_VERBYTES = [bytes.fromhex("12")] WIF_BYTE = bytes.fromhex("80") TX_COUNT_HEIGHT = 54353 TX_COUNT = 92701 TX_PER_BLOCK = 4 RPC_PORT = 3385 SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import x16r_hash return x16r_hash.getPoWHash(header) # Source: https://github.com/dashpay/dash class Dash(Coin): NAME = "Dash" SHORTNAME = "DASH" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("02fe52cc") XPRV_VERBYTES = bytes.fromhex("02fe52f8") GENESIS_HASH = ('00000ffd590b1485b3caadc19b22e637' '9c733355108f107a430458cdf3407ab6') P2PKH_VERBYTE = bytes.fromhex("4c") P2SH_VERBYTES = [bytes.fromhex("10")] WIF_BYTE = bytes.fromhex("cc") TX_COUNT_HEIGHT = 569399 TX_COUNT = 2157510 TX_PER_BLOCK = 4 RPC_PORT = 9998 PEERS = [ 'electrum.dash.org s t', 'electrum.masternode.io s t', 'electrum-drk.club s t', 
'dashcrypto.space s t', 'electrum.dash.siampm.com s t', 'wl4sfwq2hwxnodof.onion s t', ] SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import x11_hash return x11_hash.getPoWHash(header) class DashTestnet(Dash): SHORTNAME = "tDASH" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("3a805837") XPRV_VERBYTES = bytes.fromhex("3a8061a0") GENESIS_HASH = ('00000bafbc94add76cb75e2ec9289483' '7288a481e5c005f6563d91623bf8bc2c') P2PKH_VERBYTE = bytes.fromhex("8c") P2SH_VERBYTES = [bytes.fromhex("13")] WIF_BYTE = bytes.fromhex("ef") TX_COUNT_HEIGHT = 101619 TX_COUNT = 132681 TX_PER_BLOCK = 1 RPC_PORT = 19998 PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} PEERS = [ 'electrum.dash.siampm.com s t', 'dasht.random.re s54002 t54001', ] class Argentum(AuxPowMixin, Coin): NAME = "Argentum" SHORTNAME = "ARG" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("17") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("97") GENESIS_HASH = ('88c667bc63167685e4e4da058fffdfe8' 'e007e5abffd6855de52ad59df7bb0bb2') TX_COUNT = 2263089 TX_COUNT_HEIGHT = 2050260 TX_PER_BLOCK = 2000 RPC_PORT = 13581 class ArgentumTestnet(Argentum): SHORTNAME = "XRG" NET = "testnet" P2PKH_VERBYTE = bytes.fromhex("6f") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("ef") REORG_LIMIT = 2000 class DigiByte(Coin): NAME = "DigiByte" SHORTNAME = "DGB" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("1E") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('7497ea1b465eb39f1c8f507bc877078f' 'e016d6fcb6dfad3a64c98dcc6e1e8496') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 1046018 TX_COUNT_HEIGHT = 1435000 TX_PER_BLOCK = 1000 RPC_PORT = 12022 class DigiByteTestnet(DigiByte): NET = "testnet" P2PKH_VERBYTE = bytes.fromhex("6f") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("ef") GENESIS_HASH = ('b5dca8039e300198e5fe7cd23bdd1728' 'e2a444af34c447dbd0916fa3430a68c2') RPC_PORT = 15022 REORG_LIMIT = 2000 class FairCoin(Coin): NAME = "FairCoin" SHORTNAME = "FAIR" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("5f") P2SH_VERBYTES = [bytes.fromhex("24")] WIF_BYTE = bytes.fromhex("df") GENESIS_HASH = ('beed44fa5e96150d95d56ebd5d262578' '1825a9407a5215dd7eda723373a0a1d7') BASIC_HEADER_SIZE = 108 HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'payload_hash', 'timestamp', 'creatorId') HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I').unpack_from TX_COUNT = 505 TX_COUNT_HEIGHT = 470 TX_PER_BLOCK = 1 RPC_PORT = 40405 PEER_DEFAULT_PORTS = {'t': '51811', 's': '51812'} PEERS = [ 'electrum.faircoin.world s', 'electrumfair.punto0.org s', ] @classmethod def block(cls, raw_block, height): '''Return a Block namedtuple given a raw block and its height.''' if height > 0: return super().block(raw_block, height) else: return Block(raw_block, cls.block_header(raw_block, height), []) @classmethod def electrum_header(cls, header, height): h = super().electrum_header(header, height) h['payload_hash'] = hash_to_hex_str(h['payload_hash']) return h class Zcash(EquihashMixin, Coin): NAME = "Zcash" SHORTNAME = "ZEC" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("1CB8") P2SH_VERBYTES = [bytes.fromhex("1CBD")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('00040fe8ec8471911baa1db1266ea15d' 'd06b4a8a5c453883c000b031973dce08') DESERIALIZER = lib_tx.DeserializerZcash TX_COUNT = 329196 TX_COUNT_HEIGHT = 68379 TX_PER_BLOCK = 5 RPC_PORT = 8232 REORG_LIMIT = 800 class ZcashTestnet(Zcash): SHORTNAME = "TAZ" NET = 
"testnet" P2PKH_VERBYTE = bytes.fromhex("1D25") P2SH_VERBYTES = [bytes.fromhex("1CBA")] WIF_BYTE = bytes.fromhex("EF") GENESIS_HASH = ('05a60a92d99d85997cce3b87616c089f' '6124d7342af37106edc76126334a2c38') TX_COUNT = 242312 TX_COUNT_HEIGHT = 321685 TX_PER_BLOCK = 2 RPC_PORT = 18232 class SnowGem(EquihashMixin, Coin): NAME = "SnowGem" SHORTNAME = "SNG" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("1C28") P2SH_VERBYTES = [bytes.fromhex("1C2D")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('00068b35729d9d2b0c294ff1fe9af009' '4740524311a131de40e7f705e4c29a5b') DESERIALIZER = lib_tx.DeserializerZcash TX_COUNT = 140698 TX_COUNT_HEIGHT = 102802 TX_PER_BLOCK = 2 RPC_PORT = 16112 REORG_LIMIT = 800 CHUNK_SIZE = 200 @classmethod def electrum_header(cls, header, height): h = super().electrum_header(header, height) h['n_solution'] = base64.b64encode(lib_tx.Deserializer( header, start=140)._read_varbytes()).decode('utf8') return h class BitcoinZ(EquihashMixin, Coin): NAME = "BitcoinZ" SHORTNAME = "BTCZ" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("1CB8") P2SH_VERBYTES = [bytes.fromhex("1CBD")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('f499ee3d498b4298ac6a64205b8addb7' 'c43197e2a660229be65db8a4534d75c1') DESERIALIZER = lib_tx.DeserializerZcash TX_COUNT = 171976 TX_COUNT_HEIGHT = 81323 TX_PER_BLOCK = 3 RPC_PORT = 1979 REORG_LIMIT = 800 class Hush(EquihashMixin, Coin): NAME = "Hush" SHORTNAME = "HUSH" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("1CB8") P2SH_VERBYTES = [bytes.fromhex("1CBD")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('0003a67bc26fe564b75daf11186d3606' '52eb435a35ba3d9d3e7e5d5f8e62dc17') DESERIALIZER = lib_tx.DeserializerZcash TX_COUNT = 329196 TX_COUNT_HEIGHT = 68379 TX_PER_BLOCK = 5 RPC_PORT = 8822 REORG_LIMIT = 800 class Zclassic(EquihashMixin, Coin): NAME = "Zclassic" SHORTNAME = "ZCL" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("1CB8") P2SH_VERBYTES = [bytes.fromhex("1CBD")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('0007104ccda289427919efc39dc9e4d4' '99804b7bebc22df55f8b834301260602') DESERIALIZER = lib_tx.DeserializerZcash TX_COUNT = 329196 TX_COUNT_HEIGHT = 68379 TX_PER_BLOCK = 5 RPC_PORT = 8023 REORG_LIMIT = 800 class Koto(Coin): NAME = "Koto" SHORTNAME = "KOTO" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("1836") P2SH_VERBYTES = [bytes.fromhex("183B")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('6d424c350729ae633275d51dc3496e16' 'cd1b1d195c164da00f39c499a2e9959e') DESERIALIZER = lib_tx.DeserializerZcash TX_COUNT = 158914 TX_COUNT_HEIGHT = 67574 TX_PER_BLOCK = 3 RPC_PORT = 8432 REORG_LIMIT = 800 PEERS = [ 'fr.kotocoin.info s t', 'electrum.kotocoin.info s t', ] class KotoTestnet(Koto): SHORTNAME = "TOKO" NET = "testnet" P2PKH_VERBYTE = bytes.fromhex("18A4") P2SH_VERBYTES = [bytes.fromhex("1839")] WIF_BYTE = bytes.fromhex("EF") GENESIS_HASH = ('bf84afbde20c2d213b68b231ddb585ab' '616ef7567226820f00d9b397d774d2f0') TX_COUNT = 91144 TX_COUNT_HEIGHT = 89662 TX_PER_BLOCK = 1 RPC_PORT = 18432 PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} PEERS = [ 'testnet.kotocoin.info s t', ] class Komodo(KomodoMixin, EquihashMixin, Coin): NAME = "Komodo" SHORTNAME = "KMD" NET = "mainnet" TX_COUNT = 693629 TX_COUNT_HEIGHT = 491777 TX_PER_BLOCK = 2 RPC_PORT = 7771 REORG_LIMIT = 800 PEERS = [] class Monaize(KomodoMixin, EquihashMixin, Coin): NAME = "Monaize" SHORTNAME = "MNZ" NET = "mainnet" TX_COUNT = 256 TX_COUNT_HEIGHT = 128 TX_PER_BLOCK = 2 RPC_PORT = 14337 REORG_LIMIT = 800 PEERS = [] class Einsteinium(Coin): NAME = "Einsteinium" SHORTNAME = "EMC2" NET = "mainnet" 
P2PKH_VERBYTE = bytes.fromhex("21") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("b0") GENESIS_HASH = ('4e56204bb7b8ac06f860ff1c845f03f9' '84303b5b97eb7b42868f714611aed94b') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 2087559 TX_COUNT_HEIGHT = 1358517 TX_PER_BLOCK = 2 RPC_PORT = 41879 REORG_LIMIT = 2000 class Blackcoin(ScryptMixin, Coin): NAME = "Blackcoin" SHORTNAME = "BLK" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("19") P2SH_VERBYTES = [bytes.fromhex("55")] WIF_BYTE = bytes.fromhex("99") GENESIS_HASH = ('000001faef25dec4fbcf906e6242621d' 'f2c183bf232f263d0ba5b101911e4563') DAEMON = daemon.LegacyRPCDaemon TX_COUNT = 4594999 TX_COUNT_HEIGHT = 1667070 TX_PER_BLOCK = 3 RPC_PORT = 15715 REORG_LIMIT = 5000 class Bitbay(ScryptMixin, Coin): NAME = "Bitbay" SHORTNAME = "BAY" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("19") P2SH_VERBYTES = [bytes.fromhex("55")] WIF_BYTE = bytes.fromhex("99") GENESIS_HASH = ('0000075685d3be1f253ce777174b1594' '354e79954d2a32a6f77fe9cba00e6467') TX_COUNT = 4594999 TX_COUNT_HEIGHT = 1667070 TX_PER_BLOCK = 3 RPC_PORT = 19914 REORG_LIMIT = 5000 class Peercoin(Coin): NAME = "Peercoin" SHORTNAME = "PPC" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("37") P2SH_VERBYTES = [bytes.fromhex("75")] WIF_BYTE = bytes.fromhex("b7") GENESIS_HASH = ('0000000032fe677166d54963b62a4677' 'd8957e87c508eaa4fd7eb1c880cd27e3') DESERIALIZER = lib_tx.DeserializerTxTime DAEMON = daemon.LegacyRPCDaemon TX_COUNT = 1207356 TX_COUNT_HEIGHT = 306425 TX_PER_BLOCK = 4 RPC_PORT = 9902 REORG_LIMIT = 5000 class Reddcoin(Coin): NAME = "Reddcoin" SHORTNAME = "RDD" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("3d") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("bd") GENESIS_HASH = ('b868e0d95a3c3c0e0dadc67ee587aaf9' 'dc8acbf99e3b4b3110fad4eb74c1decc') DESERIALIZER = lib_tx.DeserializerReddcoin TX_COUNT = 5413508 TX_COUNT_HEIGHT = 1717382 TX_PER_BLOCK = 3 RPC_PORT = 45443 class TokenPay(ScryptMixin, Coin): NAME = "TokenPay" SHORTNAME = "TPAY" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("41") P2SH_VERBYTES = [bytes.fromhex("7e")] WIF_BYTE = bytes.fromhex("b3") GENESIS_HASH = ('000008b71ab32e585a23f0de642dc113' '740144e94c0ece047751e9781f953ae9') DESERIALIZER = lib_tx.DeserializerTokenPay DAEMON = daemon.LegacyRPCDaemon TX_COUNT = 147934 TX_COUNT_HEIGHT = 73967 TX_PER_BLOCK = 100 RPC_PORT = 8800 REORG_LIMIT = 500 XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") PEERS = [ "electrum-us.tpay.ai s", "electrum-eu.tpay.ai s", ] class Vertcoin(Coin): NAME = "Vertcoin" SHORTNAME = "VTC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") P2PKH_VERBYTE = bytes.fromhex("47") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('4d96a915f49d40b1e5c2844d1ee2dccb' '90013a990ccea12c492d22110489f0c4') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 2383423 TX_COUNT_HEIGHT = 759076 TX_PER_BLOCK = 3 RPC_PORT = 5888 REORG_LIMIT = 1000 class Monacoin(Coin): NAME = "Monacoin" SHORTNAME = "MONA" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") P2PKH_VERBYTE = bytes.fromhex("32") P2SH_VERBYTES = [bytes.fromhex("37"), bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("B0") GENESIS_HASH = ('ff9f1c0116d19de7c9963845e129f9ed' '1bfc0b376eb54fd7afa42e0d418c8bb6') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 2568580 TX_COUNT_HEIGHT = 1029766 TX_PER_BLOCK = 2 RPC_PORT = 9402 REORG_LIMIT = 1000 PEERS = [ 
'electrumx.tamami-foundation.org s t', 'electrumx2.tamami-foundation.org s t', 'electrumx3.tamami-foundation.org s t', 'electrumx1.monacoin.nl s t', 'electrumx2.monacoin.nl s t', 'electrumx1.monacoin.ninja s t', 'electrumx2.monacoin.ninja s t', 'electrumx2.movsign.info s t', 'electrum-mona.bitbank.cc s t', 'ri7rzlmdaf4eqbza.onion s t', ] class MonacoinTestnet(Monacoin): SHORTNAME = "XMN" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587CF") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("6F") P2SH_VERBYTES = [bytes.fromhex("75"), bytes.fromhex("C4")] WIF_BYTE = bytes.fromhex("EF") GENESIS_HASH = ('a2b106ceba3be0c6d097b2a6a6aacf9d' '638ba8258ae478158f449c321061e0b2') TX_COUNT = 83602 TX_COUNT_HEIGHT = 83252 TX_PER_BLOCK = 1 RPC_PORT = 19402 REORG_LIMIT = 1000 PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} PEERS = [ 'electrumx1.testnet.monacoin.ninja s t', 'electrumx1.testnet.monacoin.nl s t', ] class Crown(AuxPowMixin, Coin): NAME = "Crown" SHORTNAME = "CRW" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("00") P2SH_VERBYTES = [bytes.fromhex("1c")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('0000000085370d5e122f64f4ab19c686' '14ff3df78c8d13cb814fd7e69a1dc6da') TX_COUNT = 13336629 TX_COUNT_HEIGHT = 1268206 TX_PER_BLOCK = 10 RPC_PORT = 9341 REORG_LIMIT = 1000 PEERS = [ 'sgp-crwseed.crowndns.info s t', 'blr-crwseed.crowndns.info s t', 'sfo-crwseed.crowndns.info s t', 'nyc-crwseed.crowndns.info s t', 'ams-crwseed.crowndns.info s t', 'tor-crwseed.crowndns.info s t', 'lon-crwseed.crowndns.info s t', 'fra-crwseed.crowndns.info s t', ] class Fujicoin(Coin): NAME = "Fujicoin" SHORTNAME = "FJC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("24") P2SH_VERBYTES = [bytes.fromhex("10")] WIF_BYTE = bytes.fromhex("a4") GENESIS_HASH = ('adb6d9cfd74075e7f91608add4bd2a2e' 'a636f70856183086842667a1597714a0') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 170478 TX_COUNT_HEIGHT = 1521676 TX_PER_BLOCK = 1 RPC_PORT = 3776 REORG_LIMIT = 1000 class Neblio(ScryptMixin, Coin): NAME = "Neblio" SHORTNAME = "NEBL" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("35") P2SH_VERBYTES = [bytes.fromhex("70")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('7286972be4dbc1463d256049b7471c25' '2e6557e222cab9be73181d359cd28bcc') TX_COUNT = 23675 TX_COUNT_HEIGHT = 22785 TX_PER_BLOCK = 1 RPC_PORT = 6326 REORG_LIMIT = 1000 class Bitzeny(Coin): NAME = "Bitzeny" SHORTNAME = "ZNY" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("51") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('000009f7e55e9e3b4781e22bd87a7cfa' '4acada9e4340d43ca738bf4e9fb8f5ce') ESTIMATE_FEE = 0.001 RELAY_FEE = 0.001 DAEMON = daemon.FakeEstimateFeeDaemon TX_COUNT = 1408733 TX_COUNT_HEIGHT = 1015115 TX_PER_BLOCK = 1 RPC_PORT = 9252 REORG_LIMIT = 1000 @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import zny_yescrypt return zny_yescrypt.getPoWHash(header) class CanadaeCoin(AuxPowMixin, Coin): NAME = "CanadaeCoin" SHORTNAME = "CDN" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("1C") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("9c") 
GENESIS_HASH = ('863626dadaef221e2e2f30ff3dacae44' 'cabdae9e0028058072181b3fb675d94a') ESTIMATE_FEE = 0.0001 RELAY_FEE = 0.0001 DAEMON = daemon.FakeEstimateFeeDaemon TX_COUNT = 3455905 TX_COUNT_HEIGHT = 3645419 TX_PER_BLOCK = 1 RPC_PORT = 34330 REORG_LIMIT = 1000 class Denarius(Coin): NAME = "Denarius" SHORTNAME = "DNR" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("1E") # Address starts with a D P2SH_VERBYTES = [bytes.fromhex("5A")] WIF_BYTE = bytes.fromhex("9E") # WIF starts with a 6 GENESIS_HASH = ('00000d5dbbda01621cfc16bbc1f9bf32' '64d641a5dbf0de89fd0182c2c4828fcd') DESERIALIZER = lib_tx.DeserializerTxTime TX_COUNT = 4230 RPC_PORT = 32339 ESTIMATE_FEE = 0.00001 RELAY_FEE = 0.00001 DAEMON = daemon.FakeEstimateFeeDaemon TX_COUNT_HEIGHT = 306187 TX_PER_BLOCK = 4000 @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import tribus_hash return tribus_hash.getPoWHash(header) class DenariusTestnet(Denarius): NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587cf") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("12") P2SH_VERBYTES = [bytes.fromhex("74")] WIF_BYTE = bytes.fromhex("ef") GENESIS_HASH = ('000086bfe8264d241f7f8e5393f74778' '4b8ca2aa98bdd066278d590462a4fdb4') RPC_PORT = 32338 REORG_LIMIT = 2000 class Sibcoin(Dash): NAME = "Sibcoin" SHORTNAME = "SIB" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("3F") P2SH_VERBYTES = [bytes.fromhex("28")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('00000c492bf73490420868bc577680bf' 'c4c60116e7e85343bc624787c21efa4c') DAEMON = daemon.DashDaemon TX_COUNT = 1000 TX_COUNT_HEIGHT = 10000 TX_PER_BLOCK = 1 RPC_PORT = 1944 REORG_LIMIT = 1000 PEERS = [] @classmethod def header_hash(cls, header): ''' Given a header return the hash for sibcoin. 
Need to download `x11_gost_hash` module Source code: https://github.com/ivansib/x11_gost_hash ''' import x11_gost_hash return x11_gost_hash.getPoWHash(header) class Chips(Coin): NAME = "Chips" SHORTNAME = "CHIPS" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("3c") P2SH_VERBYTES = [bytes.fromhex("55")] WIF_BYTE = bytes.fromhex("bc") GENESIS_HASH = ('0000006e75f6aa0efdbf7db03132aa4e' '4d0c84951537a6f5a7c39a0a9d30e1e7') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 145290 TX_COUNT_HEIGHT = 318637 TX_PER_BLOCK = 2 RPC_PORT = 57776 REORG_LIMIT = 800 class Feathercoin(Coin): NAME = "Feathercoin" SHORTNAME = "FTC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488BC26") XPRV_VERBYTES = bytes.fromhex("0488DAEE") P2PKH_VERBYTE = bytes.fromhex("0E") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("8E") GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98' 'c99d9714d334efa41a195a7e7e04bfe2') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 3170843 TX_COUNT_HEIGHT = 1981777 TX_PER_BLOCK = 2 RPC_PORT = 9337 REORG_LIMIT = 2000 PEERS = [ 'electrumx-ch-1.feathercoin.ch s t', ] class UFO(Coin): NAME = "UniformFiscalObject" SHORTNAME = "UFO" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") P2PKH_VERBYTE = bytes.fromhex("1B") P2SH_VERBYTES = [bytes.fromhex("44")] WIF_BYTE = bytes.fromhex("9B") GENESIS_HASH = ('ba1d39b4928ab03d813d952daf65fb77' '97fcf538a9c1b8274f4edc8557722d13') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 1608926 TX_COUNT_HEIGHT = 1300154 TX_PER_BLOCK = 2 RPC_PORT = 9888 REORG_LIMIT = 2000 PEERS = [ 'electrumx1.ufobject.com s t', ] class Newyorkcoin(AuxPowMixin, Coin): NAME = "Newyorkcoin" SHORTNAME = "NYC" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("3c") P2SH_VERBYTES = [bytes.fromhex("16")] WIF_BYTE = bytes.fromhex("bc") GENESIS_HASH = ('5597f25c062a3038c7fd815fe46c67de' 'dfcb3c839fbc8e01ed4044540d08fe48') DAEMON = daemon.LegacyRPCDaemon TX_COUNT = 5161944 TX_COUNT_HEIGHT = 3948743 TX_PER_BLOCK = 2 REORG_LIMIT = 2000 class NewyorkcoinTestnet(Newyorkcoin): SHORTNAME = "tNYC" NET = "testnet" P2PKH_VERBYTE = bytes.fromhex("71") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("f1") GENESIS_HASH = ('24463e4d3c625b0a9059f309044c2cf0' 'd7e196cf2a6ecce901f24f681be33c8f') DAEMON = daemon.LegacyRPCDaemon TX_COUNT = 5161944 TX_COUNT_HEIGHT = 3948743 TX_PER_BLOCK = 2 REORG_LIMIT = 2000 class Bitcore(BitcoinMixin, Coin): NAME = "Bitcore" SHORTNAME = "BTX" P2PKH_VERBYTE = bytes.fromhex("03") P2SH_VERBYTES = [bytes.fromhex("7D")] WIF_BYTE = bytes.fromhex("80") DESERIALIZER = lib_tx.DeserializerSegWit GENESIS_HASH = ('604148281e5c4b7f2487e5d03cd60d8e' '6f69411d613f6448034508cea52e9574') TX_COUNT = 126979 TX_COUNT_HEIGHT = 126946 TX_PER_BLOCK = 2 RPC_PORT = 8556 class GameCredits(Coin): NAME = "GameCredits" SHORTNAME = "GAME" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("26") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("a6") GENESIS_HASH = ('91ec5f25ee9a0ffa1af7d4da4db9a552' '228dd2dc77cdb15b738be4e1f55f30ee') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 316796 TX_COUNT_HEIGHT = 2040250 TX_PER_BLOCK = 2 RPC_PORT = 40001 REORG_LIMIT = 1000 class Machinecoin(Coin): NAME = "Machinecoin" SHORTNAME = "MAC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("32") P2SH_VERBYTES = [bytes.fromhex("26"), bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("b2") GENESIS_HASH = ('6a1f879bcea5471cbfdee1fd0cb2ddcc' 
'4fed569a500e352d41de967703e83172') DESERIALIZER = lib_tx.DeserializerSegWit TX_COUNT = 137641 TX_COUNT_HEIGHT = 513020 TX_PER_BLOCK = 2 RPC_PORT = 40332 REORG_LIMIT = 800 class BitcoinAtom(Coin): NAME = "BitcoinAtom" SHORTNAME = "BCA" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("17") P2SH_VERBYTES = [bytes.fromhex("0a")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('000000000019d6689c085ae165831e93' '4ff763ae46a2a6c172b3f1b60a8ce26f') STATIC_BLOCK_HEADERS = False DESERIALIZER = lib_tx.DeserializerBitcoinAtom HEADER_SIZE_POST_FORK = 84 BLOCK_PROOF_OF_STAKE = 0x01 BLOCK_PROOF_OF_STAKE_FLAGS = b'\x01\x00\x00\x00' TX_COUNT = 295158744 TX_COUNT_HEIGHT = 589197 TX_PER_BLOCK = 10 RPC_PORT = 9136 REORG_LIMIT = 5000 @classmethod def header_hash(cls, header): '''Given a header return hash''' header_to_be_hashed = header[:cls.BASIC_HEADER_SIZE] # New block header format has some extra flags in the end if len(header) == cls.HEADER_SIZE_POST_FORK: flags, = util.unpack_le_uint32_from(header, len(header) - 4) # Proof of work blocks have special serialization if flags & cls.BLOCK_PROOF_OF_STAKE != 0: header_to_be_hashed += cls.BLOCK_PROOF_OF_STAKE_FLAGS return double_sha256(header_to_be_hashed) @classmethod def block_header(cls, block, height): '''Return the block header bytes''' deserializer = cls.DESERIALIZER(block) return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) class Decred(Coin): NAME = "Decred" SHORTNAME = "DCR" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("02fda926") XPRV_VERBYTES = bytes.fromhex("02fda4e8") P2PKH_VERBYTE = bytes.fromhex("073f") P2SH_VERBYTES = [bytes.fromhex("071a")] WIF_BYTE = bytes.fromhex("230e") GENESIS_HASH = ('298e5cc3d985bfe7f81dc135f360abe0' '89edd4396b86d2de66b0cef42b21d980') BASIC_HEADER_SIZE = 180 HEADER_HASH = lib_tx.DeserializerDecred.blake256 DESERIALIZER = lib_tx.DeserializerDecred DAEMON = daemon.DecredDaemon BLOCK_PROCESSOR = block_proc.DecredBlockProcessor ENCODE_CHECK = partial(Base58.encode_check, hash_fn=lib_tx.DeserializerDecred.blake256d) DECODE_CHECK = partial(Base58.decode_check, hash_fn=lib_tx.DeserializerDecred.blake256d) HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root', 'stake_root', 'vote_bits', 'final_state', 'voters', 'fresh_stake', 'revocations', 'pool_size', 'bits', 'sbits', 'block_height', 'size', 'timestamp', 'nonce', 'extra_data', 'stake_version') HEADER_UNPACK = struct.Struct( '< i 32s 32s 32s H 6s H B B I I Q I I I I 32s I').unpack_from TX_COUNT = 4629388 TX_COUNT_HEIGHT = 260628 TX_PER_BLOCK = 17 REORG_LIMIT = 1000 RPC_PORT = 9109 @classmethod def header_hash(cls, header): '''Given a header return the hash.''' return cls.HEADER_HASH(header) @classmethod def block(cls, raw_block, height): '''Return a Block namedtuple given a raw block and its height.''' if height > 0: return super().block(raw_block, height) else: return Block(raw_block, cls.block_header(raw_block, height), []) @classmethod def electrum_header(cls, header, height): h = super().electrum_header(header, height) h['stake_root'] = hash_to_hex_str(h['stake_root']) h['final_state'] = hash_to_hex_str(h['final_state']) h['extra_data'] = hash_to_hex_str(h['extra_data']) return h class DecredTestnet(Decred): SHORTNAME = "tDCR" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587d1") XPRV_VERBYTES = bytes.fromhex("04358397") P2PKH_VERBYTE = bytes.fromhex("0f21") P2SH_VERBYTES = [bytes.fromhex("0efc")] WIF_BYTE = bytes.fromhex("22de") GENESIS_HASH = ( '4261602a9d07d80ad47621a64ba6a07754902e496777edc4ff581946bd7bc29c') BASIC_HEADER_SIZE = 180 
ALLOW_ADVANCING_ERRORS = True TX_COUNT = 217380620 TX_COUNT_HEIGHT = 464000 TX_PER_BLOCK = 1800 REORG_LIMIT = 1000 RPC_PORT = 19109 class Axe(Dash): NAME = "Axe" SHORTNAME = "AXE" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("02fe52cc") XPRV_VERBYTES = bytes.fromhex("02fe52f8") P2PKH_VERBYTE = bytes.fromhex("37") P2SH_VERBYTES = [bytes.fromhex("10")] WIF_BYTE = bytes.fromhex("cc") GENESIS_HASH = ('00000c33631ca6f2f61368991ce2dc03' '306b5bb50bf7cede5cfbba6db38e52e6') DAEMON = daemon.DashDaemon TX_COUNT = 18405 TX_COUNT_HEIGHT = 30237 TX_PER_BLOCK = 1 RPC_PORT = 9337 REORG_LIMIT = 1000 PEERS = [] @classmethod def header_hash(cls, header): ''' Given a header return the hash for AXE. Need to download `axe_hash` module Source code: https://github.com/AXErunners/axe_hash ''' import x11_hash return x11_hash.getPoWHash(header) class Xuez(Coin): NAME = "Xuez" SHORTNAME = "XUEZ" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("022d2533") XPRV_VERBYTES = bytes.fromhex("0221312b") P2PKH_VERBYTE = bytes.fromhex("48") P2SH_VERBYTES = [bytes.fromhex("12")] WIF_BYTE = bytes.fromhex("d4") GENESIS_HASH = ('000000e1febc39965b055e8e0117179a' '4d18e24e7aaa0c69864c4054b4f29445') TX_COUNT = 30000 TX_COUNT_HEIGHT = 15000 TX_PER_BLOCK = 1 RPC_PORT = 41799 REORG_LIMIT = 1000 BASIC_HEADER_SIZE = 112 PEERS = [] @classmethod def header_hash(cls, header): ''' Given a header return the hash for Xuez. Need to download `xevan_hash` module Source code: https://github.com/xuez/xuez ''' version, = util.unpack_le_uint32_from(header) import xevan_hash if version == 1: return xevan_hash.getPoWHash(header[:80]) else: return xevan_hash.getPoWHash(header) @classmethod def electrum_header(cls, header, height): h = super().electrum_header(header, height) if h['version'] > 1: h['nAccumulatorCheckpoint'] = hash_to_hex_str(header[80:]) return h class Pac(Coin): NAME = "PAC" SHORTNAME = "PAC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") GENESIS_HASH = ('00000354655ff039a51273fe61d3b493' 'bd2897fe6c16f732dbc4ae19f04b789e') P2PKH_VERBYTE = bytes.fromhex("37") P2SH_VERBYTES = [bytes.fromhex("0A")] WIF_BYTE = bytes.fromhex("CC") TX_COUNT_HEIGHT = 14939 TX_COUNT = 23708 TX_PER_BLOCK = 2 RPC_PORT = 7111 PEERS = [ 'electrum.paccoin.io s t', 'electro-pac.paccoin.io s t' ] SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon ESTIMATE_FEE = 0.00001 RELAY_FEE = 0.00001 @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import x11_hash return x11_hash.getPoWHash(header) class PacTestnet(Pac): SHORTNAME = "tPAC" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587CF") XPRV_VERBYTES = bytes.fromhex("04358394") GENESIS_HASH = ('00000da63bd9478b655ef6bf1bf76cd9' 'af05202ab68643f9091e049b2b5280ed') P2PKH_VERBYTE = bytes.fromhex("78") P2SH_VERBYTES = [bytes.fromhex("0E")] WIF_BYTE = bytes.fromhex("EF") TX_COUNT_HEIGHT = 16275 TX_COUNT = 16275 TX_PER_BLOCK = 1 RPC_PORT = 17111 class Polis(Coin): NAME = "Polis" SHORTNAME = "POLIS" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("03E25D7E") XPRV_VERBYTES = bytes.fromhex("03E25945") GENESIS_HASH = ('000009701eb781a8113b1af1d814e2f0' '60f6408a2c990db291bc5108a1345c1e') P2PKH_VERBYTE = bytes.fromhex("37") P2SH_VERBYTES = [bytes.fromhex("38")] WIF_BYTE = bytes.fromhex("3c") TX_COUNT_HEIGHT = 111111 TX_COUNT = 256128 TX_PER_BLOCK = 4 RPC_PORT = 24127 PEERS = [ 'electrum1-polis.polispay.org', 'electrum2-polis.polispay.org' ] SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon @classmethod def header_hash(cls, header): '''Given a 
header return the hash.''' import x11_hash return x11_hash.getPoWHash(header) class ColossusXT(Coin): NAME = "ColossusXT" SHORTNAME = "COLX" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("022D2533") XPRV_VERBYTES = bytes.fromhex("0221312B") GENESIS_HASH = ('a0ce8206c908357008c1b9a8ba2813af' 'f0989ca7f72d62b14e652c55f02b4f5c') P2PKH_VERBYTE = bytes.fromhex("1E") P2SH_VERBYTES = [bytes.fromhex("0D")] WIF_BYTE = bytes.fromhex("D4") TX_COUNT_HEIGHT = 356500 TX_COUNT = 761041 TX_PER_BLOCK = 4 RPC_PORT = 51473 PEERS = [ 'electrum1-colx.polispay.org', 'electrum2-colx.polispay.org' ] SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import quark_hash return quark_hash.getPoWHash(header) class GoByte(Coin): NAME = "GoByte" SHORTNAME = "GBX" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") GENESIS_HASH = ('0000033b01055cf8df90b01a14734cae' '92f7039b9b0e48887b4e33a469d7bc07') P2PKH_VERBYTE = bytes.fromhex("26") P2SH_VERBYTES = [bytes.fromhex("0A")] WIF_BYTE = bytes.fromhex("C6") TX_COUNT_HEIGHT = 115890 TX_COUNT = 245030 TX_PER_BLOCK = 4 RPC_PORT = 12454 PEERS = [ 'electrum1-gbx.polispay.org', 'electrum2-gbx.polispay.org' ] SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import neoscrypt return neoscrypt.getPoWHash(header) class Monoeci(Coin): NAME = "Monoeci" SHORTNAME = "XMCC" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488B21E") XPRV_VERBYTES = bytes.fromhex("0488ADE4") GENESIS_HASH = ('0000005be1eb05b05fb45ae38ee9c144' '1514a65343cd146100a574de4278f1a3') P2PKH_VERBYTE = bytes.fromhex("32") P2SH_VERBYTES = [bytes.fromhex("49")] WIF_BYTE = bytes.fromhex("4D") TX_COUNT_HEIGHT = 140000 TX_COUNT = 140000 TX_PER_BLOCK = 4 RPC_PORT = 24156 PEERS = [ 'electrum1-gbx.polispay.org', 'electrum2-gbx.polispay.org' ] SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import x11_hash return x11_hash.getPoWHash(header) class Minexcoin(EquihashMixin, Coin): NAME = "Minexcoin" SHORTNAME = "MNX" NET = "mainnet" P2PKH_VERBYTE = bytes.fromhex("4b") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('490a36d9451a55ed197e34aca7414b35' 'd775baa4a8e896f1c577f65ce2d214cb') STATIC_BLOCK_HEADERS = True BASIC_HEADER_SIZE = 209 HEADER_SIZE_NO_SOLUTION = 140 TX_COUNT = 327963 TX_COUNT_HEIGHT = 74495 TX_PER_BLOCK = 5 RPC_PORT = 8022 CHUNK_SIZE = 960 PEERS = [ 'elex01-ams.turinex.eu s t', 'eu.minexpool.nl s t' ] @classmethod def electrum_header(cls, header, height): h = super().electrum_header(header, height) h['solution'] = hash_to_hex_str(header[cls.HEADER_SIZE_NO_SOLUTION:]) return h @classmethod def block_header(cls, block, height): '''Return the block header bytes''' deserializer = cls.DESERIALIZER(block) return deserializer.read_header(height, cls.HEADER_SIZE_NO_SOLUTION) class Groestlcoin(Coin): NAME = "Groestlcoin" SHORTNAME = "GRS" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("24") P2SH_VERBYTES = [bytes.fromhex("05")] WIF_BYTE = bytes.fromhex("80") GENESIS_HASH = ('00000ac5927c594d49cc0bdb81759d0d' 'a8297eb614683d3acb62f0703b639023') DESERIALIZER = lib_tx.DeserializerGroestlcoin TX_COUNT = 115900 TX_COUNT_HEIGHT = 1601528 TX_PER_BLOCK = 5 RPC_PORT = 1441 PEERS = [ 'electrum1.groestlcoin.org s t', 
'electrum2.groestlcoin.org s t', '6brsrbiinpc32tfc.onion t', 'xkj42efxrcy6vbfw.onion t', ] def grshash(data): import groestlcoin_hash return groestlcoin_hash.getHash(data, len(data)) @classmethod def header_hash(cls, header): '''Given a header return the hash.''' return cls.grshash(header) ENCODE_CHECK = partial(Base58.encode_check, hash_fn=grshash) DECODE_CHECK = partial(Base58.decode_check, hash_fn=grshash) class GroestlcoinTestnet(Groestlcoin): SHORTNAME = "TGRS" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587cf") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("6f") P2SH_VERBYTES = [bytes.fromhex("c4")] WIF_BYTE = bytes.fromhex("ef") GENESIS_HASH = ('000000ffbb50fc9898cdd36ec163e6ba' '23230164c0052a28876255b7dcf2cd36') RPC_PORT = 17766 PEERS = [ 'electrum-test1.groestlcoin.org s t', 'electrum-test2.groestlcoin.org s t', '7frvhgofuf522b5i.onion t', 'aocojvqcybdoxekv.onion t', ] class Pivx(Coin): NAME = "Pivx" SHORTNAME = "PIVX" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("022D2533") XPRV_VERBYTES = bytes.fromhex("0221312B") P2PKH_VERBYTE = bytes.fromhex("1e") P2SH_VERBYTES = [bytes.fromhex("0d")] WIF_BYTE = bytes.fromhex("d4") GENESIS_HASH = ('0000041e482b9b9691d98eefb4847340' '5c0b8ec31b76df3797c74a78680ef818') BASIC_HEADER_SIZE = 80 HDR_V4_SIZE = 112 HDR_V4_HEIGHT = 863787 HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE TX_COUNT = 2930206 TX_COUNT_HEIGHT = 1299212 TX_PER_BLOCK = 2 RPC_PORT = 51473 @classmethod def static_header_offset(cls, height): assert cls.STATIC_BLOCK_HEADERS if height >= cls.HDR_V4_HEIGHT: relative_v4_offset = (height - cls.HDR_V4_HEIGHT) * cls.HDR_V4_SIZE return cls.HDR_V4_START_OFFSET + relative_v4_offset else: return height * cls.BASIC_HEADER_SIZE @classmethod def header_hash(cls, header): version, = util.unpack_le_uint32_from(header) if version >= 4: return super().header_hash(header) else: import quark_hash return quark_hash.getPoWHash(header) class PivxTestnet(Pivx): SHORTNAME = "tPIVX" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("3a8061a0") XPRV_VERBYTES = bytes.fromhex("3a805837") P2PKH_VERBYTE = bytes.fromhex("8B") P2SH_VERBYTES = [bytes.fromhex("13")] WIF_BYTE = bytes.fromhex("EF") GENESIS_HASH = ( '0000041e482b9b9691d98eefb48473405c0b8ec31b76df3797c74a78680ef818') BASIC_HEADER_SIZE = 80 HDR_V4_SIZE = 112 HDR_V4_HEIGHT = 863787 HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE TX_COUNT = 2157510 TX_COUNT_HEIGHT = 569399 TX_PER_BLOCK = 2 RPC_PORT = 51472 class Bitg(Coin): NAME = "BitcoinGreen" SHORTNAME = "BITG" NET = "mainnet" XPUB_VERBYTES = bytes.fromhex("0488b21e") XPRV_VERBYTES = bytes.fromhex("0488ade4") P2PKH_VERBYTE = bytes.fromhex("26") P2SH_VERBYTES = [bytes.fromhex("06")] WIF_BYTE = bytes.fromhex("2e") GENESIS_HASH = ( '000008467c3a9c587533dea06ad9380cded3ed32f9742a6c0c1aebc21bf2bc9b') DAEMON = daemon.DashDaemon TX_COUNT = 1000 TX_COUNT_HEIGHT = 10000 TX_PER_BLOCK = 1 RPC_PORT = 9332 REORG_LIMIT = 1000 SESSIONCLS = DashElectrumX DAEMON = daemon.DashDaemon @classmethod def header_hash(cls, header): '''Given a header return the hash.''' import quark_hash return quark_hash.getPoWHash(header) class tBitg(Bitg): SHORTNAME = "tBITG" NET = "testnet" XPUB_VERBYTES = bytes.fromhex("043587cf") XPRV_VERBYTES = bytes.fromhex("04358394") P2PKH_VERBYTE = bytes.fromhex("62") P2SH_VERBYTES = [bytes.fromhex("0c")] WIF_BYTE = bytes.fromhex("6c") GENESIS_HASH = ( '000008467c3a9c587533dea06ad9380cded3ed32f9742a6c0c1aebc21bf2bc9b') RPC_PORT = 19332
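# --- Illustrative sketch (not part of the upstream coin definitions above;
# the helper name is made up for this example, the constants are copied from
# the Pivx class). Pivx switches from 80-byte to 112-byte block headers at
# HDR_V4_HEIGHT, which is why its static_header_offset is piecewise linear;
# the standalone helper below redoes that arithmetic as a worked example.
def _pivx_header_offset_sketch(height, basic_size=80, v4_size=112,
                               v4_height=863787):
    # Heights below the fork store BASIC_HEADER_SIZE bytes per header; heights
    # at or above it add HDR_V4_SIZE bytes per header on top of the fixed
    # offset accumulated by the pre-fork headers.
    if height >= v4_height:
        return v4_height * basic_size + (height - v4_height) * v4_size
    return height * basic_size


assert _pivx_header_offset_sketch(100) == 8000
assert _pivx_header_offset_sketch(863787) == 863787 * 80
assert _pivx_header_offset_sketch(863788) == 863787 * 80 + 112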
py
1a45bd32c61d61d757f4b9c04846b5c58220752f
from collections import OrderedDict, deque, namedtuple import numpy as np from sklearn.model_selection import train_test_split from torchlib.dataset.utils import create_data_loader from torchlib.deep_rl import BaseAgent class Dataset(object): def __init__(self): self._states = [] self._actions = [] self._next_states = [] self._rewards = [] self._dones = [] @property def is_empty(self): return len(self) == 0 def __len__(self): return len(self._states) ################## ### Statistics ### ################## @property def state_mean(self): return np.mean(self._states, axis=0).astype(np.float32) @property def state_std(self): return np.std(self._states, axis=0).astype(np.float32) @property def action_mean(self): return np.mean(self._actions, axis=0).astype(np.float32) @property def action_std(self): return np.std(self._actions, axis=0).astype(np.float32) @property def delta_state_mean(self): return np.mean(np.array(self._next_states) - np.array(self._states), axis=0).astype(np.float32) @property def delta_state_std(self): return np.std(np.array(self._next_states) - np.array(self._states), axis=0).astype(np.float32) ################### ### Adding data ### ################### def add(self, state, action, next_state, reward, done): """ Add (s, a, r, s') to this dataset """ if not self.is_empty: # ensure the state, action, next_state are of the same dimension assert len(self._states[-1]) == len(np.ravel(state)) assert len(self._actions[-1]) == len(np.ravel(action)) assert len(self._next_states[-1]) == len(np.ravel(next_state)) self._states.append(np.ravel(state)) self._actions.append(np.ravel(action)) self._next_states.append(np.ravel(next_state)) self._rewards.append(reward) self._dones.append(done) def append(self, other_dataset): """ Append other_dataset to this dataset """ if not self.is_empty and not other_dataset.is_empty: # ensure the state, action, next_state are of the same dimension assert len(self._states[-1]) == len(other_dataset._states[-1]) assert len(self._actions[-1]) == len(other_dataset._actions[-1]) assert len(self._next_states[-1]) == len(other_dataset._next_states[-1]) self._states += other_dataset._states self._actions += other_dataset._actions self._next_states += other_dataset._next_states self._rewards += other_dataset._rewards self._dones += other_dataset._dones ############################ ### Iterate through data ### ############################ def rollout_iterator(self): """ Iterate through all the rollouts in the dataset sequentially """ end_indices = np.nonzero(self._dones)[0] + 1 states = np.asarray(self._states) actions = np.asarray(self._actions) next_states = np.asarray(self._next_states) rewards = np.asarray(self._rewards) dones = np.asarray(self._dones) start_idx = 0 for end_idx in end_indices: indices = np.arange(start_idx, end_idx) yield states[indices], actions[indices], next_states[indices], rewards[indices], dones[indices] start_idx = end_idx def random_iterator(self, batch_size): """ Iterate once through all (s, a, r, s') in batches in a random order """ all_indices = np.nonzero(np.logical_not(self._dones))[0] np.random.shuffle(all_indices) states = np.asarray(self._states) actions = np.asarray(self._actions) next_states = np.asarray(self._next_states) rewards = np.asarray(self._rewards) dones = np.asarray(self._dones) i = 0 while i < len(all_indices): indices = all_indices[i:i + batch_size] yield states[indices], actions[indices], next_states[indices], rewards[indices], dones[indices] i += batch_size def log(self): end_idxs = np.nonzero(self._dones)[0] 
+ 1 returns = [] start_idx = 0 for end_idx in end_idxs: rewards = self._rewards[start_idx:end_idx] returns.append(np.sum(rewards)) start_idx = end_idx stats = OrderedDict({ 'ReturnAvg': np.mean(returns), 'ReturnStd': np.std(returns), 'ReturnMin': np.min(returns), 'ReturnMax': np.max(returns) }) return stats Transition = namedtuple('Transition', ('state', 'action', 'reward')) ImitationTransition = namedtuple('ImitationTransition', ('state', 'action', 'reward', 'best_action')) class EpisodicDataset(object): def __init__(self, maxlen=10000): self.memory = deque() # current state self._states = [] self._actions = [] self._rewards = [] self.size = 0 # initial state self.initial_state = deque(maxlen=maxlen) self.maxlen = maxlen def __len__(self): return self.size @property def num_trajectories(self): return len(self.memory) @property def is_empty(self): return len(self) == 0 @property def state_mean(self): states = [] for trajectory in self.memory: states.append(trajectory.state) return np.mean(np.concatenate(states, axis=0), axis=0) @property def state_std(self): states = [] for trajectory in self.memory: states.append(trajectory.state) return np.std(np.concatenate(states, axis=0), axis=0) @property def action_mean(self): actions = [] for trajectory in self.memory: actions.append(trajectory.action) return np.mean(np.concatenate(actions, axis=0), axis=0).astype(np.float32) @property def action_std(self): actions = [] for trajectory in self.memory: actions.append(trajectory.action) return np.std(np.concatenate(actions, axis=0), axis=0).astype(np.float32) @property def delta_state_mean(self): delta_states = [] for trajectory in self.memory: states = trajectory.state delta_states.append(states[1:] - states[:-1]) return np.mean(np.concatenate(delta_states, axis=0), axis=0) @property def delta_state_std(self): delta_states = [] for trajectory in self.memory: states = trajectory.state delta_states.append(states[1:] - states[:-1]) return np.std(np.concatenate(delta_states, axis=0), axis=0) @property def reward_mean(self): rewards = [] for trajectory in self.memory: rewards.append(trajectory.reward) return np.mean(np.concatenate(rewards, axis=0), axis=0).astype(np.float32) @property def reward_std(self): rewards = [] for trajectory in self.memory: rewards.append(trajectory.reward) return np.std(np.concatenate(rewards, axis=0), axis=0).astype(np.float32) def add(self, state, action, next_state, reward, done): self._states.append(np.ravel(state)) if isinstance(action, np.ndarray) and len(action.shape) != 0: self._actions.append(np.ravel(action)) else: self._actions.append(action) self._rewards.append(np.ravel(reward)) self.size += 1 if done: self._states.append(next_state) self.memory.append(Transition(state=np.array(self._states), action=np.array(self._actions), reward=np.array(self._rewards))) self._states = [] self._actions = [] self._rewards = [] def append(self, other_dataset): self.memory.extend(other_dataset.memory) self.size += other_dataset.size while self.size > self.maxlen: trajectory = self.memory.popleft() self.size -= len(trajectory.action) def get_initial_states(self): init_states = [] for trajectory in self.memory: init_states.append(trajectory.state[0].copy()) return init_states def rollout_iterator(self): for trajectory in self.memory: states = trajectory.state[:-1] next_states = trajectory.state[1:] actions = trajectory.action rewards = trajectory.reward dones = [False] * actions.shape[0] dones[-1] = True yield states, actions, next_states, rewards, dones def random_iterator(self, 
batch_size, train_val_split_ratio=0.2): states = [] actions = [] rewards = [] next_states = [] dones = [] for trajectory in self.memory: states.append(trajectory.state[:-1]) actions.append(trajectory.action) next_states.append(trajectory.state[1:]) rewards.append(trajectory.reward) done = [False] * trajectory.action.shape[0] done[-1] = True dones.append(np.array(done)) states = np.concatenate(states, axis=0) actions = np.concatenate(actions, axis=0) next_states = np.concatenate(next_states, axis=0) rewards = np.concatenate(rewards, axis=0) dones = np.concatenate(dones, axis=0) input_tuple = (states, actions, next_states, rewards, dones) output_tuple = train_test_split(*input_tuple, test_size=train_val_split_ratio) train_tuple = output_tuple[0::2] val_tuple = output_tuple[1::2] # in training, we drop last batch to avoid batch size 1 that may crash batch_norm layer. train_data_loader = create_data_loader(train_tuple, batch_size=batch_size, shuffle=True, drop_last=True) val_data_loader = create_data_loader(val_tuple, batch_size=batch_size, shuffle=False, drop_last=False) return train_data_loader, val_data_loader def log(self): returns = [] for trajectory in self.memory: returns.append(np.sum(trajectory.reward)) stats = OrderedDict({ 'ReturnAvg': np.mean(returns), 'ReturnStd': np.std(returns), 'ReturnMin': np.min(returns), 'ReturnMax': np.max(returns) }) return stats class StateActionPairDataset(object): def __init__(self, max_size): self.states = deque(maxlen=max_size) self.actions = deque(maxlen=max_size) def __len__(self): return len(self.states) @property def maxlen(self): return self.states.maxlen @property def is_empty(self): return len(self) == 0 def add(self, state, action): self.states.append(state) self.actions.append(action) @property def state_stats(self): states = np.array(self.states) return np.mean(states, axis=0), np.std(states, axis=0) @property def action_stats(self): actions = np.array(self.actions) return np.mean(actions, axis=0), np.std(actions, axis=0) def random_iterator(self, batch_size, train_val_split_ratio=0.2): states = np.array(self.states) actions = np.array(self.actions) input_tuple = (states, actions) output_tuple = train_test_split(*input_tuple, test_size=train_val_split_ratio) train_tuple = output_tuple[0::2] val_tuple = output_tuple[1::2] # in training, we drop last batch to avoid batch size 1 that may crash batch_norm layer. train_data_loader = create_data_loader(train_tuple, batch_size=batch_size, shuffle=True, drop_last=True) val_data_loader = create_data_loader(val_tuple, batch_size=batch_size, shuffle=False, drop_last=False) return train_data_loader, val_data_loader def gather_rollouts(env, policy: BaseAgent, num_rollouts, max_rollout_length) -> EpisodicDataset: dataset = EpisodicDataset() for _ in range(num_rollouts): state = env.reset() done = False t = 0 while not done: t += 1 if state.dtype == np.float: state = state.astype(np.float32) action = policy.predict(state) if isinstance(action, np.ndarray) and action.dtype == np.float: action = action.astype(np.float32) next_state, reward, done, _ = env.step(action) if next_state.dtype == np.float: next_state = next_state.astype(np.float32) done = done or (t >= max_rollout_length) dataset.add(state, action, next_state, reward, done) state = next_state return dataset
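# --- Hedged usage sketch (not part of the original module; the shapes, rewards
# and the helper name below are arbitrary choices made only for illustration).
# It shows how EpisodicDataset accumulates transitions until `done`, then
# exposes per-trajectory statistics such as delta_state_mean and log().
def _episodic_dataset_smoke_test():
    data = EpisodicDataset(maxlen=100)
    s0 = np.zeros(3, dtype=np.float32)
    s1 = np.ones(3, dtype=np.float32)
    s2 = 2 * np.ones(3, dtype=np.float32)
    # Two transitions; the second one terminates the episode, which is when the
    # buffered lists are packed into a Transition and appended to memory.
    data.add(s0, np.array([0.1], dtype=np.float32), s1, reward=1.0, done=False)
    data.add(s1, np.array([-0.1], dtype=np.float32), s2, reward=0.5, done=True)
    assert len(data) == 2 and data.num_trajectories == 1
    # delta_state statistics are taken over s_{t+1} - s_t within each trajectory.
    assert np.allclose(data.delta_state_mean, np.ones(3))
    stats = data.log()
    assert np.isclose(stats['ReturnAvg'], 1.5)
    return stats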
py
1a45be108eed6505f8b96ba3b01c2031edd741ad
#!/usr/bin/env python3 # Copyright (c) 2016 The Magmelldollar Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import MagmelldollarTestFramework from test_framework.util import * from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment from test_framework.siphash import siphash256 from test_framework.script import CScript, OP_TRUE VB_TOP_BITS = 0x20000000 ''' CompactBlocksTest -- test compact blocks (BIP 152) Version 1 compact blocks are pre-segwit (txids) Version 2 compact blocks are post-segwit (wtxids) ''' # TestNode: A peer we use to send messages to magmelldollard, and store responses. class TestNode(SingleNodeConnCB): def __init__(self): SingleNodeConnCB.__init__(self) self.last_sendcmpct = [] self.last_headers = None self.last_inv = None self.last_cmpctblock = None self.block_announced = False self.last_getdata = None self.last_getblocktxn = None self.last_block = None self.last_blocktxn = None # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. self.set_announced_blockhashes = set() def on_sendcmpct(self, conn, message): self.last_sendcmpct.append(message) def on_block(self, conn, message): self.last_block = message def on_cmpctblock(self, conn, message): self.last_cmpctblock = message self.block_announced = True self.last_cmpctblock.header_and_shortids.header.calc_sha256() self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256) def on_headers(self, conn, message): self.last_headers = message self.block_announced = True for x in self.last_headers.headers: x.calc_sha256() self.set_announced_blockhashes.add(x.sha256) def on_inv(self, conn, message): self.last_inv = message for x in self.last_inv.inv: if x.type == 2: self.block_announced = True self.set_announced_blockhashes.add(x.hash) def on_getdata(self, conn, message): self.last_getdata = message def on_getblocktxn(self, conn, message): self.last_getblocktxn = message def on_blocktxn(self, conn, message): self.last_blocktxn = message # Requires caller to hold mininode_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with mininode_lock: self.block_announced = False self.last_inv = None self.last_headers = None self.last_cmpctblock = None def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.connection.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) assert(wait_until(self.received_block_announcement, timeout=30)) assert(self.received_block_announcement()) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. 
def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): return (block_hash in self.set_announced_blockhashes) return wait_until(received_hash, timeout=timeout) class CompactBlocksTest(MagmelldollarTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True # Node0 = pre-segwit, node1 = segwit-aware self.num_nodes = 2 self.utxos = [] def setup_network(self): self.nodes = [] # Start up node0 to be a version 1, pre-segwit node. self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"], ["-debug", "-logtimemicros", "-txindex"]]) connect_nodes(self.nodes[0], 1) def build_block_on_tip(self, node, segwit=False): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1) block.nVersion = VB_TOP_BITS if segwit: add_witness_commitment(block) block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0. block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) self.nodes[0].generate(100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) for i in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): return (len(test_node.last_sendcmpct) > 0) got_message = wait_until(received_sendcmpct, timeout=30) assert(received_sendcmpct()) assert(got_message) with mininode_lock: # Check that the first version received is the preferred one assert_equal(test_node.last_sendcmpct[0].version, preferred_version) # And that we receive versions down to 1. assert_equal(test_node.last_sendcmpct[-1].version, 1) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert(peer.block_announced) assert(got_message) with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( block_hash, peer.last_cmpctblock, peer.last_inv)) # We shouldn't get any block announcements via cmpctblock yet. 
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None) # Try one more time, this time after requesting headers. test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() sendcmpct.version = preferred_version+1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True sendcmpct.version = preferred_version sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time, after sending a version-1, announce=false message. sendcmpct.version = preferred_version-1 sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None) if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. sendcmpct.version = preferred_version-1 sendcmpct.announce = True old_node.send_and_ping(sendcmpct) # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None) # This test actually causes magmelldollard to (reasonably!) disconnect us, so do this last. def test_invalid_cmpctblock_message(self): self.nodes[0].generate(101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_and_ping(msg_cmpctblock(cmpct_block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # magmelldollard's choice of nonce. def test_compactblock_construction(self, node, test_node, version, use_witness_address): # Generate a bunch of transactions. 
node.generate(101) num_transactions = 25 address = node.getnewaddress() if use_witness_address: # Want at least one segwit spend, so move all funds to # a witness address. address = node.addwitnessaddress(address) value_to_send = node.getbalance() node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1))) node.generate(1) segwit_tx_generated = False for i in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) if not tx.wit.is_null(): segwit_tx_generated = True if use_witness_address: assert(segwit_tx_generated) # check that our test is not broken # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) assert(test_node.wait_for_block_announcement(tip)) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False)) [tx.calc_sha256() for tx in block.vtx] block.rehash() # Don't care which type of announcement came back for this test; just # request the compact block if we didn't get one yet. wait_until(test_node.received_block_announcement, timeout=30) assert(test_node.received_block_announcement()) with mininode_lock: if test_node.last_cmpctblock is None: test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" test_node.send_message(msg_getdata([inv])) wait_until(test_node.received_block_announcement, timeout=30) assert(test_node.received_block_announcement()) # Now we should have the compactblock header_and_shortids = None with mininode_lock: assert(test_node.last_cmpctblock is not None) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids) # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert(len(header_and_shortids.prefilled_txn) >= 1) assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks the non-witness parts of the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # And this checks the witness wtxid = entry.tx.calc_sha256(True) if version == 2: assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True)) else: # Shouldn't have received a witness assert(entry.tx.wit.is_null()) # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. 
[k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): if (len(header_and_shortids.prefilled_txn) > 0 and header_and_shortids.prefilled_txn[0].index == index): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 if version == 2: tx_hash = block.vtx[index].calc_sha256(True) shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that magmelldollard requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed. # Post-segwit: upgraded nodes would only make this request of cb-version-2, # NODE_WITNESS peers. Unupgraded nodes would still make this request of # any cb-version-1-supporting peer. def test_compactblock_requests(self, node, test_node, version, segwit): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node, segwit=segwit) with mininode_lock: test_node.last_getdata = None if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) else: test_node.send_header_for_blocks([block]) success = wait_until(lambda: test_node.last_getdata is not None, timeout=30) assert(success) assert_equal(len(test_node.last_getdata.inv), 1) assert_equal(test_node.last_getdata.inv[0].type, 4) assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 if version == 2: coinbase_hash = block.vtx[0].calc_sha256(True) comp_block.shortids = [ calculate_shortid(k0, k1, coinbase_hash) ] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: assert(test_node.last_getblocktxn is not None) absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute() assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. if version == 2: msg = msg_witness_blocktxn() else: msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 100000, CScript([OP_TRUE]))) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. 
def test_getblocktxn_requests(self, node, test_node, version): with_witness = (version==2) def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert(peer.last_getblocktxn is not None) absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute() assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() if with_witness: msg_bt = msg_witness_blocktxn() # serialize with witnesses msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(block.vtx[1])) assert(block.vtx[1].hash in node.getrawmempool()) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [5]) msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) for tx in block.vtx[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) # Clear out last request. with mininode_lock: test_node.last_getblocktxn = None # Send compact block comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness) test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert(test_node.last_getblocktxn is None) # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. 
def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in block.vtx[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:6]: assert(tx.hash in mempool) # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2)) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indexes = [] with mininode_lock: assert(test_node.last_getblocktxn is not None) absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute() assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) # Now give an incorrect response. # Note that it's possible for magmelldollard to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change were made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() if version==2: msg = msg_witness_blocktxn() msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:]) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request success = wait_until(lambda: test_node.last_getdata is not None, timeout=10) assert(success) assert_equal(len(test_node.last_getdata.inv), 1) assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2|MSG_WITNESS_FLAG) assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) # Deliver the block if version==2: test_node.send_and_ping(msg_witness_block(block)) else: test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # magmelldollard will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. 
MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10) assert(success) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_blocktxn.block_transactions.transactions.pop(0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) if version == 1: # Witnesses should have been stripped assert(tx.wit.is_null()) else: # Check that the witness matches assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) test_node.last_blocktxn = None current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) with mininode_lock: test_node.last_block = None test_node.last_blocktxn = None test_node.send_and_ping(msg) with mininode_lock: test_node.last_block.block.calc_sha256() assert_equal(test_node.last_block.block.sha256, int(block_hash, 16)) assert_equal(test_node.last_blocktxn, None) def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30) test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30) assert(success) test_node.clear_block_announcement() node.generate(1) wait_until(test_node.received_block_announcement, timeout=30) test_node.clear_block_announcement() with mininode_lock: test_node.last_block = None test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) success = wait_until(lambda: test_node.last_block is not None, timeout=30) assert(success) with mininode_lock: test_node.last_block.block.calc_sha256() assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height-5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert(found) # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). 
msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_blocktxn = None test_node.send_and_ping(msg) with mininode_lock: assert(test_node.last_blocktxn is None) def activate_segwit(self, node): node.generate(144*3) assert_equal(get_bip9_status(node, "segwit")["status"], 'active') def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) [l.clear_block_announcement() for l in listeners] # ToHex() won't serialize with witness, but this block has no witnesses # anyway. TODO: repeat this test with witness tx's to a segwit node. node.submitblock(ToHex(block)) for l in listeners: wait_until(lambda: l.received_block_announcement(), timeout=30) with mininode_lock: for l in listeners: assert(l.last_cmpctblock is not None) l.last_cmpctblock.header_and_shortids.header.calc_sha256() assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit): assert(len(self.utxos)) utxo = self.utxos[0] block = self.build_block_with_transactions(node, utxo, 5) del block.vtx[3] block.hashMerkleRoot = block.calc_merkle_root() if use_segwit: # If we're testing with segwit, also drop the coinbase witness, # but include the witness commitment. add_witness_commitment(block) block.vtx[0].wit.vtxinwit = [] block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert(int(node.getbestblockhash(), 16) is not block.sha256) test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) msg = msg_sendcmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): assert(len(self.utxos)) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert(peer.last_getblocktxn is not None) return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ] 
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)] cmpct_block.use_witness = True delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert(int(node.getbestblockhash(), 16) != block.sha256) msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): # Setup the p2p connections and start up the network thread. self.test_node = TestNode() self.segwit_node = TestNode() self.old_node = TestNode() # version 1 peer <--> segwit node connections = [] connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.segwit_node, services=NODE_NETWORK|NODE_WITNESS)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.old_node, services=NODE_NETWORK)) self.test_node.add_connection(connections[0]) self.segwit_node.add_connection(connections[1]) self.old_node.add_connection(connections[2]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here self.test_node.wait_for_verack() # We will need UTXOs to construct transactions in later tests. self.make_utxos() print("Running tests, pre-segwit activation:") print("\tTesting SENDCMPCT p2p message... ") self.test_sendcmpct(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node) sync_blocks(self.nodes) print("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False) sync_blocks(self.nodes) self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False) sync_blocks(self.nodes) print("\tTesting compactblock requests... 
") self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False) sync_blocks(self.nodes) self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False) sync_blocks(self.nodes) print("\tTesting getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) sync_blocks(self.nodes) print("\tTesting getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) sync_blocks(self.nodes) print("\tTesting compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) sync_blocks(self.nodes) print("\tTesting handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2) sync_blocks(self.nodes) # End-to-end block relay tests print("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0], 1) self.request_cb_announcements(self.old_node, self.nodes[1], 1) self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) print("\tTesting handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False) self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False) print("\tTesting reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node) sync_blocks(self.nodes) # Advance to segwit activation print ("\nAdvancing to segwit activation\n") self.activate_segwit(self.nodes[1]) print ("Running tests, post-segwit activation...") print("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True) self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True) sync_blocks(self.nodes) print("\tTesting compactblock requests (unupgraded node)... ") self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True) print("\tTesting getblocktxn requests (unupgraded node)...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) # Need to manually sync node0 and node1, because post-segwit activation, # node1 will not download blocks from node0. print("\tSyncing nodes...") assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()) while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()): block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1) self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False)) assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) print("\tTesting compactblock requests (segwit node)... 
") self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True) print("\tTesting getblocktxn requests (segwit node)...") self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) sync_blocks(self.nodes) print("\tTesting getblocktxn handler (segwit node should return witnesses)...") self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) # Test that if we submitblock to node1, we'll get a compact block # announcement to all peers. # (Post-segwit activation, blocks won't propagate from node0 to node1 # automatically, so don't bother testing a block announced to node0.) print("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0], 1) self.request_cb_announcements(self.old_node, self.nodes[1], 1) self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) print("\tTesting handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False) self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True) print("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() if __name__ == '__main__': CompactBlocksTest().main()
py
1a45bead7109ab57510bb8a5c5e33ada2fc87746
# -*- coding: utf-8 -*- # # hello-world documentation build configuration file, created by # sphinx-quickstart on Sat May 19 21:13:39 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys # sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.todo', 'sphinx.ext.autodoc', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'hello-world' copyright = u'2018, c' author = u'c' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'1' # The full version, including alpha/beta/rc tags. release = u'1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # # today = '' # # Else, today_fmt is used as the format for a strftime call. # # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. # # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. # "<project> v<release> documentation" by default. # # html_title = u'hello-world v1' # A shorter title for the navigation bar. Default is the same as html_title. # # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # # html_additional_pages = {} # If false, no module index is generated. # # html_domain_indices = True # If false, no index is generated. # # html_use_index = True # If true, the index is split into individual pages for each letter. # # html_split_index = False # If true, links to the reST sources are added to the pages. # # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' # # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. 
If empty, the default will be used. # # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'hello-worlddoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'hello-world.tex', u'hello-world Documentation', u'c', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # # latex_use_parts = False # If true, show page references after internal links. # # latex_show_pagerefs = False # If true, show URL addresses after external links. # # latex_show_urls = False # Documents to append as an appendix to all manuals. # # latex_appendices = [] # It false, will not define \strong, \code, itleref, \crossref ... but only # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added # packages. # # latex_keep_old_macro_names = True # If false, no module index is generated. # # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'hello-world', u'hello-world Documentation', [author], 1) ] # If true, show URL addresses after external links. # # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'hello-world', u'hello-world Documentation', author, 'hello-world', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # # texinfo_appendices = [] # If false, no module index is generated. # # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # # texinfo_no_detailmenu = False
py
1a45beaeabcbfe8a77957dfdd2c6f2ef8bfa4eb2
import numpy as np from yt.testing import \ fake_random_ds, \ fake_particle_ds, \ assert_equal, \ assert_rel_equal, \ assert_almost_equal from yt import particle_filter def setup(): from yt.config import ytcfg ytcfg["yt","__withintesting"] = "True" def test_extrema(): for nprocs in [1, 2, 4, 8]: ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", "velocity_x", "velocity_y", "velocity_z")) for sp in [ds.sphere("c", (0.25, 'unitary')), ds.r[0.5,:,:]]: mi, ma = sp.quantities["Extrema"]("density") assert_equal(mi, np.nanmin(sp["density"])) assert_equal(ma, np.nanmax(sp["density"])) dd = ds.all_data() mi, ma = dd.quantities["Extrema"]("density") assert_equal(mi, np.nanmin(dd["density"])) assert_equal(ma, np.nanmax(dd["density"])) sp = ds.sphere("max", (0.25, 'unitary')) assert_equal(np.any(np.isnan(sp["radial_velocity"])), False) mi, ma = dd.quantities["Extrema"]("radial_velocity") assert_equal(mi, np.nanmin(dd["radial_velocity"])) assert_equal(ma, np.nanmax(dd["radial_velocity"])) def test_average(): for nprocs in [1, 2, 4, 8]: ds = fake_random_ds(16, nprocs = nprocs, fields = ("density",)) for ad in [ds.all_data(), ds.r[0.5, :, :]]: my_mean = ad.quantities["WeightedAverageQuantity"]("density", "ones") assert_rel_equal(my_mean, ad["density"].mean(), 12) my_mean = ad.quantities["WeightedAverageQuantity"]("density", "cell_mass") a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum() assert_rel_equal(my_mean, a_mean, 12) def test_variance(): for nprocs in [1, 2, 4, 8]: ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", )) for ad in [ds.all_data(), ds.r[0.5, :, :]]: my_std, my_mean = ad.quantities["WeightedVariance"]("density", "ones") assert_rel_equal(my_mean, ad["density"].mean(), 12) assert_rel_equal(my_std, ad["density"].std(), 12) my_std, my_mean = ad.quantities["WeightedVariance"]("density", "cell_mass") a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum() assert_rel_equal(my_mean, a_mean, 12) a_std = np.sqrt((ad["cell_mass"] * (ad["density"] - a_mean)**2).sum() / ad["cell_mass"].sum()) assert_rel_equal(my_std, a_std, 12) def test_max_location(): for nprocs in [1, 2, 4, 8]: ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", )) for ad in [ds.all_data(), ds.r[0.5, :, :]]: mv, x, y, z = ad.quantities.max_location(("gas", "density")) assert_equal(mv, ad["density"].max()) mi = np.argmax(ad["density"]) assert_equal(ad["x"][mi], x) assert_equal(ad["y"][mi], y) assert_equal(ad["z"][mi], z) def test_min_location(): for nprocs in [1, 2, 4, 8]: ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", )) for ad in [ds.all_data(), ds.r[0.5, :, :]]: mv, x, y, z = ad.quantities.min_location(("gas", "density")) assert_equal(mv, ad["density"].min()) mi = np.argmin(ad["density"]) assert_equal(ad["x"][mi], x) assert_equal(ad["y"][mi], y) assert_equal(ad["z"][mi], z) def test_sample_at_min_field_values(): for nprocs in [1, 2, 4, 8]: ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", "temperature", "velocity_x")) for ad in [ds.all_data(), ds.r[0.5, :, :]]: mv, temp, vm = ad.quantities.sample_at_min_field_values( "density", ["temperature", "velocity_x"]) assert_equal(mv, ad["density"].min()) mi = np.argmin(ad["density"]) assert_equal(ad["temperature"][mi], temp) assert_equal(ad["velocity_x"][mi], vm) def test_sample_at_max_field_values(): for nprocs in [1, 2, 4, 8]: ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", "temperature", "velocity_x")) for ad in [ds.all_data(), ds.r[0.5, :, :]]: mv, temp, vm = 
ad.quantities.sample_at_max_field_values( "density", ["temperature", "velocity_x"]) assert_equal(mv, ad["density"].max()) mi = np.argmax(ad["density"]) assert_equal(ad["temperature"][mi], temp) assert_equal(ad["velocity_x"][mi], vm) def test_derived_quantities_with_particle_types(): ds = fake_particle_ds() @particle_filter(requires=["particle_position_x"], filtered_type='all') def low_x(pfilter,data): return data['particle_position_x'].in_units('code_length')<0.5 ds.add_particle_filter('low_x') ad=ds.all_data() for ptype in ['all','low_x']: #Check bulk velocity bulk_vx=(ad[(ptype,'particle_mass')]*ad[(ptype,'particle_velocity_x')]/ad[(ptype,'particle_mass')].sum()).sum() assert_almost_equal(ad.quantities.bulk_velocity(use_gas=False,use_particles=True,particle_type=ptype)[0],bulk_vx,5) #Check center of mass com_x=(ad[(ptype,'particle_mass')]*ad[(ptype,'particle_position_x')]/ad[(ptype,'particle_mass')].sum()).sum() assert_almost_equal(ad.quantities.center_of_mass(use_gas=False,use_particles=True,particle_type=ptype)[0],com_x,5) #Check angular momentum vector l_x=(ad[(ptype,'particle_specific_angular_momentum_x')]*ad[(ptype,'particle_mass')]/ad[(ptype,'particle_mass')].sum()).sum() assert_almost_equal(ad.quantities.angular_momentum_vector(use_gas=False,use_particles=True,particle_type=ptype)[0],l_x,5) #Check spin parameter values assert_almost_equal(ad.quantities.spin_parameter(use_gas=False,use_particles=True),655.7311454765503) assert_almost_equal(ad.quantities.spin_parameter(use_gas=False,use_particles=True,particle_type='low_x'),1309.164886405665)
py
1a45bfceec44ec6506d81fcb14b91508098a39d0
from tensorflow.keras import Input
from tensorflow.keras.layers import Dropout, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.losses import SparseCategoricalCrossentropy

from graphgallery import floatx
from graphgallery.nn.models import TFKeras


class MLP(TFKeras):

    def __init__(self, in_features, out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, bias=False):

        if len(hids) != len(acts):
            raise RuntimeError(f"Arguments 'hids' and 'acts' should have the same length."
                               " Or you can set both of them to `[]`.")

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        h = x

        for hid, act in zip(hids, acts):
            h = Dropout(rate=dropout)(h)
            h = Dense(hid, use_bias=bias, activation=act,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)

        h = Dropout(rate=dropout)(h)
        h = Dense(out_features, use_bias=bias,
                  kernel_regularizer=regularizers.l2(weight_decay))(h)

        super().__init__(inputs=x, outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
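

# --- Usage sketch (added for illustration; not part of the original graphgallery file) ---
# Assumes TFKeras behaves like a standard tf.keras Model, so fit() accepts plain
# NumPy arrays; the feature and label arrays below are made-up placeholders for a
# real node-classification dataset.
if __name__ == "__main__":
    import numpy as np

    x = np.random.rand(32, 128).astype("float32")   # 32 nodes with 128 attributes each
    y = np.random.randint(0, 7, size=32)            # integer labels for 7 classes
    model = MLP(in_features=128, out_features=7, hids=[16], acts=['relu'])
    model.fit(x, y, epochs=2, verbose=0)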
py
1a45c050552b606dd7dd138f99cc715f48cdbd2f
import graphistry, os, pandas as pd, streamlit as st from time import sleep from components import GraphistrySt, URLParam from css import all_css from util import getChild ############################################ # # DASHBOARD SETTINGS # ############################################ # Controls how entrypoint.py picks it up app_id = 'app_01' logger = getChild(app_id) urlParams = URLParam(app_id) def info(): return { 'id': app_id, 'name': 'INTRO: fancy graph', 'enabled': True, 'tags': ['demo', 'demo_intro'] } def run(): run_all() ############################################ # # CUSTOM CSS # ############################################ # Have fun! def custom_css(): all_css() # our favorites ############################################ # # SIDEBAR RENDER AERA # ############################################ # Given URL params, render left sidebar form and return combined filter settings ##https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets def sidebar_area(): st.sidebar.title('Pick graph') n_init = urlParams.get_field('N', 100) n = st.sidebar.number_input('Number of nodes', min_value=10, max_value=100000, value=n_init, step=20) urlParams.set_field('N', n) base_url = os.environ['BASE_URL'] edges_df = pd.concat([ pd.DataFrame({ 's': [x for x in range(n)], 'd': [(x + 1) % n for x in range(n)], 'link': [ '<a href="' + base_url + '/?view_index=app1&app1_N=' + str(x % n) + '">' + str(x % n) + " nodes</a>" for x in range(n) ] }), pd.DataFrame({ 's': [x for x in range(n)], 'd': [(x + 6) % n for x in range(n)], 'link': [ '<a href="' + base_url + '/?view_index=app1&app1_N=' + str(x % n) + '">' + str(x % n) + " nodes</a>" for x in range(n) ] }) ], sort=False, ignore_index=True) st.sidebar.title("Filter") option_to_label = { 'all': 'All', 'odd': 'Odds', 'even': 'Evens' } filter_by_node_type_init = urlParams.get_field('filter_by_type', default='all') filter_by_node_type = st.sidebar.selectbox('Filter nodes by:', ('all', 'odd', 'even'), index=('all', 'odd', 'even').index(filter_by_node_type_init), format_func=(lambda option: option_to_label[option])) urlParams.set_field('filter_by_type', filter_by_node_type) filter_by_node_range_init = ( urlParams.get_field('filter_by_node_range_min', default=0), urlParams.get_field('filter_by_node_range_max', default=n)) logger.info('filter_by_node_range_init: %s :: %s', filter_by_node_range_init, type(filter_by_node_range_init)) filter_by_node_range = st.sidebar.slider('Filter for nodes in range:', min_value=0, max_value=n, value=filter_by_node_range_init, step=1) urlParams.set_field('filter_by_node_range_min', filter_by_node_range[0]) urlParams.set_field('filter_by_node_range_max', filter_by_node_range[1]) return { 'n': n, 'edges_df': edges_df, 'node_type': filter_by_node_type, 'node_range': filter_by_node_range } ############################################ # # FILTER PIPELINE # ############################################ # Given filter settings, generate/cache/return dataframes & viz @st.cache(suppress_st_warning=True, allow_output_mutation=True) def run_filters(node_type, node_range, edges_df, n): filtered_edges_df = edges_df if node_type == 'all': pass elif node_type == 'odd': filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] % 2 == 1 ] filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] % 2 == 1 ] elif node_type == 'even': filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] % 2 == 0 ] filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] % 2 == 0 ] else: raise Exception('Unknown filter1 option result: 
%s' % node_type) if node_range[0] > 0: filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] >= node_range[0] ] filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] >= node_range[0] ] if node_range[1] <= n: filtered_edges_df = filtered_edges_df[ filtered_edges_df['s'] <= node_range[1] ] filtered_edges_df = filtered_edges_df[ filtered_edges_df['d'] <= node_range[1] ] #include viz generation as part of cache url = plot_url(filtered_edges_df, n) return { 'edges_df': filtered_edges_df, 'url': url } ############################################ # # VIZ # ############################################ def plot_url(edges_df, n): nodes_df = pd.DataFrame({ 'n': pd.concat([edges_df['s'], edges_df['d']]).unique() }) nodes_df['nc'] = nodes_df['n'].apply(lambda v: 0x01000000 * round(255 * v / n)) logger.info('Starting graphistry plot') if not GraphistrySt().test_login(): return '' url = graphistry\ .bind(source="s", destination="d")\ .edges(edges_df)\ .nodes(nodes_df)\ .bind(node='n', point_color='nc')\ .settings(url_params={ 'pointSize': 0.3, 'splashAfter': 'false', 'bg': '%23' + 'f0f2f6' })\ .plot(render=False) logger.info('Generated viz, got back urL: %s', url) return url ############################################ # # MAIN RENDER AERA # ############################################ # Given configured filters and computed results (cached), render def main_area(edges_df, url): logger.debug('rendering main area, with url: %s', url) GraphistrySt().render_url(url) ############################################ # # Putting it all together # ############################################ def run_all(): custom_css() try: # Render sidebar and get current settings sidebar_filters = sidebar_area() logger.debug('sidebar_filters: %s', sidebar_filters) # Compute filter pipeline (with auto-caching based on filter setting inputs) # Selective mark these as URL params as well filter_pipeline_result = run_filters(**sidebar_filters) # Render main viz area based on computed filter pipeline results and sidebar settings main_area(**filter_pipeline_result) except Exception as exn: st.write('Error loading dashboard') st.write(exn)
py
1a45c05eab13d026a0c5f44878063fe5ad40f417
from .request_api import RequestAPI
from .sonarr_api import SonarrAPI
from .radarr_api import RadarrAPI

__all__ = ["SonarrAPI", "RadarrAPI", "RequestAPI"]
py
1a45c0c2a95415b08484658e32365d085f0ae52f
import hashlib import io import os import uuid from functools import lru_cache from pathlib import Path import xmltodict from fastapi import Depends from pydantic import BaseModel from .config import Settings class CheckInfoBasesResponse(BaseModel): InfoBasesChanged: bool = False URL: str location: str def envelope(self) -> str: envelope = f""" <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <m:CheckInfoBasesResponse xmlns:m="{self.location}/WebCommonInfoBases"> <m:return xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/> <m:InfoBasesChanged xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">{self.InfoBasesChanged}</m:InfoBasesChanged> <m:URL xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">{self.URL}</m:URL> </m:CheckInfoBasesResponse> </soap:Body> </soap:Envelope> """ return envelope class GetInfoBasesResponse(BaseModel): ClientID: str = "00000000-0000-0000-0000-000000000000" InfoBasesCheckCode: str = "00000000-0000-0000-0000-000000000000" InfoBases: str = "0" location: str def envelope(self) -> str: envelope = f""" <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <m:GetInfoBasesResponse xmlns:m="{self.location}/WebCommonInfoBases"> <m:return xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/> <m:ClientID xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">{self.ClientID}</m:ClientID> <m:InfoBasesCheckCode xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">{self.InfoBasesCheckCode}</m:InfoBasesCheckCode> <m:InfoBases xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">{self.InfoBases}</m:InfoBases> </m:GetInfoBasesResponse> </soap:Body> </soap:Envelope> """ return envelope class SoapRequest: ... 
# TODO парсер запроса class InfoBasesList: def __init__(self, content): self.content: str = content hash_object = hashlib.md5(self.content.encode()) self.md5_hash: str = hash_object.hexdigest() class InfoBasesCatalog: def __init__(self): self._name_index = {} self._infobases = [] def add_from_file(self, file: str): p = Path(file) with open(file, encoding="utf-8") as f: content = f.read() infobases = InfoBasesList(content=content) self._name_index[p.stem] = infobases self._infobases.append(infobases) def get_infobases(self, name_id: str) -> InfoBasesList or None: return self._name_index.get(name_id, None) def check_code(): return str(uuid.uuid4()) def parse_xml_body(xml_body: bytes): b = io.BytesIO(xml_body) return xmltodict.parse(b) def get_client_id() -> str: return "af822745-ab05-42ac-b826-80d422afb4b7" def get_wsdl(location: str, path: str) -> str: wsdl = f"""<?xml version="1.0" encoding="UTF-8"?> <definitions xmlns="http://schemas.xmlsoap.org/wsdl/" xmlns:soap12bind="http://schemas.xmlsoap.org/wsdl/soap12/" xmlns:soapbind="http://schemas.xmlsoap.org/wsdl/soap/" xmlns:tns="{location}/WebCommonInfoBases" xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy" xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsd1="{location}/WebCommonInfoBases" name="WebCommonInfoBases" targetNamespace="{location}/WebCommonInfoBases"> <types> <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xs1="{location}/WebCommonInfoBases" targetNamespace="{location}/WebCommonInfoBases" elementFormDefault="qualified"> <xs:element name="CheckInfoBases"> <xs:complexType> <xs:sequence> <xs:element name="ClientID" type="xs:string" nillable="true"/> <xs:element name="InfoBasesCheckCode" type="xs:string" nillable="true"/> </xs:sequence> </xs:complexType> </xs:element> <xs:element name="CheckInfoBasesResponse"> <xs:complexType> <xs:sequence> <xs:element name="return" type="xs:string" nillable="true"/> <xs:element name="InfoBasesChanged" type="xs:boolean" nillable="true"/> <xs:element name="URL" type="xs:string" nillable="true"/> </xs:sequence> </xs:complexType> </xs:element> <xs:element name="GetInfoBases"> <xs:complexType> <xs:sequence> <xs:element name="ClientID" type="xs:string" nillable="true"/> </xs:sequence> </xs:complexType> </xs:element> <xs:element name="GetInfoBasesResponse"> <xs:complexType> <xs:sequence> <xs:element name="return" type="xs:string" nillable="true"/> <xs:element name="ClientID" type="xs:string" nillable="true"/> <xs:element name="InfoBasesCheckCode" type="xs:string" nillable="true"/> <xs:element name="InfoBases" type="xs:string" nillable="true"/> </xs:sequence> </xs:complexType> </xs:element> </xs:schema> </types> <message name="CheckInfoBasesRequestMessage"> <part name="parameters" element="tns:CheckInfoBases"/> </message> <message name="CheckInfoBasesResponseMessage"> <part name="parameters" element="tns:CheckInfoBasesResponse"/> </message> <message name="GetInfoBasesRequestMessage"> <part name="parameters" element="tns:GetInfoBases"/> </message> <message name="GetInfoBasesResponseMessage"> <part name="parameters" element="tns:GetInfoBasesResponse"/> </message> <portType name="WebCommonInfoBasesPortType"> <operation name="CheckInfoBases"> <input message="tns:CheckInfoBasesRequestMessage"/> <output message="tns:CheckInfoBasesResponseMessage"/> </operation> <operation name="GetInfoBases"> <input message="tns:GetInfoBasesRequestMessage"/> <output message="tns:GetInfoBasesResponseMessage"/> 
</operation> </portType> <binding name="WebCommonInfoBasesSoapBinding" type="tns:WebCommonInfoBasesPortType"> <soapbind:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/> <operation name="CheckInfoBases"> <soapbind:operation style="document" soapAction="{location}/WebCommonInfoBases#WebCommonInfoBases:CheckInfoBases"/> <input> <soapbind:body use="literal"/> </input> <output> <soapbind:body use="literal"/> </output> </operation> <operation name="GetInfoBases"> <soapbind:operation style="document" soapAction="{location}/WebCommonInfoBases#WebCommonInfoBases:GetInfoBases"/> <input> <soapbind:body use="literal"/> </input> <output> <soapbind:body use="literal"/> </output> </operation> </binding> <binding name="WebCommonInfoBasesSoap12Binding" type="tns:WebCommonInfoBasesPortType"> <soap12bind:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/> <operation name="CheckInfoBases"> <soap12bind:operation style="document" soapAction="{location}/WebCommonInfoBases#WebCommonInfoBases:CheckInfoBases"/> <input> <soap12bind:body use="literal"/> </input> <output> <soap12bind:body use="literal"/> </output> </operation> <operation name="GetInfoBases"> <soap12bind:operation style="document" soapAction="{location}/WebCommonInfoBases#WebCommonInfoBases:GetInfoBases"/> <input> <soap12bind:body use="literal"/> </input> <output> <soap12bind:body use="literal"/> </output> </operation> </binding> <service name="WebCommonInfoBases"> <port name="WebCommonInfoBasesSoap" binding="tns:WebCommonInfoBasesSoapBinding"> <documentation> <wsi:Claim xmlns:wsi="http://ws-i.org/schemas/conformanceClaim/" conformsTo="http://ws-i.org/profiles/basic/1.1"/> </documentation> <soapbind:address location="{location}{path}"/> </port> </service> </definitions> """ return wsdl def parse_infobases_catalog(v8i_catalog) -> InfoBasesCatalog: if not v8i_catalog or not os.path.exists(v8i_catalog): raise ValueError("v8i_catalog:" + str(v8i_catalog)) catalog = InfoBasesCatalog() for subdir, dirs, files in os.walk(v8i_catalog): for file in files: if not file.endswith(".v8i"): continue catalog.add_from_file(os.path.join(v8i_catalog, file)) return catalog @lru_cache() def get_settings() -> Settings: return Settings() @lru_cache() def get_infobases_catalog( settings: Settings = Depends(get_settings), ) -> InfoBasesCatalog: return parse_infobases_catalog(settings.v8i_folder)
py
1a45c0f348711f9b59ba914c0c5c8ea0f8dc128c
import gc import os from os.path import join as pjoin import sys from argparse import ArgumentTypeError from pprint import pprint import yaml from datetime import datetime import logging import numpy as np import numpy.random import tensorflow as tf from PIL import Image import matplotlib as mpl from matplotlib import colors import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.python.client import device_lib logger = logging.getLogger("MCDose."+__name__) def limited_float(low, high): """argparse string to float with requirement on allowable range of values""" def convert(x): try: x = float(x) except ValueError: raise argparse.ArgumentTypeError("{} is not a floating point literal".format(x)) if x < low or x > high: raise argparse.ArgumentTypeError("{} not in allowable range [{}, {}]".format(x, low, high)) return x return convert def randbool(p_true=0.5): p_true = max(0.0, min(1.0, p_true)) return bool(np.random.rand()<p_true) def augment_data(samples, labels): for mode in range(8): if randbool(0.2): samples = data_augmentation(samples, mode) labels = data_augmentation(labels, mode) return samples, labels def data_augmentation(image, mode): if mode == 0: # original return image elif mode == 1: # flip up and down return np.flipud(image) elif mode == 2: # rotate counterwise 90 degree return np.rot90(image) elif mode == 3: # rotate 90 degree and flip up and down image = np.rot90(image) return np.flipud(image) elif mode == 4: # rotate 180 degree return np.rot90(image, k=2) elif mode == 5: # rotate 180 degree and flip image = np.rot90(image, k=2) return np.flipud(image) elif mode == 6: # rotate 270 degree return np.rot90(image, k=3) elif mode == 7: # rotate 270 degree and flip image = np.rot90(image, k=3) return np.flipud(image) def load_images(filelist): # pixel value range 0-255 if not isinstance(filelist, list): im = Image.open(filelist).convert('L') return np.array(im).reshape(1, im.size[1], im.size[0], 1) data = [] for file in filelist: im = Image.open(file).convert('L') data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1)) return data def load_bin(fname, size, add_channel_axis=False, norm=False): """loads binary double-precision data in ZYX order and returns an array in XZY order note: only works with 3d array""" with open(fname, 'rb') as fd: excepts = [] for t in [np.float64, np.float32]: try: arr = np.frombuffer(fd.read(), dtype=t) arr = np.reshape(arr, size[::-1]) break except Exception as e: excepts.append(e) if not (isinstance(arr, np.ndarray) and arr.size): raise RuntimeError(str('\n\n'.join([str(e) for e in excepts]))) arr = arr.transpose(2,0,1).copy("C") if norm: arr /= np.max(arr) if add_channel_axis: arr = np.expand_dims(arr, axis=arr.ndim) return arr def save_bin(fname, arr): """takes array in XZY order and saves to binary file as double-precision in ZYX order note: only works with 3d array""" arr = arr.transpose(1,2,0).copy("C") with open(fname, 'wb') as fd: fd.write(arr) def save_as_image(filepath, arr, cmap=None, scale=1): _min = np.min(np.min(arr, axis=0), axis=0) _max = np.max(np.max(arr, axis=0), axis=0) outimg = (arr-_min)/(_max-_min) if cmap is not None: outimg = cmap(outimg) im = Image.fromarray(np.uint8(outimg*255)) im = im.resize((scale*im.size[0], scale*im.size[1])) # make the image larger im.save(filepath) def save_bin_slices(filepath, low_var, high_var, out_low_var, array_spacing=0): scale = 6 with open(filepath+'.bin', 'wb') as f: output = np.zeros((low_var.shape[0], low_var.shape[1], 2)) spacer = 
np.max(output)*np.ones((low_var.shape[0], low_var.shape[1], array_spacing)) output = np.concatenate((output,high_var), axis=2) output = np.concatenate((output,spacer), axis=2) output = np.concatenate((output,low_var), axis=2) output = np.concatenate((output,spacer), axis=2) output = np.concatenate((output,out_low_var), axis=2) output = output.transpose(1,2,0) output = output.copy(order='C') f.write(output) # save to bin save_as_image(filepath+'.png', output, cmap=mpl.cm.get_cmap('viridis'), scale=scale) def tf_split_var(mode,images, percent, size, rank=4): k = tf.cast(tf.floor(tf.scalar_mul(percent, tf.cast(tf.size(input=images), tf.float32))), tf.int32) if mode =='low': values, idx = tf.nn.top_k(-images,k) elif mode == 'high': values, idx = tf.nn.top_k(images,k) mask = tf.compat.v1.sparse_to_dense(idx, tf.shape(input=images), sparse_values=0, default_value=1) images = tf.multiply(images,tf.cast(mask, tf.float32)) if rank == 3: # The input is a single image with shape [height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[1:, :, :] - images[:-1, :, :] pixel_dif2 = images[:, 1:, :] - images[:, :-1, :] # Sum for all axis. (None is an alias for all axis.) sum_axis = None elif rank == 4: # The input is a batch of images with shape: # [batch, height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :] pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :] # Only sum for the last 3 axis. # This results in a 1-D tensor with the total variation for each image. sum_axis = [1, 2, 3] else: raise ValueError('\'images\' must be either 3 or 4-dimensional.') # Calculate the total variation by taking the absolute value of the # pixel-differences and summing over the appropriate axis. tot_var = ( tf.reduce_sum(input_tensor=tf.abs(pixel_dif1), axis=sum_axis) + tf.reduce_sum(input_tensor=tf.abs(pixel_dif2), axis=sum_axis)) return tot_var class BatchConstructor(): """Construct batches in sequence from a set of example (input, label) paired tensors """ def __init__(self, inputs, labels): """ note: currently only handles 2D inputs. 3D extension should be simple Args: inputs (np.ndarray[N,H,W,C]) labels (np.ndarray[N,H,W,C]) """ self.inputs = inputs self.labels = labels self.mark = 0 def reset(self): """re-init the array of available example indices""" self.mark=0 def iter_batches(self, batch_size): """construct batch in NHWC format where the first axis (N) is constucted of examples drawn from list of unused examples""" remain = len(self.labels) - self.mark mark = 0 while remain > 0: _batch_size = min(remain, batch_size) yield (self.inputs[mark:mark+_batch_size,], self.labels[mark:mark+_batch_size,]) remain -= _batch_size mark += _batch_size class RandomBatchConstructor(): """Construct batches randomly from a set of example (input, label) paired tensors """ def __init__(self, inputs, labels): """ note: currently only handles 2D inputs. 
3D extension should be simple Args: inputs (np.ndarray[N,H,W,C]) labels (np.ndarray[N,H,W,C]) """ self.inputs = inputs self.labels = labels self.randorder = np.arange(self.inputs.shape[0]) self.mark = 0 self.initialized = False # lazy loading of index def reset(self): """re-init the array of available example indices""" self.initialized = True np.random.shuffle(self.randorder) self.mark = 0 def make_batch(self, batch_size): """construct batch in NHWC format where the first axis (N) is constucted of random examples drawn from list of unused examples""" if not self.initialized: self.reset() remaining = len(self.randorder) - self.mark _batch_size = min(remaining, batch_size) if remaining < _batch_size: raise RuntimeError('There are not enough examples ({:d}) to fill the requested batch ({:d})'.format(remaining, _batch_size)) selection = self.randorder[self.mark:self.mark+_batch_size] self.mark += _batch_size return (self.inputs[selection,], self.labels[selection,]) class RandomBatchConstructor_MultiTensor(): """Construct batches randomly from a set of example (input, label) paired tensors. This class differs from RandomBatchConstructor in that it handles datasets consisting of more than one tensor of different dims""" def __init__(self, inputs, labels): """ note: currently only handles 2D inputs. 3D extension should be simple Args: inputs ([np.ndarray[N,H,W,C], ...]) labels ([np.ndarray[N,H,W,C], ...]) """ self.inputs = inputs self.labels = labels self.index = [[]]*len(inputs) self.initialized = False # lazy loading of index def reset(self): """re-init the array of available example indices""" self.initialized = True self.index = [] for input in self.inputs: self.index.append( list(range(input.shape[0])) ) def make_batch(self, batch_size, reuse=False): """construct batch in NHWC format where the first axis (N) is constructed of random examples drawn from list of unused examples""" if not self.initialized: self.reset() # randomly select a dataset tensor unseen = list(range(len(self.inputs))) while True: if not len(unseen): raise RuntimeError('There are no remaining examples from which to fill the requested batch') tidx = np.random.choice(unseen) if len(self.index[tidx]): break unseen.remove(tidx) _batch_size = min(len(self.index[tidx]), batch_size) if len(self.index[tidx]) < _batch_size: raise RuntimeError('There are not enough examples ({:d}) to fill the requested batch ({:d})'.format(len(self.index[tidx]), _batch_size)) select = np.random.choice(range(len(self.index[tidx])), _batch_size, replace=False) if not reuse: for idx in sorted(select, reverse=True): del self.index[tidx][idx] return (self.inputs[tidx][select,], self.labels[tidx][select,]) def get_available_gpus(): with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.01, allow_growth=True))) as sess: tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.WARN) local_device_protos = device_lib.list_local_devices() devs = [x.name for x in local_device_protos if x.device_type=='GPU'] return devs def get_unique_run_name(base, timestamp=False): """get unique directory name with lowest integer prefix""" runs = [int(x.split('-')[0]) for x in os.listdir(base) if os.path.isdir(pjoin(base, x))] runid = max(runs)+1 if len(runs) else 1 runname = '{:04d}'.format(runid) if timestamp: timestamp = datetime.strftime('%Y%m%d-%H%M%S') runname += '-' + timestamp return runname def save_config(dest, config): assert os.path.isdir(dest) dest = pjoin(dest, 'config.yml') with open(dest, 
'w') as fd: yaml.safe_dump(config, fd) def load_config(src): if os.path.isdir(src): src = pjoin(src, 'config.yml') with open(src, 'r') as fd: config = yaml.safe_load(fd) logger.info('Config loaded from "{}"'.format(src)) return config def combine_config(c1, c2): d = c1.copy() for k, v in c2.items(): if k not in d or d[k] is None: d[k] = v return d
py
1a45c18787bd78563109ac77f7d2cd7eb7d09bb4
import unittest
from swamp.parsers.aleigenparser import AleigenParser


class AleigenParserTestCase(unittest.TestCase):

    def test_1(self):
        stdout_contents = """Score C1 C2 CMO
0.073469 101 144 9
11 0
12 1
15 2
16 3
18 4
19 5
20 6
21 7
22 8
23 9
24 10
25 11
26 12
27 13
29 14
30 15
33 16
34 17
37 18
52 19
53 20
54 22
55 23
56 24
57 25
59 26
60 27
63 28
64 29
66 30
67 31
72 32
74 33
75 34
76 35
78 36
79 37
82 38
83 39
85 40
86 41
89 42
93 43
94 178
95 179
96 180
97 181
98 182
99 183
100 184
"""

        parser = AleigenParser(stdout=stdout_contents)
        self.assertIsNone(parser.alignment_length)
        parser.parse()
        alignment = {0: 11, 1: 12, 2: 15, 3: 16, 4: 18, 5: 19, 6: 20, 7: 21, 8: 22, 9: 23, 10: 24, 11: 25, 12: 26,
                     13: 27, 14: 29, 15: 30, 16: 33, 17: 34, 18: 37, 19: 52, 20: 53, 22: 54, 23: 55, 24: 56, 25: 57,
                     26: 59, 27: 60, 28: 63, 29: 64, 30: 66, 31: 67, 32: 72, 33: 74, 34: 75, 35: 76, 36: 78, 37: 79,
                     38: 82, 39: 83, 40: 85, 41: 86, 42: 89, 43: 93, 178: 94, 179: 95, 180: 96, 181: 97, 182: 98,
                     183: 99, 184: 100}
        self.assertEqual(0.073469, parser.con_sco)
        self.assertEqual(9.0, parser.cmo)
        self.assertEqual(101, parser.c1)
        self.assertEqual(144, parser.c2)
        self.assertEqual(50, parser.alignment_length)
        self.assertDictEqual(alignment, parser.alignment)
        self.assertTupleEqual((alignment, 50, 0.073469, 9.0, 101, 144), parser.summary)


if __name__ == '__main__':
    unittest.main()
py
1a45c34a23be5708af03eee80fb456410e4ec507
from unittest import TestCase from shutil import copyfile import os from pysumma.Simulation import Simulation class TestSimulation(TestCase): # Create a new fileManager.txt file with the correct file paths for the system it's run on my_path = os.path.abspath(os.path.dirname(__file__)) filename = 'fileManager.txt' filepath = os.path.join(my_path, filename) filename2 = 'tmp_{}'.format(filename) filepath2 = os.path.join(my_path, filename2) copyfile(filepath, filepath2) with open(filepath2, 'r') as infile: text = infile.readlines() out_text = [] # Replaces the fileManager.txt placeholders with the paths/values for this system for line in text: if '{file version}' in line: line = line.replace('{file version}', "'SUMMA FILE_MANAGER_V1.0'") if '{settings path}' in line: line = line.replace('{settings path}', "'" + my_path + "/'") if '{input path}' in line: line = line.replace('{input path}', "'" + my_path + "/'") if '{output path}' in line: line = line.replace('{output path}', "'" + my_path + "/'") out_text.append(line) with open(filepath2, 'w') as outfile: outfile.writelines(out_text) Simulation_obj = Simulation(filepath2) def read_value_from_file(self, setting_name): with open(self.filepath2) as fileManager_file: for line in fileManager_file: if setting_name in line: return line.split("!")[0].strip().strip("'") def get_filepath_from_value(self, setting_name): value = self.read_value_from_file(setting_name) if not value.endswith('/'): return "/".join(value.split('/')[:-1]) + "/" else: return value def get_filename_from_value(self, setting_name): value = self.read_value_from_file(setting_name) return value.split('/')[-1] def read_text_from_file(self, setting_name): with open(self.read_value_from_file(setting_name)) as file: return ''.join(file.readlines()) # Test the setting_path, input_path, and output_path FM objects (they represent paths, not files) def test_path_FM_objects(self): # Are the names, values, filepaths, and filenames correct upon FileManagerOption object instantiation? 
fileManagerObject = self.Simulation_obj.setting_path setting_name = 'setting_path' self.assertEqual(fileManagerObject.name, setting_name) self.assertEqual(fileManagerObject.value, self.read_value_from_file(setting_name)) self.assertEqual(fileManagerObject.filepath, self.get_filepath_from_value(setting_name)) self.assertEqual(fileManagerObject.filename, self.get_filename_from_value(setting_name)) fileManagerObject = self.Simulation_obj.input_path setting_name = 'input_path' self.assertEqual(fileManagerObject.name, setting_name) self.assertEqual(fileManagerObject.value, self.read_value_from_file(setting_name)) self.assertEqual(fileManagerObject.filepath, self.get_filepath_from_value(setting_name)) self.assertEqual(fileManagerObject.filename, self.get_filename_from_value(setting_name)) fileManagerObject = self.Simulation_obj.output_path setting_name = 'output_path' self.assertEqual(fileManagerObject.name, setting_name) self.assertEqual(fileManagerObject.value, self.read_value_from_file(setting_name)) self.assertEqual(fileManagerObject.filepath, self.get_filepath_from_value(setting_name)) self.assertEqual(fileManagerObject.filename, self.get_filename_from_value(setting_name)) # Save the old path values old_setting_path_value = self.Simulation_obj.setting_path.value old_input_path_value = self.Simulation_obj.input_path.value old_output_path_value = self.Simulation_obj.output_path.value # Set new values for the path variables new_setting_path_value = self.Simulation_obj.setting_path.value + "settingsample/" new_input_path_value = self.Simulation_obj.input_path.value + "inputsample/" new_output_path_value = self.Simulation_obj.output_path.value + "outputsample/" self.Simulation_obj.setting_path.value = new_setting_path_value self.Simulation_obj.input_path.value = new_input_path_value self.Simulation_obj.output_path.value = new_output_path_value # Did ModelOutput change them in the file? self.assertEqual(self.read_value_from_file('setting_path'), new_setting_path_value) self.assertEqual(self.read_value_from_file('input_path'), new_input_path_value) self.assertEqual(self.read_value_from_file('output_path'), new_output_path_value) # Change the values back self.Simulation_obj.setting_path.value = old_setting_path_value self.Simulation_obj.input_path.value = old_input_path_value self.Simulation_obj.output_path.value = old_output_path_value # Are the values updated correctly? self.assertEqual(self.read_value_from_file('setting_path'), old_setting_path_value) self.assertEqual(self.read_value_from_file('input_path'), old_input_path_value) self.assertEqual(self.read_value_from_file('output_path'), old_output_path_value) def test_FM_ModelOutput_obj(self): # Make sure that the ModelOutput object can read from the master file self.assertNotEqual([], self.Simulation_obj.modeloutput_obj.read_master_file()) # Add a variable that's already in the file with self.assertRaises(ValueError): self.Simulation_obj.modeloutput_obj.add_variable('pptrate') # Add a valid variable and make sure it's in the file self.Simulation_obj.modeloutput_obj.add_variable('aquiferScaleFactor') self.assertIn('aquiferScaleFactor', self.Simulation_obj.modeloutput_obj.read_variables_from_file()) # Remove that variable, make sure it isn't in the file self.Simulation_obj.modeloutput_obj.remove_variable('aquiferScaleFactor') self.assertNotIn('aquiferScaleFactor', self.Simulation_obj.modeloutput_obj.read_variables_from_file())
py
1a45c37a84b99d86db8ae96e9684d298b8bfe420
ALPHABET = "abcdefghijklmnopqrstuvwxyz"


def increment(pwd):
    incremented = ""
    pos_not_z = len(pwd) - 1
    while pwd[pos_not_z] == "z":
        incremented = "a" + incremented
        pos_not_z -= 1
    incremented = pwd[:pos_not_z] + ALPHABET[ALPHABET.index(pwd[pos_not_z]) + 1] + incremented
    return incremented


def valid(pwd):
    if any([forbidden in pwd for forbidden in ["i", "l", "o"]]):
        return False

    # check for a sequence of 3 consecutive letters
    contains_seq = False
    for i in range(0, 6):
        if pwd[i:i+3] in ALPHABET:
            contains_seq = True
            break
    if not contains_seq:
        return False

    # check for 2 pairs
    pairs = 0
    pos = 0
    while pos <= 6:
        if pwd[pos] == pwd[pos + 1]:
            pairs += 1
            pos += 2
        else:
            pos += 1
    return pairs >= 2


def get_next_valid(pwd):
    pwd = increment(pwd)
    while not valid(pwd):
        pwd = increment(pwd)
    return pwd


def solve(current_pwd):
    return get_next_valid(current_pwd)


def parse(file_name):
    with open(file_name, "r") as f:
        return f.readline().strip()


if __name__ == '__main__':
    print(solve(parse("sample.txt")))
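

# --- Sanity checks (added for illustration; not part of the original puzzle solution) ---
# These mirror well-known examples for this password puzzle and assume the usual
# 8-character passwords that valid() is written for; call _sanity_checks() manually
# if you want to run them.
def _sanity_checks():
    assert increment("xz") == "ya"      # "z" wraps around and carries into the next position
    assert not valid("hijklmmn")        # contains the forbidden letter "i"
    assert not valid("abbceffg")        # has two pairs, but no straight of three letters
    assert valid("abcdffaa")            # straight "abc" plus the pairs "ff" and "aa"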
py
1a45c4352ca26ea43c518256a103f9ac3c2fd54e
""" (C) Copyright 2021 IBM Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Created on June 30, 2021 """ import logging import os import torch.nn.functional as F import torch.optim as optim from torch.utils.data.dataloader import DataLoader import pathlib from fuse.data.dataset.dataset_base import FuseDatasetBase from fuse.metrics.classification.metric_roc_curve import FuseMetricROCCurve from fuse.metrics.classification.metric_auc import FuseMetricAUC from fuse.analyzer.analyzer_default import FuseAnalyzerDefault from fuse.data.sampler.sampler_balanced_batch import FuseSamplerBalancedBatch from fuse.losses.loss_default import FuseLossDefault from fuse.managers.callbacks.callback_metric_statistics import FuseMetricStatisticsCallback from fuse.managers.callbacks.callback_tensorboard import FuseTensorboardCallback from fuse.managers.callbacks.callback_time_statistics import FuseTimeStatisticsCallback from fuse.managers.manager_default import FuseManagerDefault from fuse.utils.utils_gpu import FuseUtilsGPU from fuse.utils.utils_logger import fuse_logger_start from fuse.models.heads.head_1d_classifier import FuseHead1dClassifier from fuse_examples.classification.prostate_x.backbone_3d_multichannel import Fuse_model_3d_multichannel,ResNet from fuse_examples.classification.prostate_x.patient_data_source import FuseProstateXDataSourcePatient from fuse_examples.classification.duke_breast_cancer.dataset import duke_breast_cancer_dataset from fuse_examples.classification.duke_breast_cancer.tasks import FuseTask ########################################## # Output Paths # ########################################## # # TODO: path to save model root_path = '.' # TODO: path for duke data # Download instructions can be found in README root_data = './Data/Duke-Breast-Cancer-MRI/manifest-1607053360376/' PATHS = {'force_reset_model_dir': False, # If True will reset model dir automatically - otherwise will prompt 'are you sure' message. 
'model_dir': root_path + '/my_model/', 'cache_dir': root_path + '/my_cache/', 'inference_dir': root_path + '/my_model/inference/', 'analyze_dir': root_path + '/my_model/analyze/', 'data_dir':root_path, 'data_path' : root_data + 'Duke-Breast-Cancer-MRI', 'metadata_path': root_data + 'metadata.csv', 'ktrans_path': '', } ################################# # Train Template ################################# ########################################## # Train Common Params ########################################## # ============ # Data # ============ TRAIN_COMMON_PARAMS = {} TRAIN_COMMON_PARAMS['db_name'] = 'DUKE' TRAIN_COMMON_PARAMS['partition_version'] = '11102021TumorSize' TRAIN_COMMON_PARAMS['fold_no'] = 0 TRAIN_COMMON_PARAMS['data.batch_size'] = 50 TRAIN_COMMON_PARAMS['data.train_num_workers'] = 8 TRAIN_COMMON_PARAMS['data.validation_num_workers'] = 8 # =============== # Manager - Train # =============== TRAIN_COMMON_PARAMS['manager.train_params'] = { 'num_gpus': 1, 'num_epochs': 150, 'virtual_batch_size': 1, # number of batches in one virtual batch 'start_saving_epochs': 120, # first epoch to start saving checkpoints from 'gap_between_saving_epochs': 1, # number of epochs between saved checkpoint } TRAIN_COMMON_PARAMS['manager.best_epoch_source'] = [ { 'source': 'metrics.auc.macro_avg', # can be any key from losses or metrics dictionaries 'optimization': 'max', # can be either min/max 'on_equal_values': 'better', # can be either better/worse - whether to consider best epoch when values are equal }, ] TRAIN_COMMON_PARAMS['manager.learning_rate'] = 1e-5 TRAIN_COMMON_PARAMS['manager.weight_decay'] = 1e-3 TRAIN_COMMON_PARAMS['manager.dropout'] = 0.5 TRAIN_COMMON_PARAMS['manager.momentum'] = 0.9 TRAIN_COMMON_PARAMS['manager.resume_checkpoint_filename'] = None TRAIN_COMMON_PARAMS['num_backbone_features_imaging'] = 512 # in order to add relevant tabular feature uncomment: # num_backbone_features_clinical, post_concat_inputs,post_concat_model TRAIN_COMMON_PARAMS['num_backbone_features_clinical'] = None#256 TRAIN_COMMON_PARAMS['post_concat_inputs'] = None#[('data.clinical_features',9),] TRAIN_COMMON_PARAMS['post_concat_model'] = None#(256,256) if TRAIN_COMMON_PARAMS['num_backbone_features_clinical'] is None: TRAIN_COMMON_PARAMS['num_backbone_features'] = TRAIN_COMMON_PARAMS['num_backbone_features_imaging'] else: TRAIN_COMMON_PARAMS['num_backbone_features'] = \ TRAIN_COMMON_PARAMS['num_backbone_features_imaging']+TRAIN_COMMON_PARAMS['num_backbone_features_clinical'] # classification_task: # supported tasks are: 'Staging Tumor Size','Histology Type','is High Tumor Grade Total','PCR' TRAIN_COMMON_PARAMS['classification_task'] = 'Staging Tumor Size' TRAIN_COMMON_PARAMS['task'] = FuseTask(TRAIN_COMMON_PARAMS['classification_task'], 0) TRAIN_COMMON_PARAMS['class_num'] = TRAIN_COMMON_PARAMS['task'].num_classes() # backbone parameters TRAIN_COMMON_PARAMS['backbone_model_dict'] = \ {'input_channels_num': 1, } def train_template(paths: dict, train_common_params: dict): # ============================================================================== # Logger # ============================================================================== fuse_logger_start(output_path=paths['model_dir'], console_verbose_level=logging.INFO, list_of_source_files=[]) lgr = logging.getLogger('Fuse') lgr.info('Fuse Train', {'attrs': ['bold', 'underline']}) lgr.info(f'model_dir={paths["model_dir"]}', {'color': 'magenta'}) lgr.info(f'cache_dir={paths["cache_dir"]}', {'color': 'magenta'}) #Data train_dataset,validation_dataset = 
duke_breast_cancer_dataset(paths, train_common_params, lgr) ## Create dataloader lgr.info(f'- Create sampler:') sampler = FuseSamplerBalancedBatch(dataset=train_dataset, balanced_class_name='data.ground_truth', num_balanced_classes=train_common_params['class_num'], batch_size=train_common_params['data.batch_size'], balanced_class_weights= [int(train_common_params['data.batch_size']/train_common_params['class_num'])] * train_common_params['class_num'], use_dataset_cache=True) lgr.info(f'- Create sampler: Done') # ## Create dataloader train_dataloader = DataLoader(dataset=train_dataset, batch_sampler=sampler, collate_fn=train_dataset.collate_fn, num_workers=train_common_params['data.train_num_workers']) lgr.info(f'Train Data: Done', {'attrs': 'bold'}) validation_dataloader = DataLoader(dataset=validation_dataset, shuffle=False, drop_last=False, batch_size=train_common_params['data.batch_size'], num_workers=train_common_params['data.validation_num_workers'], collate_fn=validation_dataset.collate_fn) lgr.info(f'Validation Data: Done', {'attrs': 'bold'}) # ============================================================================== # Model # ============================================================================== lgr.info('Model:', {'attrs': 'bold'}) model = Fuse_model_3d_multichannel( conv_inputs=(('data.input', 1),), backbone= ResNet(ch_num=TRAIN_COMMON_PARAMS['backbone_model_dict']['input_channels_num']), # since backbone resnet contains pooling and fc, the feature output is 1D, # hence we use FuseHead1dClassifier as classification head heads=[ FuseHead1dClassifier(head_name='isLargeTumorSize', conv_inputs=[('model.backbone_features', train_common_params['num_backbone_features'])], post_concat_inputs = train_common_params['post_concat_inputs'], post_concat_model = train_common_params['post_concat_model'], dropout_rate=0.25, shared_classifier_head=None, layers_description=None, num_classes=2), ] ) lgr.info('Model: Done', {'attrs': 'bold'}) # ==================================================================================== # Loss # ==================================================================================== lgr.info('Losses: CrossEntropy', {'attrs': 'bold'}) losses = { 'cls_loss': FuseLossDefault(pred_name='model.logits.isLargeTumorSize', target_name='data.ground_truth', callable=F.cross_entropy, weight=1.0), } # ==================================================================================== # Metrics # ==================================================================================== lgr.info('Metrics:', {'attrs': 'bold'}) metrics = { 'auc': FuseMetricAUC(pred_name='model.output.isLargeTumorSize', target_name='data.ground_truth', class_names=train_common_params['task'].class_names()), } ()# ===================================================================================== # Callbacks # ===================================================================================== callbacks = [ FuseTensorboardCallback(model_dir=paths['model_dir']), # save statistics for tensorboard FuseMetricStatisticsCallback(output_path=paths['model_dir'] + "/metrics.csv"), # save statistics for tensorboard in a csv file FuseTimeStatisticsCallback(num_epochs=train_common_params['manager.train_params']['num_epochs'], load_expected_part=0.1) # time profiler ] # ===================================================================================== # Manager - Train # ===================================================================================== lgr.info('Train:', {'attrs': 'bold'}) # 
create optimizer optimizer = optim.Adam(model.parameters(), lr=train_common_params['manager.learning_rate'], weight_decay=train_common_params['manager.weight_decay']) # # create scheduler scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) # train from scratch manager = FuseManagerDefault(output_model_dir=paths['model_dir'], force_reset=paths['force_reset_model_dir']) # Providing the objects required for the training process. manager.set_objects(net=model, optimizer=optimizer, losses=losses, metrics=metrics, best_epoch_source=train_common_params['manager.best_epoch_source'], lr_scheduler=scheduler, callbacks=callbacks, train_params=train_common_params['manager.train_params']) ## Continue training if train_common_params['manager.resume_checkpoint_filename'] is not None: # Loading the checkpoint including model weights, learning rate, and epoch_index. manager.load_checkpoint(checkpoint=train_common_params['manager.resume_checkpoint_filename'], mode='train') # Start training manager.train(train_dataloader=train_dataloader, validation_dataloader=validation_dataloader) lgr.info('Train: Done', {'attrs': 'bold'}) ###################################### # Inference Common Params ###################################### INFER_COMMON_PARAMS = {} INFER_COMMON_PARAMS['partition_version'] = '11102021TumorSize' INFER_COMMON_PARAMS['db_name'] = 'DUKE' INFER_COMMON_PARAMS['fold_no'] = 0 INFER_COMMON_PARAMS['infer_filename'] = os.path.join(PATHS['inference_dir'], 'validation_set_infer.pickle.gz') INFER_COMMON_PARAMS['checkpoint'] = 'best' # Fuse TIP: possible values are 'best', 'last' or epoch_index. ###################################### # Inference Template ###################################### def infer_template(paths: dict, infer_common_params: dict): #### Logger # fuse_logger_start(output_path=paths['inference_dir'], console_verbose_level=logging.INFO) lgr = logging.getLogger('Fuse') lgr.info('Fuse Inference', {'attrs': ['bold', 'underline']}) lgr.info(f'infer_filename={infer_common_params["infer_filename"]}', {'color': 'magenta'}) #### create infer datasource ## Create data source: infer_data_source = FuseProstateXDataSourcePatient(paths['data_dir'],'validation', db_ver=infer_common_params['partition_version'], db_name = infer_common_params['db_name'], fold_no=infer_common_params['fold_no']) lgr.info(f'db_name={infer_common_params["db_name"]}', {'color': 'magenta'}) ### load dataset data_set_filename = os.path.join(paths["model_dir"], "inference_dataset.pth") dataset = FuseDatasetBase.load(filename=data_set_filename, override_datasource=infer_data_source, override_cache_dest=paths["cache_dir"], num_workers=0) dataloader = DataLoader(dataset=dataset, shuffle=False, drop_last=False, batch_size=50, num_workers=5, collate_fn=dataset.collate_fn) #### Manager for inference manager = FuseManagerDefault() # extract just the global classification per sample and save to a file output_columns = ['model.output.isLargeTumorSize','data.ground_truth'] manager.infer(data_loader=dataloader, input_model_dir=paths['model_dir'], checkpoint=infer_common_params['checkpoint'], output_columns=output_columns, output_file_name = infer_common_params['infer_filename']) ###################################### # Analyze Common Params ###################################### ANALYZE_COMMON_PARAMS = {} ANALYZE_COMMON_PARAMS['infer_filename'] = INFER_COMMON_PARAMS['infer_filename'] ANALYZE_COMMON_PARAMS['output_filename'] = os.path.join(PATHS['analyze_dir'], 'all_metrics_DCE_T0_fold0') 
###################################### # Analyze Template ###################################### def analyze_template(paths: dict, analyze_common_params: dict): fuse_logger_start(output_path=None, console_verbose_level=logging.INFO) lgr = logging.getLogger('Fuse') lgr.info('Fuse Analyze', {'attrs': ['bold', 'underline']}) fuse_logger_start(output_path=None, console_verbose_level=logging.INFO) lgr = logging.getLogger('Fuse') lgr.info('Fuse Analyze', {'attrs': ['bold', 'underline']}) # metrics metrics = { 'roc': FuseMetricROCCurve(pred_name='model.output.isLargeTumorSize', target_name='data.ground_truth', output_filename=os.path.join(paths['inference_dir'], 'roc_curve.png')), 'auc': FuseMetricAUC(pred_name='model.output.isLargeTumorSize', target_name='data.ground_truth') } # create analyzer analyzer = FuseAnalyzerDefault() # run results = analyzer.analyze(gt_processors={}, data_pickle_filename=analyze_common_params["infer_filename"], metrics=metrics, output_filename=analyze_common_params['output_filename']) return results ###################################### # Run ###################################### if __name__ == "__main__": # allocate gpus NUM_GPUS = 1 if NUM_GPUS == 0: TRAIN_COMMON_PARAMS['manager.train_params']['device'] = 'cpu' # uncomment if you want to use specific gpus instead of automatically looking for free ones force_gpus = None # [0] FuseUtilsGPU.choose_and_enable_multiple_gpus(NUM_GPUS, force_gpus=force_gpus) RUNNING_MODES = ['train','infer', 'analyze'] # Options: 'train', 'infer', 'analyze' if 'train' in RUNNING_MODES: train_template(paths=PATHS, train_common_params=TRAIN_COMMON_PARAMS) if 'infer' in RUNNING_MODES: infer_template(paths=PATHS, infer_common_params=INFER_COMMON_PARAMS) if 'analyze' in RUNNING_MODES: analyze_template(paths=PATHS,analyze_common_params=ANALYZE_COMMON_PARAMS)
py
1a45c49b5f2680a9ec2881a003f0aab7f8909ba0
""" This file offers the methods to automatically retrieve the graph Thermoplasmatales archaeon E-plasma. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def ThermoplasmatalesArchaeonEPlasma( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Thermoplasmatales archaeon E-plasma graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Thermoplasmatales archaeon E-plasma graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="ThermoplasmatalesArchaeonEPlasma", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
py
1a45c902cd380bffe1303c8997a8301e1068ed2e
# pyright:reportUnknownMemberType=false # pyright:reportUnknownArgumentType=false # pyright:reportUnknownLambdaType=false import re from itertools import accumulate from typing import Any, List import ja_core_news_sm from py_pdf_term._common.consts import JAPANESE_REGEX, NOSPACE_REGEX, SYMBOL_REGEX from ..data import Token from .base import BaseLanguageTokenizer SPACES = re.compile(r"\s+") DELIM_SPACE = re.compile(rf"(?<={NOSPACE_REGEX}) (?={NOSPACE_REGEX})") class JapaneseTokenizer(BaseLanguageTokenizer): def __init__(self) -> None: enable_pipes = [] self._model = ja_core_news_sm.load() self._model.select_pipes(enable=enable_pipes) self._ja_regex = re.compile(JAPANESE_REGEX) self._symbol_regex = re.compile(rf"({SYMBOL_REGEX})") def inscope(self, text: str) -> bool: return self._ja_regex.search(text) is not None def tokenize(self, text: str) -> List[Token]: text = SPACES.sub(" ", text).strip() orginal_space_pos = { match.start() - offset for offset, match in enumerate(re.finditer(r" ", text)) if DELIM_SPACE.match(text, match.start()) is not None } text = DELIM_SPACE.sub("", text) text = self._symbol_regex.sub(r" \1 ", text) tokens = list(map(self._create_token, self._model(text))) if not orginal_space_pos: return tokens tokenized_space_pos = set( accumulate(map(lambda token: len(str(token)), tokens)) ) if not orginal_space_pos.issubset(tokenized_space_pos): return tokens pos, i = 0, 0 num_token = len(tokens) + len(orginal_space_pos) while i < num_token: if pos in orginal_space_pos: pos += len(str(tokens[i])) space = Token( "ja", " ", "空白", "*", "*", "*", "SPACE", " ", " ", False, ) tokens.insert(i, space) i += 2 else: pos += len(str(tokens[i])) i += 1 return tokens def _create_token(self, token: Any) -> Token: if self._symbol_regex.fullmatch(token.text): return Token( "ja", token.text, "補助記号", "一般", "*", "*", "SYM", token.text, token.text, False, ) pos_with_categories = token.tag_.split("-") num_categories = len(pos_with_categories) - 1 pos = pos_with_categories[0] category = pos_with_categories[1] if num_categories >= 1 else "*" subcategory = pos_with_categories[2] if num_categories >= 2 else "*" subsubcategory = pos_with_categories[3] if num_categories >= 3 else "*" return Token( "ja", token.text, pos, category, subcategory, subsubcategory, token.pos_, token.lemma_.lower(), token.shape_, token.is_stop, ) class JapaneseTokenClassifier: def is_modifying_particle(self, token: Token) -> bool: return token.surface_form == "の" and token.pos == "助詞" def is_symbol(self, token: Token) -> bool: return token.pos in {"補助記号"} def is_connector_symbol(self, token: Token) -> bool: return token.surface_form in {"・", "-"} and token.pos == "補助記号" def is_meaningless(self, token: Token) -> bool: return self.is_symbol(token) or self.is_modifying_particle(token)
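# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Rough outline of how the tokenizer and classifier above are wired together; the
# sample sentence is arbitrary and the spaCy model ja_core_news_sm must be installed.
#
# tokenizer = JapaneseTokenizer()
# classifier = JapaneseTokenClassifier()
# text = "自然言語処理の研究"
# if tokenizer.inscope(text):
#     tokens = tokenizer.tokenize(text)
#     content_tokens = [t for t in tokens if not classifier.is_meaningless(t)]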
py
1a45c907944b22c4e834f7663145fc06bceeb4a2
import theano.tensor as T def cca_loss(outdim_size, use_all_singular_values): """ The main loss function (inner_cca_objective) is wrapped in this function due to the constraints imposed by Keras on objective functions """ def inner_cca_objective(y_true, y_pred): """ It is the loss function of CCA as introduced in the original paper. There can be other formulations. It is implemented by Theano tensor operations, and does not work on Tensorflow backend y_true is just ignored """ r1 = 1e-4 r2 = 1e-4 eps = 1e-12 o1 = o2 = y_pred.shape[1]//2 # unpack (separate) the output of networks for view 1 and view 2 H1 = y_pred[:, 0:o1].T H2 = y_pred[:, o1:o1+o2].T m = H1.shape[1] H1bar = H1 - (1.0 / m) * T.dot(H1, T.ones([m, m])) H2bar = H2 - (1.0 / m) * T.dot(H2, T.ones([m, m])) SigmaHat12 = (1.0 / (m - 1)) * T.dot(H1bar, H2bar.T) SigmaHat11 = (1.0 / (m - 1)) * T.dot(H1bar, H1bar.T) + r1 * T.eye(o1) SigmaHat22 = (1.0 / (m - 1)) * T.dot(H2bar, H2bar.T) + r2 * T.eye(o2) # Calculating the root inverse of covariance matrices by using eigen decomposition [D1, V1] = T.nlinalg.eigh(SigmaHat11) [D2, V2] = T.nlinalg.eigh(SigmaHat22) # Added to increase stability posInd1 = T.gt(D1, eps).nonzero()[0] D1 = D1[posInd1] V1 = V1[:, posInd1] posInd2 = T.gt(D2, eps).nonzero()[0] D2 = D2[posInd2] V2 = V2[:, posInd2] SigmaHat11RootInv = T.dot(T.dot(V1, T.nlinalg.diag(D1 ** -0.5)), V1.T) SigmaHat22RootInv = T.dot(T.dot(V2, T.nlinalg.diag(D2 ** -0.5)), V2.T) Tval = T.dot(T.dot(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv) if use_all_singular_values: # all singular values are used to calculate the correlation corr = T.sqrt(T.nlinalg.trace(T.dot(Tval.T, Tval))) else: # just the top outdim_size singular values are used [U, V] = T.nlinalg.eigh(T.dot(Tval.T, Tval)) U = U[T.gt(U, eps).nonzero()[0]] U = U.sort() corr = T.sum(T.sqrt(U[0:outdim_size])) return -corr return inner_cca_objective
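# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The wrapper above exists so the inner closure can be handed to Keras as an
# objective (Theano backend only, as the docstring notes). `model` and the
# outdim_size value below are assumptions made for the sake of the example.
#
# model.compile(optimizer='rmsprop',
#               loss=cca_loss(outdim_size=10, use_all_singular_values=False))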
py
1a45cb4e81764f0b7b1fc9e1505c1a742a549ff0
_base_ = [ '../_base_/models/mask_rcnn_swin_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( in_chans=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, ape=False, drop_path_rate=0.1, patch_norm=True, use_checkpoint=False ), neck=dict(in_channels=[96, 192, 384, 768]), roi_head=dict( bbox_head=dict(num_classes=1), mask_head=dict( num_classes=1, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=0.1)) ), test_cfg=dict( rcnn=dict( score_thr=0.01, nms=dict(type='nms', iou_threshold=0.5), max_per_img=300 ) ) ) img_norm_cfg = dict( mean=[134.756, 134.756, 134.756], std=[61.680, 61.680, 61.680], to_rgb=True) # augmentation strategy originates from DETR / Sparse RCNN _input_size = 1280 _widths = list(range(int(_input_size / 2), int(_input_size * 1.5), 64)) _ratios = list([v / 10 for v in range(7, 13 + 1, 1)]) _img_scale = [] for _w in _widths: _img_scale += [(_w, int(_w * r)) for r in _ratios] _img_fill_val = 134.756 _autoaug_transforms = [ 'Shear', 'Translate', 'Rotate', 'BrightnessTransform', 'ContrastTransform', 'EqualizeTransform' ] _n_subpolicies = 100 _prob_cands = list([v / 10 for v in range(0, 10 + 1, 1)]) _level_cands = list(range(0, 10 + 1, 1)) _direction_cands = ['horizontal', 'vertical'] import random _autoaug_policies = [] for _ in range(_n_subpolicies): _subpolicy = [] for _ in range(2): _transform = dict( type=random.choice(_autoaug_transforms), prob=random.choice(_prob_cands) ) if _transform['type'] not in ['EqualizeTransform']: _transform['level'] = random.choice(_level_cands) if _transform['type'] in ['Shear', 'Translate', 'Rotate']: _transform['img_fill_val'] = _img_fill_val if _transform['type'] in ['Shear', 'Translate']: _transform['direction'] = random.choice(_direction_cands) _subpolicy.append(_transform) _autoaug_policies.append(_subpolicy) del random train_pipeline = [ dict(type='LoadImageFromFile', color_type='color'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Resize', img_scale=_img_scale, multiscale_mode='value', keep_ratio=False), dict(type='AutoAugment', policies=_autoaug_policies), dict(type='RandomCrop', crop_type='absolute', crop_size=(_input_size, _input_size), allow_negative_crop=True), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile', color_type='color'), dict( type='MultiScaleFlipAug', img_scale=[(int(_input_size), int(_input_size)), (int(_input_size * 1.25), int(_input_size * 1.25)), ], flip=True, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Modify dataset related settings img_dir = '/kaggle/working/data/siim/2048x2048' anno_dir = '/kaggle/working/data/siim/mmdet_annos' fold = 0 classes = ('opacity',) data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( img_prefix=img_dir, classes=classes, ann_file=f'{anno_dir}/train.json', pipeline=train_pipeline), val=dict( img_prefix=img_dir, classes=classes, ann_file=f'{anno_dir}/test.json', pipeline=test_pipeline), test=dict( img_prefix=img_dir, classes=classes, ann_file=f'{anno_dir}/test.json', pipeline=test_pipeline), ) # scheduler optimizer = dict(_delete_=True, 
type='AdamW', lr=0.00001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.)})) lr_config = dict(step=[24, 33]) runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) # do not use mmdet version fp16 fp16 = None optimizer_config = dict( type="DistOptimizerHook", update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False, ) # We can use the pre-trained Mask RCNN model to obtain higher performance load_from = 'checkpoints/mask_rcnn_swin_tiny_patch4_window7.pth'
py
1a45cb50b81251b0a8ccf2d47c75d1494cbd1b94
# Please Pass the coded messages from itertools import combinations def solution(l): l.sort(reverse = True) for i in reversed(range(1, len(l) + 1)): for tup in combinations(l, i): if sum(tup) % 3 == 0: return int(''.join(map(str, tup))) return 0
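# Illustrative calls (added for clarity; the sample inputs are not part of the original file).
# solution keeps the largest multiset of digits whose sum is divisible by 3 and
# returns the biggest number those digits can form.
if __name__ == "__main__":
    print(solution([3, 1, 4, 1]))        # -> 4311   (4 + 3 + 1 + 1 = 9 is divisible by 3)
    print(solution([3, 1, 4, 1, 5, 9]))  # -> 94311  (the 5 is dropped so the digit sum is 18)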
py
1a45cc7f19e0d06d23186ea89db85ec047d52893
""" Lexicon Plesk Provider Author: Jens Reimann, 2018 API Docs: https://docs.plesk.com/en-US/onyx/api-rpc """ from __future__ import absolute_import import logging from collections import OrderedDict import requests from lexicon.providers.base import Provider as BaseProvider try: import xmltodict # optional dependency except ImportError: pass LOGGER = logging.getLogger(__name__) PLEX_URL_SUFFIX = "/enterprise/control/agent.php" NAMESERVER_DOMAINS = [] def provider_parser(subparser): """Configure provider parser for Plesk""" subparser.add_argument( "--auth-username", help="specify username for authentication" ) subparser.add_argument( "--auth-password", help="specify password for authentication" ) subparser.add_argument( "--plesk-server", help="specify URL to the Plesk Web UI, including the port" ) class Provider(BaseProvider): """Provider class for Plesk""" def __init__(self, config): super(Provider, self).__init__(config) self.api_endpoint = self._get_provider_option("plesk_server") if self.api_endpoint.endswith("/"): self.api_endpoint = self.api_endpoint[:-1] if not self.api_endpoint.endswith(PLEX_URL_SUFFIX): self.api_endpoint += PLEX_URL_SUFFIX self.site_name = self.domain assert self.site_name is not None self.domain_id = None self.username = self._get_provider_option("auth_username") assert self.username is not None self.password = self._get_provider_option("auth_password") assert self.password is not None def __simple_request(self, rtype, operation, req): response = self.__plesk_request({rtype: {operation: req}})[rtype][operation] result = response["result"] if isinstance(result, list): for record in result: if record["status"] == "error": raise Exception( "API returned at least one error: %s" % record["errtext"] ) elif response["result"]["status"] == "error": errcode = response["result"]["errcode"] errtext = response["result"]["errtext"] raise Exception("API returned error: %s (%s)" % (errcode, errtext)) return response def __plesk_request(self, request): headers = {} headers["Content-type"] = "text/xml" headers["HTTP_PRETTY_PRINT"] = "TRUE" headers["HTTP_AUTH_LOGIN"] = self.username headers["HTTP_AUTH_PASSWD"] = self.password xml = xmltodict.unparse({"packet": request}, pretty=True) LOGGER.debug("Request: %s", xml) response = requests.post( self.api_endpoint, headers=headers, data=xml, auth=(self.username, self.password), ) data = response.text LOGGER.debug("Response: %s", data) result = xmltodict.parse(data) return result["packet"] def __find_site(self): return self.__simple_request( "site", "get", OrderedDict([("filter", {"name": self.site_name}), ("dataset", {})]), )["result"]["id"] def _authenticate(self): self.domain_id = self.__find_site() if self.domain_id is None: raise Exception("Domain not found") def _create_record(self, rtype, name, content): return self.__create_entry(rtype, name, content, None) def _list_records(self, rtype=None, name=None, content=None): entries = self.__find_dns_entries(rtype, name, content) LOGGER.debug("list_records: %s", entries) return entries def _update_record(self, identifier, rtype=None, name=None, content=None): if identifier is None: entries = self.__find_dns_entries(rtype, name, None) LOGGER.debug("Entries found: %s", entries) if not entries: raise Exception("No entry found for updating") identifier = entries[0]["id"] entry = self.__get_dns_entry(identifier) ids = [] for an_entry in entries: ids.append(an_entry["id"]) self.__delete_dns_records_by_id(ids) else: entry = self.__get_dns_entry(identifier) 
self.__delete_dns_records_by_id([identifier]) assert entry is not None LOGGER.debug("Updating: %s", entry) if rtype: entry["type"] = rtype if name: entry["host"] = name if content: entry["value"] = content return self.__create_entry( entry["type"], entry["host"], entry["value"], entry["opt"] ) def __create_entry(self, rtype, host, value, opt): entries = self.__find_dns_entries(rtype, self._fqdn_name(host), value) if entries: return True # already exists self.__simple_request( "dns", "add_rec", OrderedDict( [ ("site-id", self.domain_id), ("type", rtype), ("host", self._relative_name(host)), ("value", value), ("opt", opt), ] ), ) return True def _delete_record(self, identifier=None, rtype=None, name=None, content=None): if identifier: self.__delete_dns_records_by_id([identifier]) return True entries = self.__find_dns_entries(rtype, self._fqdn_name(name), content) ids = [] for entry in entries: ids.append(entry["id"]) self.__delete_dns_records_by_id(ids) return bool(ids) def __get_dns_entry(self, identifier): return self.__simple_request("dns", "get_rec", {"filter": {"id": identifier}})[ "result" ]["data"] def __find_dns_entries(self, rtype=None, host=None, value=None): LOGGER.debug("Searching for: %s, %s, %s", rtype, host, value) if value and rtype and rtype in ["CNAME"]: LOGGER.debug("CNAME transformation") value = value.rstrip(".") + "." if host: host = self._fqdn_name(host) result = self.__simple_request( "dns", "get_rec", {"filter": {"site-id": self.domain_id}} ) entries = [] for record in result["result"]: LOGGER.debug("Record: %s", record) if (rtype is not None) and (record["data"]["type"] != rtype): LOGGER.debug( "\tType doesn't match - expected: '%s', found: '%s'", rtype, record["data"]["type"], ) continue if (host is not None) and (record["data"]["host"] != host): LOGGER.debug( "\tHost doesn't match - expected: '%s', found: '%s'", host, record["data"]["host"], ) continue if (value is not None) and (record["data"]["value"] != value): LOGGER.debug( "\tValue doesn't match - expected: '%s', found: '%s'", value, record["data"]["value"], ) continue entry = { "id": record["id"], "type": record["data"]["type"], "name": self._full_name(record["data"]["host"]), "ttl": None, "options": {}, } if record["data"]["type"] in ["CNAME"]: entry["content"] = record["data"]["value"].rstrip(".") else: entry["content"] = record["data"]["value"] if record["data"]["type"] == "MX": entry["options"]["mx"] = {"priority": int(record["data"]["opt"])} entries.append(entry) return entries def __delete_dns_records_by_id(self, ids): if not ids: return req = [] for i in ids: req.append({"del_rec": {"filter": {"id": i}}}) self.__plesk_request({"dns": req}) def _request(self, action="GET", url="/", data=None, query_params=None): # Helper _request is not used for Plesk provider pass
py
1a45cd039d0b425f239d2510dc9c09b6c4af4920
# -*- coding: utf-8 -*- __author__ = "Konstantin Klementiev" __date__ = "08 Mar 2016" import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore import numpy as np import xrt.backends.raycing as raycing import xrt.backends.raycing.sources as rs #import xrt.backends.raycing.apertures as ra import xrt.backends.raycing.oes as roe import xrt.backends.raycing.run as rr import xrt.backends.raycing.materials as rm import xrt.plotter as xrtp import xrt.runner as xrtr import xrt.backends.raycing.screens as rsc showIn3D = False mGold = rm.Material('Au', rho=19.3, kind='FZP') E0, dE = 400, 5 def build_beamline(nrays=1e5): beamLine = raycing.BeamLine(height=0) # source=rs.GeometricSource( # beamLine, 'GeometricSource', (0, 0, 0), # nrays=nrays, distx='flat', dx=0.12, distz='flat', dz=0.12, # dxprime=0, dzprime=0, # distE='flat', energies=(E0-dE, E0+dE), polarization='horizontal') rs.GeometricSource( beamLine, 'GeometricSource', (0, 0, 0), nrays=nrays, distx='annulus', dx=(0, 0.056), dxprime=0, dzprime=0, distE='flat', energies=(E0-dE, E0+dE), polarization='horizontal') beamLine.fsm1 = rsc.Screen(beamLine, 'DiamondFSM1', (0, 10., 0)) # beamLine.fzp = roe.NormalFZP(beamLine, 'FZP', [0, 10., 0], pitch=np.pi/2, # material=mGold, f=2., E=E0, N=50) beamLine.fzp = roe.GeneralFZPin0YZ( beamLine, 'FZP', [0, 10., 0], pitch=np.pi/2, material=mGold, f1='inf', f2=(0, 0, 2.), E=E0, N=500, phaseShift=np.pi) # source.dx = 2 * beamLine.fzp.rn[-1] # source.dz = source.dx beamLine.fzp.order = 1 beamLine.fsm2 = rsc.Screen(beamLine, 'DiamondFSM2', (0, 12., 0)) return beamLine def run_process(beamLine, shineOnly1stSource=False): beamSource = beamLine.sources[0].shine() # beamLine.feFixedMask.propagate(beamSource) beamFSM1 = beamLine.fsm1.expose(beamSource) beamFZPglobal, beamFZPlocal = beamLine.fzp.reflect(beamSource) beamFSM2 = beamLine.fsm2.expose(beamFZPglobal) outDict = {'beamSource': beamSource, 'beamFSM1': beamFSM1, 'beamFZPglobal': beamFZPglobal, 'beamFZPlocal': beamFZPlocal, 'beamFSM2': beamFSM2} if showIn3D: beamLine.prepare_flow() return outDict rr.run_process = run_process def define_plots(beamLine): fwhmFormatStrE = '%.2f' plots = [] # plot = xrtp.XYCPlot( # 'beamFSM1', (1,), xaxis=xrtp.XYCAxis(r'$x$', r'$\mu$m'), # yaxis=xrtp.XYCAxis(r'$z$', r'$\mu$m'), title='FSM1_E') # plot.caxis.fwhmFormatStr = None # plot.saveName = [plot.title + '.png', ] # plots.append(plot) # plot = xrtp.XYCPlot( 'beamFZPlocal', (1, -1), xaxis=xrtp.XYCAxis(r'$x$', r'$\mu$m', bins=512, ppb=1, limits=[-12, 12]), yaxis=xrtp.XYCAxis(r'$y$', r'$\mu$m', bins=512, ppb=1, limits=[-12, 12]), caxis='category', title='localZ') plot.caxis.fwhmFormatStr = None plot.textPanel = plot.ax1dHistX.text( 0.5, 0.02, '', size=14, color='w', transform=plot.ax1dHistX.transAxes, ha='center', va='bottom') plots.append(plot) plot = xrtp.XYCPlot( 'beamFZPlocal', (1, -1), xaxis=xrtp.XYCAxis(r'$x$', r'$\mu$m', bins=512, ppb=1), yaxis=xrtp.XYCAxis(r'$y$', r'$\mu$m', bins=512, ppb=1), caxis='category', title='localFull') plot.caxis.fwhmFormatStr = None plot.textPanel = plot.ax1dHistX.text( 0.5, 0.02, '', size=14, color='w', transform=plot.ax1dHistX.transAxes, ha='center', va='bottom') plots.append(plot) plot = xrtp.XYCPlot( 'beamFSM2', (1,), xaxis=xrtp.XYCAxis(r'$x$', r'nm', bins=256, ppb=1, limits=[-500, 500]), yaxis=xrtp.XYCAxis(r'$z$', r'nm', bins=256, ppb=1, limits=[-500, 500]), caxis='category', title='FSM2_Es') plot.caxis.fwhmFormatStr = fwhmFormatStrE plot.fluxFormatStr = '%.2e' plot.textPanel = plot.ax1dHistX.text( 0.5, 0.02, '', 
size=14, color='w', transform=plot.ax1dHistX.transAxes, ha='center', va='bottom') plots.append(plot) return plots def plot_generator(plots, beamLine): nShifts = 8 phaseShifts = np.arange(0, nShifts, dtype=float) / nShifts * 2 * np.pi strPhaseShifts = ('0', r'$\pi/4$', r'$\pi/2$', r'$3\pi/4$', r'$\pi$', r'$5\pi/4$', r'$3\pi/2$', r'$7\pi/4$') for iPhaseShift, (phaseShift, strPhaseShift) in\ enumerate(zip(phaseShifts, strPhaseShifts)): beamLine.fzp.set_phase_shift(phaseShift) for plot in plots: plot.saveName = ['FZP-{0}{1}.png'.format( plot.title, iPhaseShift)] try: plot.textPanel.set_text(u'phase shift = {0}'.format( strPhaseShift)) except AttributeError: pass yield def main(): beamLine = build_beamline() if showIn3D: beamLine.glow(scale=[100, 10, 100], centerAt='FZP', colorAxis='xzprime') return plots = define_plots(beamLine) xrtr.run_ray_tracing(plots, repeats=360, generator=plot_generator, beamLine=beamLine, processes='half') #this is necessary to use multiprocessing in Windows, otherwise the new Python #contexts cannot be initialized: if __name__ == '__main__': main()
py
1a45cd2bd911fec1e6fddceb17510cb2b434c3ce
alphabet = """0123456789ABCDEFGHJKLMNPQRSTUVWXYZ -"""
py
1a45cd4905586e9967dac07c0e3391a1f2ebcd61
import deserialize import pytest import mumoco @pytest.fixture def remote(): return mumoco.Remote("myName", "myUrl") def test_default_values(remote): assert remote.name == "myName" assert remote.url == "myUrl" assert remote.verify_ssl is True assert remote.priority == 0 assert remote.force is False assert remote.login is False def test_remote_deserialize(remote): data = { "name": "myName", "url": "myUrl", } temp: mumoco.Remote = deserialize.deserialize(mumoco.Remote, data) assert temp == remote
py
1a45cfe7d18b89811234228c5c0b057056bc31d4
# -*- coding: utf-8 -*- import mne import os.path as op raw_dir = '/brainstudio/MEG/metwo/metwo_101/181206/' raw_files = ['metwo_101_7m_01_raw.fif', 'metwo_101_7m_02_raw.fif', 'metwo_101_04_raw.fif', 'metwo_101_03_raw.fif'] for file in raw_files: file_path = op.join(raw_dir + file) raw_info = mne.io.read_raw_fif(file_path, allow_maxshield=True) events = mne.find_events(raw_info, mask=1) print('Events for file %s:' % file) print('This file had %d events.' % len(events))
py
1a45d1559d34ad1da7e257cc3ed2bdebad7c3359
import redis import os, time, multiprocess, logging, sys import json from compute import Config_ini from compute.log import Log from compute.file import get_algo_local_dir, get_population_dir def get_logger(logger_name, log_file, level=logging.INFO): l = logging.getLogger(logger_name) formatter = logging.Formatter('%(message)s') fileHandler = logging.FileHandler(log_file, mode='a') fileHandler.setFormatter(formatter) l.setLevel(level) l.addHandler(fileHandler) return logging.getLogger(logger_name) class RedisLog(object): MSG_TYPE = ['LOG', 'WRITE_FILE'] def __init__(self, name, db_ip, db_port): pool = redis.ConnectionPool(host=db_ip, port=int(db_port), socket_connect_timeout=1) r = redis.Redis(connection_pool=pool, db=1) connection_flag = True try: r.ping() except Exception as e: connection_flag = False Log.info('Connect redis error, please exit manually,ip:%s db_port:%s errors:%s' % (db_ip, db_port, str(e))) sys.exit() if connection_flag: Log.info('Connect redis successfully...') self.r = r self.name = name def info(self, info): self._writedb('LOG', info) def write_file(self, fdir, fname, data): content = {'fdir': fdir, 'fname': fname, 'data': data} self._writedb('WRITE_FILE', content) def _writedb(self, msg_type, content): assert msg_type in self.MSG_TYPE v = {'name': self.name, 'type': msg_type, 'content': content} v = json.dumps(v).encode('utf-8') self.r.rpush('RUN', v) def _readdb(self): info = self.r.lpop('RUN') if info is None: return None # print(info) info = json.loads(info.decode('utf-8')) return info @staticmethod def run_dispatch_service(): Log.info('Start to read message from Redis') db_ip = Config_ini.log_server db_port = Config_ini.log_server_port alg_local_dir = get_algo_local_dir() population_dir = get_population_dir() def proc_func(): rdb = RedisLog('', db_ip, db_port) log_dict = {} log_dir = os.path.join(alg_local_dir, 'log') if not os.path.exists(log_dir): os.makedirs(log_dir) while True: data = rdb._readdb() if data is not None: name, dtype, content = data['name'], data['type'], data['content'] Log.info('Redis is reading: name:%s, type:%s, content:%s' % (name, dtype, content)) if dtype == 'LOG': # create logger. if name not in log_dict: log_file = os.path.join(log_dir, name) logger = get_logger(name, log_file) log_dict[name] = logger logger = log_dict[name] logger.info(content) # print(content) elif dtype == 'WRITE_FILE': fdir, fname, fdata = content['fdir'], content['fname'], content['data'] if fdir == 'CACHE' or fdir == 'RESULTS': fdir = population_dir if not os.path.exists(fdir): os.makedirs(fdir) with open(os.path.join(fdir, fname), 'a+') as f: f.write(fdata) f.flush() else: assert 0 time.sleep(1) proc = multiprocess.Process(target=proc_func) proc.start()
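# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# How a worker would typically push messages through Redis for the dispatcher above
# to consume; the host, port and file names are assumptions, and a reachable Redis
# server is required.
#
# log = RedisLog('worker-1', db_ip='127.0.0.1', db_port=6379)
# log.info('generation 3 finished')                        # appended to the worker's log file
# log.write_file('CACHE', 'gen_3.txt', 'best_acc=0.91\n')  # written under the population dir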
py
1a45d1a8039053dd3a00779b6f77669d4ca3a13b
"""Decorators Recall the simple closure example we did which allowed us to maintain a count of ho9w many times a function was called: def counter(fn): count = 0 def inner(*args, **kwargs): # using *args. **kwargs means we can call any function fn with any combination of positional and keyword arguments nonlocal count count += 1 print('Function {0} was called {1} times'.format(fn.__name__, count)) return fn(*args, **kwargs) return inner def add(a, b=0): return a + b add = counter(add) result = add(1, 2) # Function add was called 1 times # result = 3 print(result) I essentially modified our add function by wrapping it inside another function that added some functionally to it I can also say that we decorated ourfunction add with the function counter And I call counter a decorator function In general a decorator function: takes a function as an argument returns a closure the closure usually accepts any combination of parameters runs some code in the inner function(closure) the closure function calls the original function using the arguments passed to the closure returns whatever is returned by that function call Decorators and the @ symbool In our previous example, we saw that the counter was a decorator and we could decorate our add function using: add = counter(add) In general, if func is a decorator function, we decorate another function my_func using: my_func = func(my_func) This is so common that Python provides a convenient way of writing that: @counter (is the sameas writing) @func def add(a, b): def my_func(...): return a + b ... is the same as writing is the same as writing def add(a, b): def my_func(...): return a + b ... add = counter(add) my_func = func(my_func) Introspecting Decorated Functions Let's use the same count decorator def counter(fn): count = 0 def inner(*args, **kwargs): # using *args. **kwargs means we can call any function fn with any combination of positional and keyword arguments nonlocal count count += 1 print('Function {0} was called {1} times'.format(fn.__name__, count)) return fn(*args, **kwargs) return inner """ # @counter # if not commented out, python shows it is not defined from itertools import count def mult(a, b, c=1): # returns the product of three values I could have written: return a * b* c # mult = counter (the same thing as @counter) mult.__name__ # mult is now inner # The dunder 'name' property help(mult) # Help on function inner in module __main__: # inner(*args, kwargs) # we have lost our docstring, and even the original function signature # even using the inspect module's signature does not yield better results """ One approach to fixing this We can try to fix this problem, at leat for the docstring and function name as follows: """ def counter(fn): count = 0 def inner(*args, **kwargs): nonlocal count count += 1 print*'unction {0}'
py
1a45d3033e5d0a7625820e07a7d7b7aa35025188
# -*- coding: utf-8 -*- """ this module contains all usable variables for native python type. """ import datetime import re import six from booleano.operations.operands.classes import Variable from booleano.operations.operands.constants import String from booleano.parser.symbol_table_builder import SymbolTableBuilder variable_symbol_table_builder = SymbolTableBuilder() @variable_symbol_table_builder.register(type(None)) @six.python_2_unicode_compatible class NativeVariable(Variable): """ a generic Bindable item that will resolve from the context with his given name. it shall be subclass for more specific operations, but it work as is using the python type operations. it can be lazy if the given context_name is a callable, in this case, the callable will be called with the current context """ operations = { "equality", # ==, != "inequality", # >, <, >=, <= "boolean", # Logical values } def __init__(self, context_name): self.evaluated = False self.context_name = context_name super(NativeVariable, self).__init__() def to_python(self, context): """Return the value of the ``bool`` context item""" self.evaluated = True if callable(self.context_name): return self.context_name(context) return context[self.context_name] def equals(self, value, context): """Does ``value`` equal this variable?""" self.evaluated = True if isinstance(value, (String, six.text_type)): value = self._from_native_string(six.text_type(value)) return self.to_python(context) == value def greater_than(self, value, context): """Does thes variable is greater than ``value``""" self.evaluated = True if isinstance(value, (String, six.text_type)): value = self._from_native_string(six.text_type(value)) return self.to_python(context) > value def less_than(self, value, context): """Does thes variable is lesser than ``value``""" self.evaluated = True if isinstance(value, (String, six.text_type)): value = self._from_native_string(six.text_type(value)) return self.to_python(context) < value def __call__(self, context): """Does this variable evaluate to True?""" self.evaluated = True return bool(self.to_python(context)) def _from_native_string(self, value): """ special case where a variable can interperete a sting to other think (a date, a duration ?) :param value: :return: """ return value def __str__(self): """Return the Unicode representation of this variable.""" return 'Scop variable for %s [%s]' % (self.context_name, self.__class__.__name__) def __repr__(self): """Represent this variable.""" return '<Scop variable for %s [%s]>' % (self.context_name, self.__class__.__name__) @variable_symbol_table_builder.register(list) @variable_symbol_table_builder.register(tuple) class NativeCollectionVariable(NativeVariable): operations = { "equality", # ==, != "inequality", # >, <, >=, <= "boolean", # Logical values "membership", } def belongs_to(self, value, context): """does this variable belong to (in) """ self.evaluated = True return value in self.to_python(context) def is_subset(self, value, context): """ a strict subset contains some element, but not all. 
(belongs_to can contains all elements) :param value: :param context: :return: """ self.evaluated = True cv = self.to_python(context) return cv != value and value in cv @variable_symbol_table_builder.register(int) @variable_symbol_table_builder.register(float) class NumberVariable(NativeVariable): """ a variable that allow to compare **number** from the context """ @variable_symbol_table_builder.register(bool) class BooleanVariable(NativeVariable): """ a variable that allow to compare **boolean** from the context """ @variable_symbol_table_builder.register(six.text_type) class StringVariable(NativeCollectionVariable): """ a variable that allow to compare **string** from the context """ @variable_symbol_table_builder.register(set) @variable_symbol_table_builder.register(frozenset) class SetVariable(NativeCollectionVariable): """ a variable that allow to compare **set** from the context """ def cast_val(self, value): if not isinstance(value, set): value = {value} return value def belongs_to(self, value, context): """does this variable belong to (in) """ self.evaluated = True cv = self.to_python(context) value = self.cast_val(value) return value <= cv def is_subset(self, value, context): """ a strict subset contains some element, but not all. (belongs_to can contains all elements) :param value: :param context: :return: """ self.evaluated = True cv = self.to_python(context) value = self.cast_val(value) return value < cv class FormatableVariable(NativeVariable): """ a class that accept a extra format in his constructor """ formats = [] def __init__(self, context_name, formats=None): if isinstance(formats, six.text_type): self.formats = (formats, ) elif formats is not None: self.formats = formats super(FormatableVariable, self).__init__(context_name) @variable_symbol_table_builder.register(datetime.timedelta) class DurationVariable(FormatableVariable): """ a variable that allow to compare **duration** from the context (datetime.timedelta) the compartion can be made with a string matching the folowing format : + **days** d **hours** h **minutes** m **seconds** s + **days** days **hours** hours **minutes** minutes **seconds** seconds ie : + duration > "15 d 7 h 8 m 19 s" + duration > "15d 24s" """ formats = [ ( r'^((?P<days>\d+?) ?d(ays)?)? *' r'((?P<hours>\d+?) ?h(r|ours?)?)? *' r'((?P<minutes>\d+?) ?m(inutes)?)? *' r'((?P<seconds>\d+?) ?s(econds)?)? *$' ) ] def __init__(self, context_name, formats=None): super(DurationVariable, self).__init__(context_name, formats) self.regexps = [ re.compile(regex) for regex in self.formats ] def _from_native_string(self, value): """ parse a string as a date using self.formats. :param value: the date as a string. matching one of the format :return: the datetime object :rtype: datetime.datetime """ for regex in self.regexps: match = regex.search(value) if match: res = {unit: int(val) for unit, val in match.groupdict().items() if val is not None} if res: return datetime.timedelta(**res) raise ValueError("bad date format for %s: tied %r" % (value, self.formats)) @variable_symbol_table_builder.register(datetime.datetime) class DateTimeVariable(FormatableVariable): """ a variable that allow to compare **datetime** from the context (datetime.datetime) the compartion can be made with a string matching the folowing format : - %d/%m/%Y %H:%M:%S - %d-%m-%Y %H:%M:%S - %Y/%m/%d %H:%M:%S - %Y-%m-%d %H:%M:%S or you can pass your own formats in the construction .. 
code:: DateTimeVariable("context_name", formats=["%Y-%m-%d %H:%M:%S"]) """ formats = ( "%d/%m/%Y %H:%M:%S", "%d-%m-%Y %H:%M:%S", "%Y/%m/%d %H:%M:%S", "%Y-%m-%d %H:%M:%S", ) def _from_native_string(self, value): """ parse a string as a date using self.formats. :param value: the date as a string. matching one of the format :return: the datetime object :rtype: datetime.datetime """ for format in self.formats: try: return datetime.datetime.strptime(value, format) except ValueError: pass raise ValueError("bad date format for %s: tied %r" % (value, self.formats)) @variable_symbol_table_builder.register(datetime.date) class DateVariable(DateTimeVariable): """ a variable that allow to compare **date** from the context (datetime.date) the compartion can be made with a string matching the folowing format : - %d/%m/%Y - %d-%m-%Y - %Y/%m/%d - %Y-%m-%d or you can pass your own formats in the construction .. code:: DateVariable("context_name", formats=["%Y %m %d"]) """ formats = ( "%d/%m/%Y", "%d-%m-%Y", "%Y/%m/%d", "%Y-%m-%d", ) def _from_native_string(self, value): return super(DateVariable, self)._from_native_string(value).date()
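# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# How the duration and date variables above resolve string operands against a
# context dictionary; the context keys used here are invented for the example.
#
# elapsed = DurationVariable("elapsed")
# elapsed.equals("1d 30 m", {"elapsed": datetime.timedelta(days=1, minutes=30)})  # -> True
#
# born = DateVariable("born")
# born.less_than("2000-01-01", {"born": datetime.date(1999, 5, 17)})              # -> True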
py
1a45d39c73c88d7d1739dbb51d050175335203b0
import handle_input as input import game_flags as flags import pygame as pg class Food(pg.sprite.Sprite): # Constructor def __init__(self, pos=(-1, -1)): # Call the parent class (Sprite) constructor pg.sprite.Sprite.__init__(self) size = (32, 32) self.pos = pos position = tuple([ele * 32 for ele in reversed(self.pos)]) self.rect = pg.Rect(position, size) self.images = [] for food_img in flags.FOOD: img, _ = input.load_image(flags.FOOD_TYPE, food_img) self.images.append(img) self.index = 0 self.image = self.images[self.index] self.animation_time = 0.1 self.current_time = 0 self.animation_frames = 8 self.current_frame = 0 def update_time_dependent(self, dt): """ Updates the image of Sprite approximately every 0.1 second. Args: dt: Time elapsed between each frame. """ self.current_time += dt if self.current_time >= self.animation_time: self.current_time = 0 self.index = (self.index + 1) % len(self.images) self.image = self.images[self.index] def update_frame_dependent(self): """ Updates the image of Sprite every 6 frame (approximately every 0.1 second if frame rate is 60). """ self.current_frame += 1 if self.current_frame >= self.animation_frames: self.current_frame = 0 self.index = (self.index + 1) % len(self.images) self.image = self.images[self.index] def update(self, dt): """This is the method that's being called when 'all_sprites.update(dt)' is called.""" # Switch between the two update methods by commenting/uncommenting. self.update_time_dependent(dt) # self.update_frame_dependent()
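# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Typical pygame main-loop wiring for the time-dependent animation above; the grid
# position, frame rate and loop flag are assumptions.
#
# clock = pg.time.Clock()
# all_sprites = pg.sprite.Group(Food(pos=(3, 5)))
# while running:
#     dt = clock.tick(60) / 1000.0   # seconds elapsed since the previous frame
#     all_sprites.update(dt)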
py
1a45d3c1c667b8db5a5838c27b9587aa0c6d4f67
import argparse parser = argparse.ArgumentParser() parser.add_argument("a", help="alphabet size", type=int) parser.add_argument("l", help="sequence length", type=int) parser.add_argument("-name", help="name of output folder") parser.add_argument("-data", help="path to input data", type=str, required=True) parser.add_argument( "-cv", help="estimate lambdas using regularization with regularization parameter chosen with 10-fold crossvalidation", default=True) import numpy as np import scipy as sp import itertools import sys import time import scipy as sp import itertools import os import math import csv import pandas as pd import random as rd import statistics from scipy.sparse import csr_matrix, dia_matrix from scipy.optimize import minimize from scipy.special import comb from scipy.spatial.distance import hamming from scipy.sparse.linalg import LinearOperator from scipy.sparse.linalg import cg import vc_regression as vc def str2bool(v): return v in ("True", "true", "t", "1") args = parser.parse_args() args.cv = str2bool(args.cv) a = args.a l = args.l if args.name == None: args.name = "my_project" outdir = args.name if not os.path.exists(outdir): os.makedirs(outdir) # QC if a**l > 5000000: print("sequence space is to big!") exit() vc.preliminary_preparation(a, l) data = pd.read_csv(args.data, header=None) babel = '' for i in range(len(data)): babel += data[0][i] alphabet = set(babel) AA2N = dict([(sorted(alphabet)[i], i) for i in range(len(alphabet))]) N2AA = {v: k for k, v in AA2N.items()} AA = list(AA2N.keys()) seqsAll = [''.join(seq) for seq in itertools.product(AA, repeat=l)] pd.DataFrame(seqsAll).to_csv( outdir + "/sequences.txt", header=None, index=None) def seqAA2num(seq): return [AA2N[seq[i]] for i in range(len(seq))] #### seqs = [seqAA2num(data[0][i]) for i in range(len(data))] tr = np.array([vc.seq2pos(seqs[i]) for i in range(len(seqs))]) if np.shape(seqs)[1] != l: print("seqs file dimension incompatible!") exit() ys = np.array(data[1]) sig2s = np.array(data[2]) # vc.set_data_as_global_parameters(seqs, ys, sig2s) # vc.construct_A_sparse() # vc.construct_E_sparse() vc.initialize_computation(seqs, ys, sig2s) all_distance_class_Q = all(map(lambda x: x > 0, vc.N_d)) rhod = vc.rho_d.copy() rhod[0] -= np.mean(sig2s) lambdas_naive = sp.linalg.inv(vc.W_kd.T).dot(rhod) lambdas_naive_positive_Q = all(map(lambda x: x > 0, lambdas_naive)) if args.cv is True: print("estimating lambdas with regularization (regularization parameter chosen using 10-fold crossvalidation)...") cv = True elif not all_distance_class_Q: print("certain distance classes missing from data, estimating lambdas using regularization (regularization parameter chosen with 10-fold crossvalidation)...") cv = True elif lambdas_naive_positive_Q: print("estimating lambdas using least squares") cv = False else: print("naive lambdas contain nonpositive values, estimating lambdas using regularization (regularization parameter chosen with 10-fold crossvalidation)...") cv = True betas = 10 ** np.arange(-2, 6, .5) rownames = ["order_" + str(k) for k in range(l + 1)] # Estimate lambdas using 10 fold crossvalidation if cv is True: out = vc.lambdaCV(ys, tr, sig2s, betas, 10) beta_cv = out[1] lda = vc.solve_lambda_single_beta(ys, tr, sig2s, beta_cv) print("lambdas = ", str(lda)) else: lda = lambdas_naive print("lambdas = ", str(lda)) pd.DataFrame(lda, index=rownames).to_csv(outdir + "/lambdas.txt", header=None) mks = [comb(l, k) * (a - 1)**k for k in range(l + 1)] variance_components = np.array([lda[k] * mks[k] for k in range(1, l + 1)]) 
variance_components /= np.sum(variance_components) print("variance components = ", str(variance_components)) pd.DataFrame(variance_components, index=rownames[1:]).to_csv( outdir + "/variance_components.txt", header=None)
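# --- Illustrative invocation (added for clarity; not part of the original script) ---
# The script is meant to be driven from the command line; the script and file names
# below are assumptions. The positional arguments are the alphabet size `a` and the
# sequence length `l`, and -data must point to a headerless CSV of sequence, value, variance.
#
#   python run_vc_regression.py 4 8 -data my_data.csv -name my_project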
py
1a45d4cab3faa7480053b77309544ab5a9002ac1
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from ..utils import VolumeMask def test_VolumeMask_inputs(): input_map = dict( args=dict(argstr='%s', ), aseg=dict( extensions=None, xor=['in_aseg'], ), copy_inputs=dict(), environ=dict( nohash=True, usedefault=True, ), in_aseg=dict( argstr='--aseg_name %s', extensions=None, xor=['aseg'], ), left_ribbonlabel=dict( argstr='--label_left_ribbon %d', mandatory=True, ), left_whitelabel=dict( argstr='--label_left_white %d', mandatory=True, ), lh_pial=dict( extensions=None, mandatory=True, ), lh_white=dict( extensions=None, mandatory=True, ), rh_pial=dict( extensions=None, mandatory=True, ), rh_white=dict( extensions=None, mandatory=True, ), right_ribbonlabel=dict( argstr='--label_right_ribbon %d', mandatory=True, ), right_whitelabel=dict( argstr='--label_right_white %d', mandatory=True, ), save_ribbon=dict(argstr='--save_ribbon', ), subject_id=dict( argstr='%s', mandatory=True, position=-1, usedefault=True, ), subjects_dir=dict(), ) inputs = VolumeMask.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_VolumeMask_outputs(): output_map = dict( lh_ribbon=dict(extensions=None, ), out_ribbon=dict(extensions=None, ), rh_ribbon=dict(extensions=None, ), ) outputs = VolumeMask.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): assert getattr(outputs.traits()[key], metakey) == value
py
1a45d4d77b4c1b8ba6fe65798ef32065c2a3585e
from setuptools import setup, find_packages from codecs import open from os import path here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='message_media_messages', version='2.0.0', description='The MessageMedia Messages API provides a number of endpoints for building powerful two-way messaging applications.', long_description=long_description, author='MessageMedia Developers', author_email='[email protected]', url='https://developers.messagemedia.com', packages=find_packages(), install_requires=[ 'requests>=2.9.1, <3.0', 'jsonpickle>=0.7.1, <1.0', 'cachecontrol>=0.11.7, <1.0', 'python-dateutil>=2.5.3, <3.0' ] )
py
1a45d5e871c0ad713b98e18e5de0d268a7f1322e
import sys import json import numpy as np from flask import Flask, request, jsonify, make_response # from flask import session from flask import render_template, send_from_directory from flask_cors import CORS import lib.recommender_tools as rec_tools from lib.recommender_data import RECCOMEND_DATA from lib.tools import json_response, load_input import lib.look_up_table as look_up_table from gevent.pywsgi import WSGIServer VERBOSE = True REC_DATA = RECCOMEND_DATA() app = Flask(__name__, static_url_path='') CORS(app) # set this bugger by default. app.config['CORS_HEADERS'] = 'Content-Type' @app.route('/js/<path:path>') def send_js(path): # offer up the js and css files for consumption return send_from_directory('templates/js', path) @app.route('/css/<path:path>') def send_css(path): # offer up the js and css files for consumption return send_from_directory('templates/css', path) @app.route('/images/<path:path>') def send_image(path): # offer up the js and css files for consumption return send_from_directory('templates/images', path) @app.route('/', methods=['POST', 'GET']) def page_test(): """ Sanity check for flask application (used in automated tests) """ # get user-name and access rights from IAM html = "<h3>Hello world! 3</h3>" return html @app.route('/demo1', methods=['POST', 'GET']) def initial_form(): """ Sanity check for flask application (used in automated tests) """ return render_template('demo_page.html', port=port) @app.route('/example_form_interface', methods=['GET']) def basic_demo2(): """ """ return render_template('basic_form_and_results.html') @app.route('/list_searchable_parameters', methods=['GET']) def list_searchable_parameters(): print('here', file=sys.stdout) inputs = REC_DATA.list_input_keys_values() print('inputs',inputs, file=sys.stdout) targets = look_up_table.LOOK_UP_TABLE['campaign_objective'] print('targets', targets, file=sys.stdout) return json_response({"inputs": inputs, "targets": targets}) @app.route('/recommend_sort_games', methods=['POST']) def make_recommendation(): """Based on the user's objective, this function selects matches and returns scores and meta data """ event_rates = ['click-through-event', 'first_dropped', 'impression'] # Load the input json_dict = load_input(request) #json_dict = ast.literal_eval(json_str) if VERBOSE: print('json_dict', json_dict, file=sys.stdout) # beware campaign_objective also sent in slice_parameters = json_dict #[{i: json_dict[i]} for i in json_dict if i != 'campaign_objective'] # set default objects if none given objectives = json_dict.get('campaign_objective', look_up_table.LOOK_UP_TABLE['campaign_objective']) if isinstance(objectives, list) is False: objectives = [objectives] print('objectives', objectives, file=sys.stdout) # assure the objectives are reasonable for obj in objectives: assert obj in look_up_table.LOOK_UP_TABLE['campaign_objective'] # identify rows matching the input query params matching_rows = REC_DATA.extract_data_slice(slice_parameters) # summ all events for each line_item_id matching above results gm_kys_view = REC_DATA.sum_events( matching_rows, ['first_key'], event_rates) # get a list of unique game ids uniq_games = list(gm_kys_view.keys()) for game_id in uniq_games: # calculate rates, and scores gm_kys_view[game_id]['click_through_rate'] = REC_DATA.calculates_rates(gm_kys_view[game_id]['click-through-event'], gm_kys_view[game_id]['impression']) gm_kys_view[game_id]['engagement_rate'] = REC_DATA.calculates_rates(gm_kys_view[game_id]['first_dropped'], gm_kys_view[game_id]['impression']) # 
calculate the specific score for this game
        gm_kys_view[game_id]['rec_scores'] = REC_DATA.calculate_score([gm_kys_view[game_id][obj] for obj in objectives])

    # sort the games based on 'decreasing' score
    ind_sort = np.argsort([gm_kys_view[game_id]['rec_scores'] for game_id in uniq_games])[::-1]

    # generate a results list of scores and games
    rec_score = []
    for i in ind_sort:
        game_id = uniq_games[i]

        # get all the additional features for this game
        game_features = REC_DATA.extract_game_features(game_id=game_id)

        rec_score.append({'game_id': game_id,
                          'score': gm_kys_view[game_id]['rec_scores'],
                          'game_features': game_features
                          })

    if VERBOSE:
        print('rec_score', rec_score, file=sys.stdout)

    return json_response(rec_score)


@app.route('/get_data_dump', methods=['GET'])
def get_engine_output():
    """Returns a dictionary with all data used in the recommendation engine and their metadata."""
    res = {"game_data": REC_DATA.data}
    return json_response(res)


@app.route('/get_feature_dump', methods=['GET'])
def get_feature_output():
    """Returns a dictionary with all game features used in the recommendation engine and their metadata."""
    res = {"game_features": REC_DATA.game_features}
    return json_response(res)


def create_app():
    """ Constructor

    Returns
    -------
    app : flask app
    """
    return app


# if __name__ == "__main__":
#     if len(sys.argv) > 1:
#         port = int(sys.argv[1])
#     else:
#         port = 80
#
#     app = create_app(config=None)
#     # , use_reloader=False)  # remember to set debug to False
#     app.run(host='0.0.0.0', port=port, debug=VERBOSE)


if __name__ == '__main__':
    # Debug/Development
    # app.run(debug=True, host="0.0.0.0", port="5000")
    # Production
    http_server = WSGIServer(('', 5000), app)
    http_server.serve_forever()
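# --- Illustrative client call (added for clarity; not part of the original module) ---
# One way to exercise the recommendation endpoint once the server is running. This
# assumes load_input() reads a JSON body and that the objective value exists in the
# server's look-up table; both are assumptions made for the sake of the example.
#
# import requests
# payload = {"campaign_objective": "click_through_rate"}
# resp = requests.post("http://localhost:5000/recommend_sort_games", json=payload)
# print(resp.json())  # sorted list of {"game_id", "score", "game_features"} entries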
py
1a45d63f0aa5e0cb879ec34284eb51a48227b35d
class LinkTarget(object): """ Represents an element on a page that can be linked to from other documents or other places in the same document. LinkTarget() """ Name=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the name of the element that this System.Windows.Documents.LinkTarget identifies as a linkable element. Get: Name(self: LinkTarget) -> str Set: Name(self: LinkTarget)=value """
py
1a45d660c44ad2c778fa6d362d55209f2d8b160e
import functools import json import textwrap import mongoengine from .. import util from .. import entities from . import look, verb import architext.strings as strings class LobbyMenu(verb.Verb): '''Helper class that has the method that shows the lobby menu''' def show_lobby_menu(self): out_message = "" self.session.world_list_cache = self.get_worlds_list() world_list = self.session.world_list_cache if world_list: out_message += _('Enter the number of the world you want to enter\n') world_names_with_index = [f' {index: < 4} {world.name: <36} {world.get_connected_users()}{chr(128100)} by {world.creator.name} {"" if world.public else chr(128274)}' for index, world in enumerate(world_list)] out_message += functools.reduce(lambda a, b: '{}\n{}'.format(a, b), world_names_with_index) else: out_message += _('There are not public or known private worlds in this server.') out_message += '\n\n' + _( 'Options:\n' ' + to create a new world.\n' ' ? to see all available actions.' ) self.session.send_to_client(out_message) def get_worlds_list(self): return list(filter(self.has_to_be_listed, entities.World.objects())) def has_to_be_listed(self, world): if world.public: return True elif world.creator == self.session.user: return True elif world in self.session.user.joined_worlds: return True else: return False class LobbyHelp(LobbyMenu): command = '?' verbtype = verb.LOBBYVERB def process(self, message): out_message = _( 'You can use these commands from the lobby:\n' ' + to create a new world.\n' ' - to delete one of your worlds.\n' ' r to reload and show the list of worlds.\n' ' * to deploy a public world snapshot.\n' ' > to import a world from text.\n' ' who to see who is connected right now.\n' '\n' 'Enter the number of a world in the world list to go there.\n' 'Enter the invite code of a world to go there.' ) self.session.send_to_client(out_message) self.finish_interaction() class GoToLobby(LobbyMenu): command = _('exitworld') permissions = verb.NOBOT def process(self, message): self.session.user.leave_world() self.show_lobby_menu() self.finish_interaction() class JoinByInviteCode(LobbyMenu): command = '' verbtype = verb.LOBBYVERB @classmethod def has_world_id_format(cls, string): return len(string.strip()) == 24 @classmethod def can_process(cls, message, session): if super().can_process(message, session) and cls.has_world_id_format(message): return True else: return False def process(self, message): try: chosen_world = entities.World.objects.get(id=message) except entities.World.DoesNotExist: self.session.send_to_client(_("I don't understand that")) self.finish_interaction() return self.session.user.enter_world(chosen_world) self.session.send_to_client(_("Traveling to {world_name}.").format(world_name=chosen_world.name)) look.Look(self.session).show_current_room(show_world_name=True) self.session.send_to_others_in_room(_("Pof! 
{player_name} appears here.").format(player_name=self.session.user.name)) self.finish_interaction() class EnterWorld(LobbyMenu): command = '' verbtype = verb.LOBBYVERB @classmethod def can_process(self, message, session): if super().can_process(message, session) and message.isnumeric(): return True else: return False def __init__(self, session): super().__init__(session) self.current_process_function = self.process_world_number def process(self, message): self.current_process_function(message) def process_world_number(self, message): try: index = int(message) except ValueError: self.session.send_to_client(strings.not_a_number) self.finish_interaction() return try: chosen_world = self.session.world_list_cache[index] except IndexError: self.session.send_to_client(strings.wrong_value) self.finish_interaction() return try: location_save = self.session.user.get_location_save(chosen_world) self.session.user.enter_world(chosen_world) except mongoengine.errors.DoesNotExist: self.session.send_to_client(_("This world no longer exists. Enter 'r' to reload the lobby.")) self.finish_interaction() return self.session.send_to_client(_('{body}') .format( body=_('Returning to your last location there.') if location_save is not None else _('Going there for the first time!') )) look.Look(self.session).show_current_room(show_world_name=True) self.session.send_to_others_in_room(_("Puufh! {player_name} appears here.").format(player_name=self.session.user.name)) self.finish_interaction() class RefreshLobby(LobbyMenu): verbtype = verb.LOBBYVERB command = 'r' def process(self, message): self.show_lobby_menu() self.finish_interaction() class CreateWorld(LobbyMenu): verbtype = verb.LOBBYVERB command = '+' def process(self, message): starting_room = entities.Room( name=_('The First of Many Rooms'), alias='0', description=_( 'This is the first room of your world. Here you are the Architext!\n' '\n' 'If you don\'t know where to start, just type "help building". There you\'ll find all you need to know to build any kind of world.\n' '\n' 'Remember that you can type "worldinfo" to see the world\'s invite code.' ) ) self.new_world = entities.World(save_on_creation=False, creator=self.session.user, starting_room=starting_room) self.session.send_to_client(_('Enter the name for your new world. ("/" to cancel)')) self.process = self.process_word_name def process_word_name(self, message): if message == "/": self.session.send_to_client(strings.cancelled) self.finish_interaction() return if not message: self.session.send_to_client(strings.is_empty) return self.new_world.name = message self.new_world.save() self.session.send_to_client(_( '┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n' '┃ Your new world is ready ┃\n' '┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n' 'It is a private world 🔒. You can invite your friends sharing this invite code:\n' '\n' '{invite_code}\n' '\n' 'When it is ready, you can make the world public using the editworld command.\n' '\n' 'Press enter to see your new world...' 
).format(invite_code=self.new_world.id)) self.process = self.enter_to_continue def enter_to_continue(self, message): self.session.user.enter_world(self.new_world) look.Look(self.session).show_current_room(show_world_name=True) self.finish_interaction() class DeployPublicSnapshot(LobbyMenu): verbtype = verb.LOBBYVERB command = '*' def process(self, message): self.public_snapshots = entities.WorldSnapshot.objects(public=True) if not self.public_snapshots: self.session.send_to_client(_('There are no public worlds to deploy.')) self.finish_interaction() return message = _('Which world do you want to deploy? ("/" to cancel)\n') for index, snapshot in enumerate(self.public_snapshots): message += '{}. {}\n'.format(index, snapshot.name) self.session.send_to_client(message) self.process = self.process_menu_option def process_menu_option(self, message): if message == '/': self.session.send_to_client(strings.cancelled) self.show_lobby_menu() self.finish_interaction() return try: index = int(message) if index < 0: raise ValueError except ValueError: self.session.send_to_client(strings.not_a_number) return try: self.chosen_snapshot = self.public_snapshots[index] except IndexError: self.session.send_to_client(strings.wrong_value) return self.session.send_to_client(_('How do you want to name the new world? ("/" to cancel)')) self.process = self.process_new_world_name def process_new_world_name(self, message): if message == "/": self.session.send_to_client(strings.cancelled) self.finish_interaction() return if not message: self.session.send_to_client(strings.is_empty) return world_name = message self.deploy_at_new_world(self.chosen_snapshot, world_name) self.session.send_to_client(_('Done.')) self.show_lobby_menu() self.finish_interaction() def deploy_at_new_world(self, snapshot, world_name): snapshot_instance = snapshot.snapshoted_state.clone() new_world = entities.World(creator=self.session.user, world_state=snapshot_instance, name=world_name) class DeleteWorld(LobbyMenu): verbtype = verb.LOBBYVERB command = '-' def process(self, message): self.your_worlds = entities.World.objects(creator=self.session.user) if not self.your_worlds: self.session.send_to_client(_("You have not created any world.")) self.finish_interaction() return message = _('Choose the world to delete. YOU WON\'T BE ABLE TO GET IT BACK. Consider making a backup first. ("/" to cancel)\n') for index, world in enumerate(self.your_worlds): message += "{}. {}\n".format(index, world.name) self.session.send_to_client(message) self.process = self.process_menu_option def process_menu_option(self, message): if message == '/': self.session.send_to_client(strings.cancelled) self.show_lobby_menu() self.finish_interaction() return try: index = int(message) if index < 0: raise ValueError except ValueError: self.session.send_to_client(strings.not_a_number) return try: world_to_delete = self.your_worlds[index] except IndexError: self.session.send_to_client(strings.wrong_value) return try: world_to_delete.delete() except entities.CantDelete as e: self.session.send_to_client(_("It can not be deleted: {error}".format(error=e))) else: self.session.send_to_client(_("Done.")) self.show_lobby_menu() self.finish_interaction() class ImportWorld(LobbyMenu): verbtype = verb.LOBBYVERB command = '>' def process(self, message): self.json_message = '' self.world_name = '' self.session.send_to_client(_('Enter a name for your new world. 
("/" to cancel)')) self.process = self.process_word_name def process_word_name(self, message): if message == "/": self.session.send_to_client(strings.cancelled) self.finish_interaction() return if not message: self.session.send_to_client(strings.is_empty) return self.world_name = message self.session.send_to_client(_( 'Now paste the text export of the world.\n' 'It will be automatically divided into multiple messages if it is too long.' 'The server won\'t consider the text as received until it is valid.\n' 'If you entered the wrong text, send "/" to cancel.' )) self.process = self.process_world_json def process_world_json(self, message): # todo: check for possible risks and outcomes of bad input. if message == '/': self.session.send_to_client(strings.cancelled) self.show_lobby_menu() self.finish_interaction() return self.session.send_to_client(_("{char_number} chars received").format(char_number=len(message))) message_valid = False message_without_control_characters = util.remove_control_characters(message) self.json_message += message_without_control_characters self.session.send_to_client(_('Parsing your message. Please wait...')) world_dict = util.text_to_world_dict(self.json_message) if world_dict is not None: new_world = util.world_from_dict(world_dict, self.world_name, self.session.user) self.session.send_to_client(_('Your new world is ready. The items in all player inventories from the original world have been moved to your inventory.')) self.show_lobby_menu() self.finish_interaction() else: self.session.send_to_client(_('The text is still invalid. Waiting for more characters. ("/" to cancel)'))
py
1a45d9168438c95adbea5928b9e055dd4c95fc5d
# Copyright 2016-2021, Pulumi Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import time import threading from concurrent import futures from enum import Enum from datetime import datetime from typing import List, Any, Mapping, MutableMapping, Optional, Callable, Tuple import grpc from ._cmd import CommandResult, _run_pulumi_cmd, OnOutput from ._config import ConfigValue, ConfigMap from .errors import StackAlreadyExistsError from .events import OpMap, EngineEvent, SummaryEvent from ._output import OutputMap from ._server import LanguageServer from ._workspace import Workspace, PulumiFn, Deployment from ..runtime.settings import _GRPC_CHANNEL_OPTIONS from ..runtime.proto import language_pb2_grpc from ._representable import _Representable _DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' OnEvent = Callable[[EngineEvent], Any] class ExecKind(str, Enum): LOCAL = "auto.local" INLINE = "auto.inline" class StackInitMode(Enum): CREATE = "create" SELECT = "select" CREATE_OR_SELECT = "create_or_select" class UpdateSummary: def __init__(self, # pre-update info kind: str, start_time: datetime, message: str, environment: Mapping[str, str], config: Mapping[str, dict], # post-update info result: str, end_time: datetime, version: Optional[int] = None, deployment: Optional[str] = None, resource_changes: Optional[OpMap] = None): self.kind = kind self.start_time = start_time self.end_time = end_time self.message = message self.environment = environment self.result = result self.Deployment = deployment self.resource_changes = resource_changes self.version = version self.config: ConfigMap = {} for key in config: config_value = config[key] self.config[key] = ConfigValue(value=config_value["value"], secret=config_value["secret"]) def __repr__(self): return f"UpdateSummary(result={self.result!r}, version={self.version!r}, " \ f"start_time={self.start_time!r}, end_time={self.end_time!r}, kind={self.kind!r}, " \ f"message={self.message!r}, environment={self.environment!r}, " \ f"resource_changes={self.resource_changes!r}, config={self.config!r}, Deployment={self.Deployment!r})" class BaseResult(_Representable): def __init__(self, stdout: str, stderr: str): self.stdout = stdout self.stderr = stderr class PreviewResult(BaseResult): def __init__(self, stdout: str, stderr: str, change_summary: OpMap): super().__init__(stdout, stderr) self.change_summary = change_summary class UpResult(BaseResult): def __init__(self, stdout: str, stderr: str, summary: UpdateSummary, outputs: OutputMap): super().__init__(stdout, stderr) self.outputs = outputs self.summary = summary class RefreshResult(BaseResult): def __init__(self, stdout: str, stderr: str, summary: UpdateSummary): super().__init__(stdout, stderr) self.summary = summary class DestroyResult(BaseResult): def __init__(self, stdout: str, stderr: str, summary: UpdateSummary): super().__init__(stdout, stderr) self.summary = summary class Stack: @classmethod def create(cls, stack_name: str, workspace: Workspace) -> 'Stack': """ Creates a new stack 
using the given workspace, and stack name. It fails if a stack with that name already exists. :param stack_name: The name identifying the Stack :param workspace: The Workspace the Stack was created from. :return: Stack """ return Stack(stack_name, workspace, StackInitMode.CREATE) @classmethod def select(cls, stack_name: str, workspace: Workspace) -> 'Stack': """ Selects stack using the given workspace, and stack name. It returns an error if the given Stack does not exist. :param stack_name: The name identifying the Stack :param workspace: The Workspace the Stack was created from. :return: Stack """ return Stack(stack_name, workspace, StackInitMode.SELECT) @classmethod def create_or_select(cls, stack_name: str, workspace: Workspace) -> 'Stack': """ Tries to create a new stack using the given workspace and stack name if the stack does not already exist, or falls back to selecting the existing stack. If the stack does not exist, it will be created and selected. :param stack_name: The name identifying the Stack :param workspace: The Workspace the Stack was created from. :return: Stack """ return Stack(stack_name, workspace, StackInitMode.CREATE_OR_SELECT) def __init__(self, name: str, workspace: Workspace, mode: StackInitMode) -> None: """ Stack is an isolated, independently configurable instance of a Pulumi program. Stack exposes methods for the full pulumi lifecycle (up/preview/refresh/destroy), as well as managing configuration. Multiple Stacks are commonly used to denote different phases of development (such as development, staging and production) or feature branches (such as feature-x-dev, jane-feature-x-dev). """ self.name = name self.workspace = workspace self._mode = mode if not isinstance(name, str): raise TypeError("name must be of type 'str'") if not isinstance(workspace, Workspace): raise TypeError("workspace must be of type 'Workspace'") if not isinstance(mode, StackInitMode): raise TypeError("mode must be of type 'StackInitMode'") if mode is StackInitMode.CREATE: workspace.create_stack(name) elif mode is StackInitMode.SELECT: workspace.select_stack(name) elif mode is StackInitMode.CREATE_OR_SELECT: try: workspace.create_stack(name) except StackAlreadyExistsError: workspace.select_stack(name) def __repr__(self): return f"Stack(stack_name={self.name!r}, workspace={self.workspace!r}, mode={self._mode!r})" def __str__(self): return f"Stack(stack_name={self.name!r}, workspace={self.workspace!r})" def up(self, parallel: Optional[int] = None, message: Optional[str] = None, target: Optional[List[str]] = None, expect_no_changes: Optional[bool] = None, diff: Optional[bool] = None, target_dependents: Optional[bool] = None, replace: Optional[List[str]] = None, on_output: Optional[OnOutput] = None, on_event: Optional[OnEvent] = None, program: Optional[PulumiFn] = None) -> UpResult: """ Creates or updates the resources in a stack by executing the program in the Workspace. https://www.pulumi.com/docs/reference/cli/pulumi_up/ :param parallel: Parallel is the number of resource operations to run in parallel at once. (1 for no parallelism). Defaults to unbounded (2147483647). :param message: Message (optional) to associate with the update operation. :param target: Specify an exclusive list of resource URNs to destroy. :param expect_no_changes: Return an error if any changes occur during this update. :param diff: Display operation as a rich diff showing the overall change. :param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list. 
:param replace: Specify resources to replace. :param on_output: A function to process the stdout stream. :param on_event: A function to process structured events from the Pulumi event stream. :param program: The inline program. :returns: UpResult """ # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args # pylint: disable=unused-argument program = program or self.workspace.program extra_args = _parse_extra_args(**locals()) args = ["up", "--yes", "--skip-preview"] args.extend(extra_args) kind = ExecKind.LOCAL.value on_exit = None if program: kind = ExecKind.INLINE.value server = grpc.server(futures.ThreadPoolExecutor(max_workers=4), # pylint: disable=consider-using-with options=_GRPC_CHANNEL_OPTIONS) language_server = LanguageServer(program) language_pb2_grpc.add_LanguageRuntimeServicer_to_server(language_server, server) port = server.add_insecure_port(address="0.0.0.0:0") server.start() def on_exit_fn(): language_server.on_pulumi_exit() server.stop(0) on_exit = on_exit_fn args.append(f"--client=127.0.0.1:{port}") args.extend(["--exec-kind", kind]) log_watcher_thread = None temp_dir = None if on_event: log_file, temp_dir = _create_log_file("up") args.extend(["--event-log", log_file]) log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event)) log_watcher_thread.start() try: up_result = self._run_pulumi_cmd_sync(args, on_output) outputs = self.outputs() summary = self.info() assert summary is not None finally: _cleanup(temp_dir, log_watcher_thread, on_exit) return UpResult(stdout=up_result.stdout, stderr=up_result.stderr, summary=summary, outputs=outputs) def preview(self, parallel: Optional[int] = None, message: Optional[str] = None, target: Optional[List[str]] = None, expect_no_changes: Optional[bool] = None, diff: Optional[bool] = None, target_dependents: Optional[bool] = None, replace: Optional[List[str]] = None, on_output: Optional[OnOutput] = None, on_event: Optional[OnEvent] = None, program: Optional[PulumiFn] = None) -> PreviewResult: """ Performs a dry-run update to a stack, returning pending changes. https://www.pulumi.com/docs/reference/cli/pulumi_preview/ :param parallel: Parallel is the number of resource operations to run in parallel at once. (1 for no parallelism). Defaults to unbounded (2147483647). :param message: Message to associate with the preview operation. :param target: Specify an exclusive list of resource URNs to update. :param expect_no_changes: Return an error if any changes occur during this update. :param diff: Display operation as a rich diff showing the overall change. :param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list. :param replace: Specify resources to replace. :param on_output: A function to process the stdout stream. :param on_event: A function to process structured events from the Pulumi event stream. :param program: The inline program. 
:returns: PreviewResult """ # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args # pylint: disable=unused-argument program = program or self.workspace.program extra_args = _parse_extra_args(**locals()) args = ["preview"] args.extend(extra_args) kind = ExecKind.LOCAL.value on_exit = None if program: kind = ExecKind.INLINE.value server = grpc.server(futures.ThreadPoolExecutor(max_workers=4), # pylint: disable=consider-using-with options=_GRPC_CHANNEL_OPTIONS) language_server = LanguageServer(program) language_pb2_grpc.add_LanguageRuntimeServicer_to_server(language_server, server) port = server.add_insecure_port(address="0.0.0.0:0") server.start() def on_exit_fn(): language_server.on_pulumi_exit() server.stop(0) on_exit = on_exit_fn args.append(f"--client=127.0.0.1:{port}") args.extend(["--exec-kind", kind]) log_file, temp_dir = _create_log_file("preview") args.extend(["--event-log", log_file]) summary_events: List[SummaryEvent] = [] def on_event_callback(event: EngineEvent) -> None: if event.summary_event: summary_events.append(event.summary_event) if on_event: on_event(event) # Start watching logs in a thread log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event_callback)) log_watcher_thread.start() try: preview_result = self._run_pulumi_cmd_sync(args, on_output) finally: _cleanup(temp_dir, log_watcher_thread, on_exit) if not summary_events: raise RuntimeError("summary event never found") return PreviewResult(stdout=preview_result.stdout, stderr=preview_result.stderr, change_summary=summary_events[0].resource_changes) def refresh(self, parallel: Optional[int] = None, message: Optional[str] = None, target: Optional[List[str]] = None, expect_no_changes: Optional[bool] = None, on_output: Optional[OnOutput] = None, on_event: Optional[OnEvent] = None) -> RefreshResult: """ Compares the current stack’s resource state with the state known to exist in the actual cloud provider. Any such changes are adopted into the current stack. :param parallel: Parallel is the number of resource operations to run in parallel at once. (1 for no parallelism). Defaults to unbounded (2147483647). :param message: Message (optional) to associate with the refresh operation. :param target: Specify an exclusive list of resource URNs to refresh. :param expect_no_changes: Return an error if any changes occur during this update. :param on_output: A function to process the stdout stream. :param on_event: A function to process structured events from the Pulumi event stream. 
:returns: RefreshResult """ # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args # pylint: disable=unused-argument extra_args = _parse_extra_args(**locals()) args = ["refresh", "--yes", "--skip-preview"] args.extend(extra_args) kind = ExecKind.INLINE.value if self.workspace.program else ExecKind.LOCAL.value args.extend(["--exec-kind", kind]) log_watcher_thread = None temp_dir = None if on_event: log_file, temp_dir = _create_log_file("refresh") args.extend(["--event-log", log_file]) log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event)) log_watcher_thread.start() try: refresh_result = self._run_pulumi_cmd_sync(args, on_output) finally: _cleanup(temp_dir, log_watcher_thread) summary = self.info() assert summary is not None return RefreshResult(stdout=refresh_result.stdout, stderr=refresh_result.stderr, summary=summary) def destroy(self, parallel: Optional[int] = None, message: Optional[str] = None, target: Optional[List[str]] = None, target_dependents: Optional[bool] = None, on_output: Optional[OnOutput] = None, on_event: Optional[OnEvent] = None) -> DestroyResult: """ Destroy deletes all resources in a stack, leaving all history and configuration intact. :param parallel: Parallel is the number of resource operations to run in parallel at once. (1 for no parallelism). Defaults to unbounded (2147483647). :param message: Message (optional) to associate with the destroy operation. :param target: Specify an exclusive list of resource URNs to destroy. :param target_dependents: Allows updating of dependent targets discovered but not specified in the Target list. :param on_output: A function to process the stdout stream. :param on_event: A function to process structured events from the Pulumi event stream. :returns: DestroyResult """ # Disable unused-argument because pylint doesn't understand we process them in _parse_extra_args # pylint: disable=unused-argument extra_args = _parse_extra_args(**locals()) args = ["destroy", "--yes", "--skip-preview"] args.extend(extra_args) kind = ExecKind.INLINE.value if self.workspace.program else ExecKind.LOCAL.value args.extend(["--exec-kind", kind]) log_watcher_thread = None temp_dir = None if on_event: log_file, temp_dir = _create_log_file("destroy") args.extend(["--event-log", log_file]) log_watcher_thread = threading.Thread(target=_watch_logs, args=(log_file, on_event)) log_watcher_thread.start() try: destroy_result = self._run_pulumi_cmd_sync(args, on_output) finally: _cleanup(temp_dir, log_watcher_thread) summary = self.info() assert summary is not None return DestroyResult(stdout=destroy_result.stdout, stderr=destroy_result.stderr, summary=summary) def get_config(self, key: str) -> ConfigValue: """ Returns the config value associated with the specified key. :param key: The key for the config item to get. :returns: ConfigValue """ return self.workspace.get_config(self.name, key) def get_all_config(self) -> ConfigMap: """ Returns the full config map associated with the stack in the Workspace. :returns: ConfigMap """ return self.workspace.get_all_config(self.name) def set_config(self, key: str, value: ConfigValue) -> None: """ Sets a config key-value pair on the Stack in the associated Workspace. :param key: The config key to add. :param value: The config value to add. """ self.workspace.set_config(self.name, key, value) def set_all_config(self, config: ConfigMap) -> None: """ Sets all specified config values on the stack in the associated Workspace. 
:param config: A mapping of key to ConfigValue to set to config. """ self.workspace.set_all_config(self.name, config) def remove_config(self, key: str) -> None: """ Removes the specified config key from the Stack in the associated Workspace. :param key: The key to remove from config. """ self.workspace.remove_config(self.name, key) def remove_all_config(self, keys: List[str]) -> None: """ Removes the specified config keys from the Stack in the associated Workspace. :param keys: The keys to remove from config. """ self.workspace.remove_all_config(self.name, keys) def refresh_config(self) -> None: """Gets and sets the config map used with the last update.""" self.workspace.refresh_config(self.name) def outputs(self) -> OutputMap: """ Gets the current set of Stack outputs from the last Stack.up(). :returns: OutputMap """ return self.workspace.stack_outputs(self.name) def history(self, page_size: Optional[int] = None, page: Optional[int] = None) -> List[UpdateSummary]: """ Returns a list summarizing all previous and current results from Stack lifecycle operations (up/preview/refresh/destroy). :param page_size: Paginate history entries (used in combination with page), defaults to all. :param page: Paginate history entries (used in combination with page_size), defaults to all. :returns: List[UpdateSummary] """ args = ["stack", "history", "--json", "--show-secrets"] if page_size is not None: # default page=1 when page_size is set if page is None: page = 1 args.extend(["--page-size", str(page_size), "--page", str(page)]) result = self._run_pulumi_cmd_sync(args) summary_list = json.loads(result.stdout) summaries: List[UpdateSummary] = [] for summary_json in summary_list: summary = UpdateSummary(kind=summary_json["kind"], start_time=datetime.strptime(summary_json["startTime"], _DATETIME_FORMAT), message=summary_json["message"], environment=summary_json["environment"], config=summary_json["config"], result=summary_json["result"], end_time=datetime.strptime(summary_json["endTime"], _DATETIME_FORMAT), version=summary_json["version"] if "version" in summary_json else None, deployment=summary_json["Deployment"] if "Deployment" in summary_json else None, resource_changes=summary_json["resourceChanges"] if "resourceChanges" in summary_json else None) summaries.append(summary) return summaries def info(self) -> Optional[UpdateSummary]: """ Returns the current results from Stack lifecycle operations. :returns: Optional[UpdateSummary] """ history = self.history(page_size=1) if not history: return None return history[0] def cancel(self) -> None: """ Cancel stops a stack's currently running update. It returns an error if no update is currently running. Note that this operation is _very dangerous_, and may leave the stack in an inconsistent state if a resource operation was pending when the update was canceled. This command is not supported for local backends. """ self._run_pulumi_cmd_sync(["cancel", "--yes"]) def export_stack(self) -> Deployment: """ export_stack exports the deployment state of the stack. This can be combined with Stack.import_state to edit a stack's state (such as recovery from failed deployments). :returns: Deployment """ return self.workspace.export_stack(self.name) def import_stack(self, state: Deployment) -> None: """ import_stack imports the specified deployment state into a pre-existing stack. This can be combined with Stack.export_state to edit a stack's state (such as recovery from failed deployments). :param state: The deployment state to import. 
""" return self.workspace.import_stack(self.name, state) def _run_pulumi_cmd_sync(self, args: List[str], on_output: Optional[OnOutput] = None) -> CommandResult: envs = {"PULUMI_DEBUG_COMMANDS": "true"} if self.workspace.pulumi_home is not None: envs = {**envs, "PULUMI_HOME": self.workspace.pulumi_home} envs = {**envs, **self.workspace.env_vars} additional_args = self.workspace.serialize_args_for_op(self.name) args.extend(additional_args) args.extend(["--stack", self.name]) result = _run_pulumi_cmd(args, self.workspace.work_dir, envs, on_output) self.workspace.post_command_callback(self.name) return result def _parse_extra_args(**kwargs) -> List[str]: extra_args: List[str] = [] message = kwargs.get("message") expect_no_changes = kwargs.get("expect_no_changes") diff = kwargs.get("diff") replace = kwargs.get("replace") target = kwargs.get("target") target_dependents = kwargs.get("target_dependents") parallel = kwargs.get("parallel") if message: extra_args.extend(["--message", message]) if expect_no_changes: extra_args.append("--expect-no-changes") if diff: extra_args.append("--diff") if replace: for r in replace: extra_args.extend(["--replace", r]) if target: for t in target: extra_args.extend(["--target", t]) if target_dependents: extra_args.append("--target-dependents") if parallel: extra_args.extend(["--parallel", str(parallel)]) return extra_args def fully_qualified_stack_name(org: str, project: str, stack: str) -> str: """ Returns a stack name formatted with the greatest possible specificity: org/project/stack or user/project/stack Using this format avoids ambiguity in stack identity guards creating or selecting the wrong stack. Note that filestate backends (local file, S3, Azure Blob) do not support stack names in this format, and instead only use the stack name without an org/user or project to qualify it. See: https://github.com/pulumi/pulumi/issues/2522 :param org: The name of the org or user. :param project: The name of the project. :param stack: The name of the stack. :returns: The fully qualified stack name. """ return f"{org}/{project}/{stack}" def _create_log_file(command: str) -> Tuple[str, tempfile.TemporaryDirectory]: log_dir = tempfile.TemporaryDirectory(prefix=f"automation-logs-{command}-") # pylint: disable=consider-using-with filepath = os.path.join(log_dir.name, "eventlog.txt") # Open and close the file to ensure it exists before we start polling for logs with open(filepath, "w+", encoding="utf-8"): pass return filepath, log_dir def _watch_logs(filename: str, callback: OnEvent): with open(filename, encoding="utf-8") as f: while True: line = f.readline() # sleep if file hasn't been updated if not line: time.sleep(0.1) continue event = EngineEvent.from_json(json.loads(line)) callback(event) # if this is the cancel event, stop watching logs. if event.cancel_event: break def _cleanup(temp_dir: Optional[tempfile.TemporaryDirectory], thread: Optional[threading.Thread], on_exit_fn: Optional[Callable[[], None]] = None) -> None: # If there's an on_exit function, execute it (used in preview/up to shut down server) if on_exit_fn: on_exit_fn() # If we started a thread to watch logs, wait for it to terminate, timing out after 5 seconds. if thread: thread.join(5) # If we created a temp_dir for the logs, clean up. if temp_dir: temp_dir.cleanup()
py
1a45d954becfa1525c90976f8e050cc6a57a97fd
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import json import logging import os from argparse import Namespace import numpy as np from fairseq import metrics, options, utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, data_utils, encoders, indexed_dataset, LanguagePairDataset, PrependTokenDataset, StripTokenDataset, TruncateDataset, ) from fairseq.tasks import FairseqTask, register_task EVAL_BLEU_ORDER = 4 logger = logging.getLogger(__name__) def load_langpair_dataset( data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, append_source_id=False, num_buckets=0, shuffle=True, args=None ): def split_exists(split, src, tgt, lang, data_path): filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=dataset_impl) src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = split + (str(k) if k > 0 else '') # infer langcode if split_exists(split_k, src, tgt, src, data_path): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt)) elif split_exists(split_k, tgt, src, src, data_path): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src)) else: if k > 0: break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl) if truncate_source: src_dataset = AppendTokenDataset( TruncateDataset( StripTokenDataset(src_dataset, src_dict.eos()), max_source_positions - 1, ), src_dict.eos(), ) src_datasets.append(src_dataset) tgt_dataset = data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl) if tgt_dataset is not None: tgt_datasets.append(tgt_dataset) logger.info('{} {} {}-{} {} examples'.format( data_path, split_k, src, tgt, len(src_datasets[-1]) )) if not combine: break assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0 if len(src_datasets) == 1: src_dataset = src_datasets[0] tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None else: sample_ratios = [1] * len(src_datasets) sample_ratios[0] = upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) if len(tgt_datasets) > 0: tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) else: tgt_dataset = None if prepend_bos: assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index") src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) if tgt_dataset is not None: tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) eos = None if append_source_id: src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src))) if tgt_dataset is not None: tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt))) eos = tgt_dict.index('[{}]'.format(tgt)) align_dataset = None if load_alignments: align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt)) if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl) tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None return LanguagePairDataset( src_dataset, src_dataset.sizes, src_dict, tgt_dataset, 
tgt_dataset_sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, eos=eos, num_buckets=num_buckets, shuffle=shuffle, split=split, args=args ) @register_task('translation') class TranslationTask(FairseqTask): """ Translate from one (source) language to another (target) language. Args: src_dict (~fairseq.data.Dictionary): dictionary for the source language tgt_dict (~fairseq.data.Dictionary): dictionary for the target language .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. The translation task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.translation_parser :prog: """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off parser.add_argument('data', help='colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner') parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language') parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments') parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left') parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') parser.add_argument('--truncate-source', action='store_true', default=False, help='truncate source to max-source-positions') parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N', help='if >0, then bucket source and target lengths into N ' 'buckets and pad accordingly; this is useful on TPUs ' 'to minimize the number of compilations') # options for reporting BLEU during validation parser.add_argument('--eval-bleu', action='store_true', help='evaluation with BLEU scores') parser.add_argument('--eval-bleu-detok', type=str, default="space", help='detokenize before computing BLEU (e.g., "moses"); ' 'required if using --eval-bleu; use "space" to ' 'disable detokenization; see fairseq.data.encoders ' 'for other options') parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON', help='args for building the tokenizer, if needed') parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False, help='compute tokenized BLEU instead of sacrebleu') parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None, help='remove BPE before computing BLEU') parser.add_argument('--eval-bleu-args', type=str, metavar='JSON', help='generation args for BLUE scoring, ' 'e.g., \'{"beam": 4, "lenpen": 0.6}\'') parser.add_argument('--eval-bleu-print-samples', action='store_true', help='print sample generations during validation') # fmt: on def __init__(self, args, src_dict, tgt_dict): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). 
        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)

        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        # find language pair automatically
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
        if args.source_lang is None or args.target_lang is None:
            raise Exception('Could not infer language pair, please provide it explicitly')

        # load dictionaries
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
        assert src_dict.pad() == tgt_dict.pad()
        assert src_dict.eos() == tgt_dict.eos()
        assert src_dict.unk() == tgt_dict.unk()
        logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

        return cls(args, src_dict, tgt_dict)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path, split, src, self.src_dict, tgt, self.tgt_dict,
            combine=combine, dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            load_alignments=self.args.load_alignments,
            truncate_source=self.args.truncate_source,
            num_buckets=self.args.num_batch_buckets,
            shuffle=(split != 'test'),
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths):
        return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)

    def build_model(self, args):
        model = super().build_model(args)
        if getattr(args, 'eval_bleu', False):
            assert getattr(args, 'eval_bleu_detok', None) is not None, (
                '--eval-bleu-detok is required if using --eval-bleu; '
                'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '
                'to disable detokenization, e.g., when using sentencepiece)'
            )
            detok_args = json.loads(getattr(args, 'eval_bleu_detok_args', '{}') or '{}')
            self.tokenizer = encoders.build_tokenizer(Namespace(
                tokenizer=getattr(args, 'eval_bleu_detok', None),
                **detok_args
            ))

            gen_args = json.loads(getattr(args, 'eval_bleu_args', '{}') or '{}')
            self.sequence_generator = self.build_generator([model], Namespace(**gen_args))
        return model

    def valid_step(self, sample, model, criterion, **kwargs):
        # check whether the dependency matrix (dependency_mat) needs to be fetched
        special_input = model.get_special_input(sample)
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion, **special_input)
        if self.args.eval_bleu:
            bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
            logging_output['_bleu_sys_len'] = bleu.sys_len
            logging_output['_bleu_ref_len'] = bleu.ref_len
            # we split counts into separate entries so that they can be
            # summed efficiently across workers using fast-stat-sync
            assert len(bleu.counts) == EVAL_BLEU_ORDER
            for i in range(EVAL_BLEU_ORDER):
                logging_output['_bleu_counts_' + str(i)] = bleu.counts[i]
                logging_output['_bleu_totals_' + str(i)] = bleu.totals[i]
        return
loss, sample_size, logging_output def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.args.eval_bleu: def sum_logs(key): return sum(log.get(key, 0) for log in logging_outputs) counts, totals = [], [] for i in range(EVAL_BLEU_ORDER): counts.append(sum_logs('_bleu_counts_' + str(i))) totals.append(sum_logs('_bleu_totals_' + str(i))) if max(totals) > 0: # log counts as numpy arrays -- log_scalar will sum them correctly metrics.log_scalar('_bleu_counts', np.array(counts)) metrics.log_scalar('_bleu_totals', np.array(totals)) metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len')) metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len')) def compute_bleu(meters): import inspect import sacrebleu fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0] if 'smooth_method' in fn_sig: smooth = {'smooth_method': 'exp'} else: smooth = {'smooth': 'exp'} bleu = sacrebleu.compute_bleu( correct=meters['_bleu_counts'].sum, total=meters['_bleu_totals'].sum, sys_len=meters['_bleu_sys_len'].sum, ref_len=meters['_bleu_ref_len'].sum, **smooth ) return round(bleu.score, 2) metrics.log_derived('bleu', compute_bleu) def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions) @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary`.""" return self.src_dict @property def target_dictionary(self): """Return the target :class:`~fairseq.data.Dictionary`.""" return self.tgt_dict def _inference_with_bleu(self, generator, sample, model): import sacrebleu def decode(toks, escape_unk=False): s = self.tgt_dict.string( toks.int().cpu(), self.args.eval_bleu_remove_bpe, unk_string=( "UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP" ), ) if self.tokenizer: s = self.tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample, None) hyps, refs = [], [] for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]['tokens'])) refs.append(decode( utils.strip_pad(sample['target'][i], self.tgt_dict.pad()), escape_unk=True, # don't count <unk> as matches to the hypo )) if self.args.eval_bleu_print_samples: logger.info('example hypothesis: ' + hyps[0]) logger.info('example reference: ' + refs[0]) if self.args.eval_tokenized_bleu: return sacrebleu.corpus_bleu(hyps, [refs], tokenize='none') else: return sacrebleu.corpus_bleu(hyps, [refs])
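
# A sketch of how this task is typically selected, using only flags registered
# above; the data path and language codes are illustrative assumptions, and the
# usual training flags (--arch, optimizer, etc.) are omitted.
#
#   fairseq-train data-bin/example --task translation -s de -t en \
#       --eval-bleu --eval-bleu-detok moses \
#       --eval-bleu-args '{"beam": 4, "lenpen": 0.6}'
#
# Programmatically, the same task can be built from parsed arguments:
#
#   task = TranslationTask.setup_task(args)   # loads dict.de.txt / dict.en.txt
#   task.load_dataset('train')
#   task.load_dataset('valid')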
py
1a45d9c997aeb7cb4de99042d8be6b2f4d8948ca
#!/usr/bin/env python3

# You are probably well aware of the 'birthday paradox'
# https://en.wikipedia.org/wiki/Birthday_problem
# Let's try simulating it
# We will have a variable number of bins (can be months or days)
# And some number of trials for the simulation
# And some number of people who have random birthdays
# Use assert() to check parameters
# On the command line:
# python3 birthday.py <bins> <trials> <people>

import sys  # allows us to use sys.argv
import random

assert(len(sys.argv) == 4)  # 4 items: the script name plus the 3 arguments
bins = int(sys.argv[1])
trials = int(sys.argv[2])
people = int(sys.argv[3])
assert(bins > 0)
assert(trials > 0)
assert(people > 1)

collisions = 0

for t in range(trials):
    calendar = []  # create an empty calendar
    same_day = False
    for i in range(bins):
        calendar.append(0)
    # the loop above is the same thing as calendar = [0] * bins
    for p in range(people):  # insert people into the calendar
        r = random.randint(0, bins-1)  # r represents their birthday
        calendar[r] += 1
    for day in calendar:  # look for a shared birthday
        if day > 1:
            same_day = True
            break  # faster because the loop stops as soon as a shared birthday is found
    if same_day:
        collisions += 1

print(collisions/trials)

"""
python3 birthday.py 365 1000 23
0.520
"""
py
1a45da3ef550a3ca0f177171affc5b309b4c048c
# coding: utf-8 """ Copyright 2016 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems import re import json from ..utils import sanitize_for_serialization class GroupGreetingEventGreetingOwner(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ GroupGreetingEventGreetingOwner - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'id': 'str' } self.attribute_map = { 'id': 'id' } self._id = None @property def id(self): """ Gets the id of this GroupGreetingEventGreetingOwner. :return: The id of this GroupGreetingEventGreetingOwner. :rtype: str """ return self._id @id.setter def id(self, id): """ Sets the id of this GroupGreetingEventGreetingOwner. :param id: The id of this GroupGreetingEventGreetingOwner. :type: str """ self._id = id def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_json(self): """ Returns the model as raw JSON """ return json.dumps(sanitize_for_serialization(self.to_dict())) def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
py
1a45da97bebf55707eb3e5a27843989923edbe77
from bs4 import BeautifulSoup ENDPOINT = 'https://www2.correios.com.br/sistemas/rastreamento/ctrl/ctrlRastreamento.cfm' def __make_request(session, tracking_id): payload = { 'acao': 'track', 'objetos': tracking_id, 'btnPesq': 'Buscar' } return session.post(ENDPOINT, data=payload) async def __make_soup(response): if type(response) == str: return BeautifulSoup(response, 'html.parser') return BeautifulSoup(await response.text(), 'html.parser') def __find_href(tag): a = tag.find('a') if a: return a.get('href') def __get_events(soup): events = soup.select('td.sroLbEvent') for i, event in enumerate(events): events[i] = { 'event': event.strong.string, 'link': __find_href(event) } return events def __get_info(soup): infos = soup.select('td.sroDtEvent') for i, info in enumerate(infos): info = list(info.stripped_strings) infos[i] = { 'date': info[0], 'hour': info[1], 'local': __fix_local(info[2]) } return infos def __fix_local(local): return local.replace('\xa0/\xa0', ' / ') def _get_events(html): soup = BeautifulSoup(html, 'lxml') events = __get_events(soup) infos = __get_info(soup) full_events = [] for event, info in zip(events, infos): full_events.append({**event, **info}) return full_events async def track_package(session, tracking_id): async with __make_request(session, tracking_id) as r: html = await r.text() if 'Aguardando postagem pelo remetente.' in html: return else: return _get_events(html)
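
# A minimal driver sketch for track_package above. The tracking code is an
# illustrative placeholder; aiohttp (and lxml, used by _get_events) must be
# installed for this to run.
if __name__ == "__main__":
    import asyncio
    import aiohttp

    async def _demo(tracking_id):
        async with aiohttp.ClientSession() as session:
            events = await track_package(session, tracking_id)
            for event in events or []:  # None means the item is still awaiting posting
                print(event["date"], event["hour"], event["event"])

    asyncio.run(_demo("QB123456789BR"))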
py
1a45dc81a631dd61a37d866730381e1e1f73c54e
import numpy as np from PIL import Image import matplotlib.pyplot as plt i=Image.open('chessboard.png') iar=np.array(i) print(iar) plt.imshow(iar) plt.show()
py
1a45dd1856fba2eb80865504f14cd47602f21688
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.conf.urls import include, url import spirit.topic.views import spirit.admin.urls import spirit.user.urls import spirit.search.urls import spirit.category.urls import spirit.topic.urls import spirit.comment.urls patterns = [ url(r'^$', spirit.topic.views.index_active, name='index'), url(r'^st/admin/', include(spirit.admin.urls, namespace='admin')), url(r'^user/', include(spirit.user.urls, namespace='user')), url(r'^search/', include(spirit.search.urls, namespace='search')), url(r'^category/', include(spirit.category.urls, namespace='category')), url(r'^topic/', include(spirit.topic.urls, namespace='topic')), url(r'^comment/', include(spirit.comment.urls, namespace='comment')), ] urlpatterns = [ url(r'^', include(patterns, namespace='spirit', app_name='spirit')), ]
py
1a45dd56b9e06f6d6ca1e9dd07a26275b397f6a1
import aiohttp import asyncio import json from collections import namedtuple import getpass print("This demo will connect to: https://gsp.eur.onstar.com/\n") print("Before trying - ensure you have access by login in above site\n") print("\nProvide credentials\n") username = input("Username/email: ") password = getpass.getpass("Password: ") gm_pin = getpass.getpass("PIN for localization: ") def _json_object_hook(d): return namedtuple('X', d.keys())(*d.values()) def dumpJson(parsed): print(json.dumps(parsed, sort_keys=True, indent=4)) @asyncio.coroutine def fetch(loop): payload = {'username': username, 'password': password, 'roleCode': 'driver', 'place': ''} session = aiohttp.ClientSession(loop=loop) response = yield from session.post('https://gsp.eur.onstar.com/gspserver/services/admin/login.json', data=payload) ret = yield from response.text() return json.loads(ret, object_hook=lambda d: namedtuple('X', d.keys())(*d.values())), session @asyncio.coroutine def getLoginInfo(token, session): header = {'X-GM-token': token} ret = yield from session.get('https://gsp.eur.onstar.com/gspserver/services/admin/getLoginInfo.json', headers=header) return ret @asyncio.coroutine def getDiagnostics(token, session, vehicleId): header = {'X-GM-token': token} payload = {'vehicleId': vehicleId} ret = yield from session.get('https://gsp.eur.onstar.com/gspserver/services/vehicle/getDiagnosticsReport.json', params=payload, headers = header) return ret @asyncio.coroutine def locateCar(token, pin, session, vehicleId): header = {'X-GM-token': token, 'X-GM-pincode': pin} payload = {'vehicleId': vehicleId} ret = yield from session.post('https://gsp.eur.onstar.com/gspserver/services/vehicle/performLocationHistoryQuery.json', data=payload, headers = header) return ret @asyncio.coroutine def main(loop): data,session = ((yield from fetch(loop))) print("Data from fetch - looking for token\n") print(data) token = data.results[0].token print("Found token: ") print(token) print("\n\n") data = (yield from getLoginInfo(token,session)) object = yield from data.text() print("Getting login info data, looking for first vehicleId:\n") dumpJson(json.loads(object) ) object = json.loads(object, object_hook=lambda d: namedtuple('X', list(map(lambda x:x.replace('$','_'),d.keys())))(*d.values())) vehicleId = object.results[0].vehicles[0].vehicle.vehicleId print("Found vehicleId: ") print(vehicleId) print ("\n") diag = (yield from getDiagnostics(token, session, vehicleId)) vehDiag = yield from diag.text() print("Getting diagnostics information:\n") dumpJson(json.loads(vehDiag)) print("Getting localization:\n") locate = (yield from locateCar(token, gm_pin, session, vehicleId) ) locate = yield from locate.text() dumpJson(json.loads(locate)) print("Done for now") session.close() loop = asyncio.get_event_loop() loop.run_until_complete(main(loop))
py
1a45de2a7c1036d1833e2376bb0b4b2b1efa1d91
""" ASGI config for FromNode project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FromNode.settings') application = get_asgi_application()
py
1a45de90948a1ee4fce61c41d4c2103b8393d5b4
from django.apps import AppConfig class PersonalinfoConfig(AppConfig): name = 'personalinfo'
py
1a45df555436e1c44ae414ec6d67afaf7e17dcd1
class Job: """Generic job class.""" def __init__(self, namespace=None, name=None, image=None, data=None, **kw): if data is not None: self._raw = data else: self._raw = self.create( namespace, name=name, image=image, **kw) @property def active(self): raise NotImplementedError() @property def finished(self): raise NotImplementedError() @property def id(self): raise NotImplementedError() def get_payload(self): raise NotImplementedError() def payload(self): return self._raw
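
# A sketch of the subclass contract implied above: __init__ calls self.create()
# when no raw data is given, so a concrete backend is expected to supply
# create() plus the abstract properties. The in-memory fields below are purely
# illustrative.
class DictJob(Job):
    """Toy in-memory job used to illustrate the interface."""

    def create(self, namespace, name=None, image=None, **kw):
        # Pretend to submit the job and return the backend's raw record.
        return {"metadata": {"namespace": namespace, "name": name},
                "spec": {"image": image, **kw},
                "status": {"active": 1, "succeeded": 0}}

    @property
    def active(self):
        return self._raw["status"]["active"] > 0

    @property
    def finished(self):
        return self._raw["status"]["succeeded"] > 0

    @property
    def id(self):
        meta = self._raw["metadata"]
        return "{}/{}".format(meta["namespace"], meta["name"])

    def get_payload(self):
        return self._raw["spec"]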
py
1a45df97b80529a1f27bef8580e66830d3a352b1
#!/usr/bin/python3 # coding=utf-8 """ :Copyright: © 2022 Advanced Control Systems, Inc. All Rights Reserved. @Author: Stephen Hung @Author: Darren Liang @Date : 2022-02-18 """ import os import sys sys.path.append("..") from adms_api.core.OracleInterface import OracleInterface # from acsprism import RtdbAddress, RtdbPoint, rtdb_init # from core.LinuxInterface import LinuxInterface # APP INFO TITLE = "ADMS API" IPADDR = "127.0.0.1" PORT = "5000" # PRISM INFO # PRISM = LinuxInterface() # DB INFO def connect_database(): USER = os.getenv('ORACLE_USER', 'acs_das') PSWD = os.getenv('ORACLE_PW' , 'acs_das') TNS = os.getenv('ORACLE_DBSTRING', 'ems') DASdb = OracleInterface(USER, PSWD, TNS) #DASdb.ConnectTest() return DASdb # LOG INFO LOG_FILENAME = 'ADMS_API.log' LOG_FORMAT = '%(asctime)s [%(process)d] %(levelname)s %(name)s: %(message)s' LOG_FOLDER = '/home/acs/tmp' if __name__ == "__main__": USER = "" PSWD = "" TNS = "" DASdb = OracleInterface(USER, PSWD, TNS) DASdb.ConnectTest()
py
1a45e05adccd6144268b1a4372ca93d3f6720a98
#SPDX-License-Identifier: MIT """ Data source that uses the GHTorrent relational database of GitHub activity. """ import pandas as pd import sqlalchemy as s import numpy as np import re from augur import logger from augur.util import annotate class GHTorrent(object): """Uses the GHTorrent database to return dataframes with interesting GitHub indicators""" def __init__(self, user, password, host, port, dbname): """ Connect to GHTorrent :param dbstr: The [database string](http://docs.sqlalchemy.org/en/latest/core/engines.html) to connect to the GHTorrent database """ self.DB_STR = 'mysql+pymysql://{}:{}@{}:{}/{}'.format( user, password, host, port, dbname ) logger.debug('GHTorrent: Connecting to {}:{}/{} as {}'.format(host, port, dbname, user)) self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool) try: self.userid('howderek') except Exception as e: logger.error("Could not connect to GHTorrent database. Error: " + str(e)) def __single_table_count_by_date(self, table, repo_col='project_id', user_col='author_id', group_by="week"): """ Generates query string to count occurances of rows per date for a given table. External input must never be sent to this function, it is for internal use only. :param table: The table in GHTorrent to generate the string for :param repo_col: The column in that table with the project ids :param user_col: The column in that table with the user ids :param group_by: Default week; Options raw, day, week, month, year; Selects period of time to be grouped by :return: Query string """ if group_by == "raw": return """ SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) AS "date", {2} AS "user_id" FROM {0} WHERE {1} = :repoid """.format(table, repo_col, user_col) if group_by == "day": return """ SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) AS "date", COUNT(*) AS "{0}" FROM {0} WHERE {1} = :repoid GROUP BY DATE(created_at) ORDER BY DATE(created_at) DESC""".format(table, repo_col) if group_by == "week": return """ SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) AS "date", COUNT(*) AS "{0}" FROM {0} WHERE {1} = :repoid GROUP BY YEARWEEK(created_at) ORDER BY DATE(created_at) DESC""".format(table, repo_col) if group_by == "month": return """ SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) AS "date", COUNT(*) AS "{0}" FROM {0} WHERE {1} = :repoid GROUP BY MONTH(created_at), YEAR(created_at) ORDER BY DATE(created_at) DESC""".format(table, repo_col) if group_by == "year": return """ SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) AS "date", COUNT(*) AS "{0}" FROM {0} WHERE {1} = :repoid GROUP BY YEAR(created_at) ORDER BY DATE(created_at) DESC""".format(table, repo_col) def __sub_table_count_by_date(self, parent_table, sub_table, parent_id, sub_id, project_id): """ Generates query string to count occurances of rows per date for a given query sub-table. A query sub-table is a table that describes in more detail a specfic asset of another query table- for example, the table "pull_request_comments" is a sub table of "pull_request", where the query is pull requests. External input must never be sent to this function, it is for internal use only. 
:param parent_table: The table in GHTorrent that holds the related project_id and parent_id :param sub_table: The table in GHTorrent to generate the string for :param parent_id: The column in parent_table with the query id :param sub_id: The column in sub_id with the query id :param project_id: the column in parent_table that holds the repoid :return: Query string """ return """ SELECT date({1}.created_at) AS "date", COUNT(*) AS {1} FROM {1}, {0} WHERE {1}.{3} = {0}.{2} AND {0}.{4} = :repoid GROUP BY YEARWEEK({1}.created_at)""".format(parent_table, sub_table, parent_id, sub_id, project_id) def repoid(self, owner_or_repoid, repo=None): """ Returns a repository's ID as it appears in the GHTorrent projects table github.com/[owner]/[project] :param owner: The username of a project's owner :param repo: The name of the repository :return: The repository's ID as it appears in the GHTorrent projects table """ repoid = 0 if repo is None: repoid = owner_or_repoid else: reposql = s.sql.text('SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :repoowner') result = self.db.execute(reposql, repo=repo, repoowner=owner_or_repoid) for row in result: repoid = row[0] return repoid def userid(self, username): """ Returns the userid given a username :param username: GitHub username to be matched against the login table in GHTorrent :return: The id from the users table in GHTorrent """ reposql = s.sql.text('SELECT users.id FROM users WHERE users.login = :username') userid = 0 result = self.db.execute(reposql, username=username) for row in result: userid = row[0] return userid ##################################### ### DIVERSITY AND INCLUSION ### ##################################### ##################################### ### GROWTH, MATURITY, AND DECLINE ### ##################################### @annotate(tag='closed-issues') def closed_issues(self, owner, repo=None): """ Timeseries of the count of the number of issues closed per week :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with newly closed issues/week """ repoid = self.repoid(owner, repo) issuesClosedSQL = s.sql.text(""" SELECT SUBDATE(DATE(issue_events.created_at), WEEKDAY(DATE(issue_events.created_at))) AS "date", COUNT(*) as issues_closed FROM issue_events, issues WHERE issue_events.issue_id = issues.id AND issue_events.action = "closed" AND issues.repo_id = :repoid GROUP BY YEARWEEK(issue_events.created_at) """) return pd.read_sql(issuesClosedSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='code-commits') def code_commits(self, owner, repo=None, group_by="week"): """ Timeseries of the count of commits :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with new commits/week """ repoid = self.repoid(owner, repo) commitsSQL = s.sql.text(self.__single_table_count_by_date('commits', group_by=group_by)) return pd.read_sql(commitsSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='code-review-iteration') def code_review_iteration(self, owner, repo=None): """ Timeseries of the count of iterations (being closed and reopened) that a merge request (code review) goes through until it is finally merged :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with iterations/issue for each issue that week """ repoid = self.repoid(owner, repo) codeReviewIterationSQL = s.sql.text(""" SELECT DATE(issues.created_at) AS "created_at", DATE(pull_request_history.created_at) AS "merged_at", issues.issue_id AS "issue_id", pull_request_history.pull_request_id AS "pull_request_id", pull_request_history.action AS "action", COUNT(CASE WHEN action = "closed" THEN 1 ELSE NULL END) AS "iterations" FROM issues, pull_request_history WHERE find_in_set(pull_request_history.action, "closed,merged")>0 AND pull_request_history.pull_request_id IN( SELECT pull_request_id FROM pull_request_history WHERE pull_request_history.action = "closed") #go by reopened or closed??? (min: completed 1 iteration and has started another OR min: completed 1 iteration) AND pull_request_history.pull_request_id = issues.issue_id AND issues.pull_request = 1 AND issues.repo_id = :repoid GROUP BY YEARWEEK(issues.created_at) #YEARWEEK to get (iterations (all PRs in repo) / week) instead of (iterations / PR)? """) df = pd.read_sql(codeReviewIterationSQL, self.db, params={"repoid": str(repoid)}) return pd.DataFrame({'date': df['created_at'], 'iterations': df['iterations']}) @annotate(tag='contribution-acceptance') def contribution_acceptance(self, owner, repo=None): """ Timeseries of the rolling ratio between merged pull requests over unmerged pull requests :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with ratio/week """ source_df = self.community_engagement(owner, repo) df = pd.DataFrame() df['date'] = source_df['date'] df['acceptance_rate'] = source_df['pull_requests_merged_rate_this_week'] return df @annotate(tag='contributing-github-organizations') def contributing_github_organizations(self, owner, repo=None): #needs clarification about return value """ Returns of all the contributing organizations to a project and the counts of each organization's contributions :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with each organization's information """ repoid = self.repoid(owner, repo) contributingOrgSQL = s.sql.text(""" SELECT id AS contributing_org, SUM(commits) AS commits, SUM(issues) AS issues, SUM(commit_comments) AS commit_comments, SUM(issue_comments) AS issue_comments, SUM(pull_requests) AS pull_requests, SUM(pull_request_comments) AS pull_request_comments, SUM(contribution_fields.commits + contribution_fields.issues + contribution_fields.commit_comments + contribution_fields.issue_comments + contribution_fields.pull_requests + contribution_fields.pull_request_comments) AS total, COUNT(DISTINCT contribution_fields.user) AS distinct_users FROM ( (SELECT organization_members.org_id AS id, commits.author_id AS user, COUNT(*) AS commits, 0 AS issues, 0 AS commit_comments, 0 AS issue_comments, 0 AS pull_requests, 0 AS pull_request_comments FROM organization_members, projects, commits WHERE projects.id = :repoid AND commits.project_id = :repoid AND projects.owner_id <> organization_members.org_id AND commits.author_id = organization_members.user_id GROUP BY commits.committer_id) UNION ALL (SELECT organization_members.org_id AS id, reporter_id AS user, 0 AS commits, COUNT(*) AS issues, 0 AS commit_comments, 0 AS issue_comments, 0, 0 FROM organization_members, projects, issues WHERE projects.id = :repoid AND issues.repo_id = :repoid AND pull_request = 0 AND projects.owner_id <> organization_members.org_id AND reporter_id = organization_members.user_id GROUP BY issues.reporter_id) UNION ALL (SELECT organization_members.org_id AS id, commit_comments.user_id AS user, 0 AS commits, 0 AS commit_comments, COUNT(*) AS commit_comments, 0 AS issue_comments, 0 , 0 FROM organization_members, projects, commit_comments JOIN commits ON commits.id = commit_comments.commit_id WHERE projects.id = :repoid AND commits.project_id = :repoid AND projects.owner_id <> organization_members.org_id AND commit_comments.user_id = organization_members.user_id GROUP BY commit_comments.user_id) UNION ALL (SELECT organization_members.org_id AS id, issue_comments.user_id AS user, 0 AS commits, 0 AS commit_comments, 0 AS commit_comments, COUNT(*) AS issue_comments, 0 , 0 FROM organization_members, projects, issue_comments JOIN issues ON issues.id = issue_comments.issue_id WHERE projects.id = :repoid AND issues.repo_id = :repoid AND projects.owner_id <> organization_members.org_id AND issue_comments.user_id = organization_members.user_id GROUP BY id) UNION ALL (SELECT organization_members.org_id AS id, reporter_id AS user, 0, 0, 0, 0, COUNT(*) AS pull_requests, 0 FROM organization_members, projects, issues WHERE projects.id = :repoid AND issues.repo_id = :repoid AND pull_request = 1 AND projects.owner_id <> organization_members.org_id AND reporter_id = organization_members.user_id GROUP BY issues.reporter_id) UNION ALL (SELECT organization_members.org_id AS id, pull_request_comments.user_id AS user, 0, 0, 0, 0, 0, COUNT(*) AS pull_request_comments FROM organization_members, projects, pull_request_comments JOIN pull_requests ON pull_requests.base_commit_id = pull_request_comments.commit_id WHERE pull_requests.base_repo_id = :repoid AND projects.id = :repoid AND projects.owner_id <> organization_members.org_id AND pull_request_comments.user_id = organization_members.user_id GROUP BY pull_request_comments.user_id) ) contribution_fields group by id having distinct_users > 1 ORDER BY total DESC """) return pd.read_sql(contributingOrgSQL, self.db, params={"repoid": str(repoid)}) 
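    # ------------------------------------------------------------------
    # Usage sketch (editor's illustration; the instance name `gh` and the
    # owner/repo values are assumptions, not part of the original module).
    # The timeseries metrics above all share the same call pattern:
    #
    #   repo_id = gh.repoid('rails', 'rails')         # resolve the id once
    #   closed = gh.closed_issues(repo_id)             # columns: date, issues_closed
    #   commits = gh.code_commits(repo_id, group_by='week')
    #   orgs = gh.contributing_github_organizations(repo_id)
    #
    # Resolving the repository id once and passing it to each metric avoids
    # re-running the owner/name lookup inside every query.
    # ------------------------------------------------------------------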
@annotate(tag='first-response-to-issue-duration') def first_response_to_issue_duration(self, owner, repo): #needs clarification about return value """ Timeseries of the time to first comment by issue :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame of issues with their response information """ repoid = self.repoid(owner, repo) issueCommentsSQL = s.sql.text(""" SELECT *, TIMESTAMPDIFF(MINUTE, opened, first_commented) AS minutes_to_comment FROM ( SELECT issues.id AS id, issues.created_at AS opened, MIN(issue_comments.created_at) AS first_commented, 0 AS pull_request FROM issues LEFT JOIN issue_comments ON issues.id = issue_comments.issue_id WHERE issues.pull_request = 0 AND issues.repo_id = :repoid GROUP BY id UNION ALL SELECT issues.id AS id, issues.created_at AS opened, MIN(pull_request_comments.created_at) AS first_commented, 1 AS pull_request FROM issues LEFT JOIN pull_request_comments ON issues.pull_request_id = pull_request_comments.pull_request_id WHERE issues.pull_request = 1 AND issues.repo_id = :repoid GROUP BY id ) a """) rs = pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) return rs @annotate(tag='forks') def forks(self, owner, repo=None, group_by="week"): """ Timeseries of when a repo's forks were created :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with new forks/week """ repoid = self.repoid(owner, repo) forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from', 'owner_id', group_by=group_by)) return pd.read_sql(forksSQL, self.db, params={"repoid": str(repoid)}).drop(0) @annotate(tag='maintainer-response-to-merge-request-duration') def maintainer_response_to_merge_request_duration(self, owner, repo=None): #needs clarification on return value """ Timeseries of duration of time between a merge request being created and a maintainer commenting on that request :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with each row being a week """ repoid = self.repoid(owner, repo) maintainerResponseToMRSQL = s.sql.text(""" SELECT DATE(issues.created_at) AS date, TIMESTAMPDIFF(DAY, issues.created_at, pull_request_comments.created_at) as days, pull_request_comments.created_at AS pull_request_comment_created_at, issues.id AS issue_id, pull_request_comments.user_id AS user_id, pull_request_comments.comment_id as pull_request_comment_id FROM issues JOIN pull_request_comments ON issues.pull_request_id = pull_request_comments.pull_request_id JOIN (SELECT DISTINCT actor_id FROM pull_request_history JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.pullreq_id WHERE action = "merged" AND base_repo_id = :repoid ORDER BY actor_id) a ON a.actor_id = user_id WHERE issues.pull_request = 1 AND issues.repo_id = :repoid GROUP BY YEARWEEK(date) """) df = pd.read_sql(maintainerResponseToMRSQL, self.db, params={"repoid": str(repoid)}) return df.iloc[:, 0:2] @annotate(tag='new-contributing-github-organizations') def new_contributing_github_organizations(self, owner, repo=None): #needs clarification about return value """ Timeseries of information about new contributing organizations on a certain date :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with each organization's information """ repoid = self.repoid(owner, repo) contributingOrgSQL = s.sql.text(""" SELECT SUBDATE(DATE(fields.date), WEEKDAY(DATE(fields.date))) AS "date", fields.id AS "contributing_org", count(DISTINCT fields.user) AS distinct_users FROM ( (SELECT organization_members.org_id AS id, commits.created_at AS date, commits.author_id AS user FROM organization_members, projects, commits WHERE projects.id = :repoid AND commits.project_id = :repoid AND projects.owner_id <> organization_members.org_id AND commits.author_id = organization_members.user_id GROUP BY commits.committer_id) UNION ALL (SELECT organization_members.org_id AS id, issues.created_at AS date, issues.reporter_id AS user FROM organization_members, projects, issues WHERE projects.id = :repoid AND issues.repo_id = :repoid AND pull_request = 0 AND projects.owner_id <> organization_members.org_id AND reporter_id = organization_members.user_id GROUP BY issues.reporter_id) UNION ALL (SELECT organization_members.org_id AS id, commit_comments.created_at AS date, commit_comments.user_id as user FROM organization_members, projects, commit_comments JOIN commits ON commits.id = commit_comments.commit_id WHERE projects.id = :repoid AND commits.project_id = :repoid AND projects.owner_id <> organization_members.org_id AND commit_comments.user_id = organization_members.user_id GROUP BY commit_comments.user_id) UNION ALL (SELECT organization_members.org_id AS id, issue_comments.created_at AS date, issue_comments.user_id AS user FROM organization_members, projects, issue_comments JOIN issues ON issues.id = issue_comments.issue_id WHERE projects.id = :repoid AND issues.repo_id = :repoid AND projects.owner_id <> organization_members.org_id AND issue_comments.user_id = organization_members.user_id GROUP BY id) UNION ALL (SELECT organization_members.org_id AS id, issues.created_at AS date, issues.reporter_id AS user FROM organization_members, projects, issues WHERE projects.id = :repoid AND issues.repo_id = :repoid AND pull_request = 1 AND projects.owner_id <> 
organization_members.org_id AND reporter_id = organization_members.user_id GROUP BY issues.reporter_id) UNION ALL (SELECT organization_members.org_id AS id, pull_request_comments.created_at AS date, pull_request_comments.user_id AS user FROM organization_members, projects, pull_request_comments JOIN pull_requests ON pull_requests.base_commit_id = pull_request_comments.commit_id WHERE pull_requests.base_repo_id = :repoid AND projects.id = :repoid AND projects.owner_id <> organization_members.org_id AND pull_request_comments.user_id = organization_members.user_id GROUP BY pull_request_comments.user_id)) fields Group BY contributing_org HAVING distinct_users > 1 ORDER BY YEARWEEK(date) """) df = pd.read_sql(contributingOrgSQL, self.db, params={"repoid": str(repoid)}) numOrgs = [] count = 0 for index, row in df.iterrows(): count += 1 numOrgs = np.append(numOrgs, count) return pd.DataFrame({'date': df["date"], 'organizations': numOrgs}) @annotate(tag='open-issues') def open_issues(self, owner, repo=None, group_by="week"): """ Timeseries of the count of newly issues opened per week :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with opened issues/week """ repoid = self.repoid(owner, repo) issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id', 'reporter_id', group_by=group_by)) return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='pull-request-comments') def pull_request_comments(self, owner, repo=None): """ Timeseries of the count of new pull request comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with new pull request comments/week """ repoid = self.repoid(owner, repo) pullRequestCommentsSQL = s.sql.text(self.__sub_table_count_by_date("pull_requests", "pull_request_comments", "pullreq_id", "pull_request_id", "base_repo_id")) return pd.read_sql(pullRequestCommentsSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='pull-requests-open') def pull_requests_open(self, owner, repo=None): """ Timeseries of pull requests creation and their associated activity :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with pull request information/week """ repoid = self.repoid(owner, repo) pullsSQL = s.sql.text(""" SELECT SUBDATE(DATE(pull_request_history.created_at), WEEKDAY(DATE(pull_request_history.created_at))) AS "date", COUNT(pull_requests.id) AS "pull_requests" FROM pull_request_history INNER JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id WHERE pull_requests.head_repo_id = :repoid AND pull_request_history.action = "merged" GROUP BY YEARWEEK(DATE(pull_request_history.created_at)) """) return pd.read_sql(pullsSQL, self.db, params={"repoid": str(repoid)}) ##################################### ### RISK ### ##################################### ##################################### ### VALUE ### ##################################### ##################################### ### ACTIVITY ### ##################################### @annotate(tag='issue-comments') def issue_comments(self, owner, repo=None): """ Timeseries of the count of new issue comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with new issue comments/week """ repoid = self.repoid(owner, repo) issueCommentsSQL = s.sql.text(self.__sub_table_count_by_date("issues", "issue_comments", "issue_id", "issue_id", "repo_id")) return pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='pull-requests-made-closed') def pull_requests_made_closed(self, owner, repo=None): """ Timeseries of the ratio of pull requests made/closed :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the ratio of pull requests made/closed """ repoid = self.repoid(owner, repo) pullRequestsMadeClosedSQL = s.sql.text(""" SELECT DATE(closed_on) AS "date", CAST(num_opened AS DECIMAL)/CAST(num_closed AS DECIMAL) AS "rate" FROM (SELECT COUNT(DISTINCT pull_request_id) AS num_opened, DATE(pull_request_history.created_at) AS opened_on FROM pull_request_history JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id WHERE action = 'opened' AND pull_requests.base_repo_id = :repoid GROUP BY opened_on) opened JOIN (SELECT count(distinct pull_request_id) AS num_closed, DATE(pull_request_history.created_at) AS closed_on FROM pull_request_history JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id WHERE action = 'closed' AND pull_requests.base_repo_id = :repoid GROUP BY closed_on) closed ON closed.closed_on = opened.opened_on """) return pd.read_sql(pullRequestsMadeClosedSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='watchers') def watchers(self, owner, repo=None, group_by="week"): """ Returns of the count of people who starred the repo on that date :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with new stargazers """ repoid = self.repoid(owner, repo) stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by)) df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)}) df.drop(df.index[:1], inplace=True) return df ##################################### ### EXPERIMENTAL ### ##################################### # COMMIT RELATED @annotate(tag='commits100') def commits100(self, owner, repo=None, group_by="week"): """ Timeseries of the count of commits, limited to the first 100 overall :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with commits/day """ repoid = self.repoid(owner, repo) commitsSQL = s.sql.text(self.__single_table_count_by_date('commits', group_by=group_by)) temp = pd.read_sql(commitsSQL, self.db, params={"repoid": str(repoid)}) tem = temp['commits'] > 100 return temp[tem].reset_index(drop=True) @annotate(tag='commit-comments') def commit_comments(self, owner, repo=None, group_by="week"): """ Timeseries of the count of new commit comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with new by week """ repoid = self.repoid(owner, repo) commitCommentsSQL = s.sql.text(self.__sub_table_count_by_date("commits", "commit_comments", "id", "commit_id", "project_id")) return pd.read_sql(commitCommentsSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='committer-locations') def committer_locations(self, owner, repo=None): """ Returns committers and their locations :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. :return: DataFrame with users and locations sorted by descending count of commits """ #TODO: Group by country code instead of users, needs the new schema repoid = self.repoid(owner, repo) rawContributionsSQL = s.sql.text(""" SELECT users.login, users.location, COUNT(*) AS "commits" FROM commits JOIN project_commits ON commits.id = project_commits.commit_id JOIN users ON users.id = commits.author_id WHERE project_commits.project_id = :repoid GROUP BY users.id ORDER BY commits DESC """) return pd.read_sql(rawContributionsSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='total-committers') def total_committers(self, owner, repo=None): """ Timeseries of total committers as of each week :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with total committers/week """ repoid = self.repoid(owner, repo) totalCommittersSQL = s.sql.text(""" SELECT total_committers.created_at AS "date", COUNT(total_committers.author_id) total_committers FROM ( SELECT author_id, MIN(DATE(created_at)) created_at FROM commits WHERE project_id = :repoid GROUP BY author_id ORDER BY created_at ASC) AS total_committers GROUP BY YEARWEEK(total_committers.created_at) """) df = pd.read_sql(totalCommittersSQL, self.db, params={"repoid": str(repoid)}) df['total_committers'] = df['total_committers'].cumsum() return df # ISSUE RELATED @annotate(tag='issue-activity') def issue_activity(self, owner, repo=None): """ Timeseries of issue related activity: issues opened, closed, reopened, and currently open :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with total committers/week """ repoid = self.repoid(owner, repo) issueActivity = s.sql.text(""" SELECT Date(issues.created_at) as 'date', COUNT(issues.id) as 'issues_opened', SUM(CASE WHEN issue_events.action = 'closed' THEN 1 ELSE 0 END) as 'issues_closed', SUM(CASE WHEN issue_events.action = 'reopened' THEN 1 ELSE 0 END) as 'issues_reopened' FROM issues JOIN issue_events ON issues.id = issue_events.issue_id WHERE issues.repo_id = :repoid GROUP BY YEARWEEK(issues.created_at) """) #TODO: clean this up df = pd.read_sql(issueActivity, self.db, params={"repoid": str(repoid)}) df = df.assign(issues_open = 0) globalIssuesOpened = 0 df["issues_open"] = df["issues_opened"] - df["issues_closed"] + df["issues_reopened"] dates = [] issueActivityCount = [] issuesAction = [] for index, row in df.iterrows(): for x in range(0, 4): dates = np.append(dates, row["date"]) issueActivityCount = np.append(issueActivityCount, row["issues_closed"]) issuesAction = np.append(issuesAction, "closed") issueActivityCount = np.append(issueActivityCount, row["issues_opened"]) issuesAction = np.append(issuesAction, "opened") issueActivityCount = np.append(issueActivityCount, row["issues_reopened"]) issuesAction = np.append(issuesAction, "reopened") issueActivityCount = np.append(issueActivityCount, row["issues_open"]) issuesAction = np.append(issuesAction, "open") df1 = pd.DataFrame(data=dates, columns=["date"]) df2 = pd.DataFrame(data=issueActivityCount, columns=["count"]) df3 = pd.DataFrame(data=issuesAction, columns=["action"]) df4 = df1.join(df2).join(df3) return df4 # PULL REQUEST RELATED @annotate(tag='pull-request-acceptance-rate') def pull_request_acceptance_rate(self, owner, repo=None): """ Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with ratio/day """ repoid = self.repoid(owner, repo) pullAcceptanceSQL = s.sql.text(""" SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate" FROM (SELECT COUNT(DISTINCT pull_request_id) AS num_approved, DATE(pull_request_history.created_at) AS accepted_on FROM pull_request_history JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id WHERE action = 'merged' AND pull_requests.base_repo_id = :repoid GROUP BY accepted_on) accepted JOIN (SELECT count(distinct pull_request_id) AS num_open, DATE(pull_request_history.created_at) AS date_created FROM pull_request_history JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id WHERE action = 'opened' AND pull_requests.base_repo_id = :repoid GROUP BY date_created) opened ON opened.date_created = accepted.accepted_on """) df = pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)}) print(df) return df # COMMUNITY / CONRIBUTIONS @annotate(tag='community-age') def community_age(self, owner, repo=None): """ Information helpful to determining a community's age (Currently broken) :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the first event of each type (commits, fork, ...) """ repoid = self.repoid(owner, repo) communityAgeSQL = s.sql.text(""" SELECT DATE(proj.created_at) AS "project", DATE(commits.created_at) AS "commit", DATE(frk.created_at) AS "fork", DATE(iss.created_at) AS "issue", DATE(pr.created_at) AS "pull_request" FROM commits LEFT JOIN (SELECT forked_from AS "repo_id", created_at AS "created_at" FROM projects WHERE projects.forked_from = :repoid ORDER BY created_at DESC LIMIT 1) AS frk ON frk.repo_id = commits.project_id LEFT JOIN (SELECT repo_id AS "repo_id", created_at AS "created_at" FROM issues WHERE issues.repo_id = :repoid ORDER BY created_at DESC LIMIT 1) AS iss ON iss.repo_id = commits.project_id LEFT JOIN (SELECT pull_request_history.created_at AS "created_at", pull_requests.base_repo_id AS "repo_id" FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = 'merged' ORDER BY pull_request_history.created_at DESC LIMIT 1) AS pr ON pr.repo_id = commits.project_id LEFT JOIN (SELECT projects.id AS "repo_id", created_at AS "created_at" FROM projects WHERE projects.id = :repoid) AS proj ON proj.repo_id = commits.project_id WHERE commits.project_id = :repoid ORDER BY commits.created_at DESC LIMIT 1 """) return pd.read_sql(communityAgeSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='community-engagement') def community_engagement(self, owner, repo): """ Timeseries with lots of information about issues and pull requests DataFrame returns these columns: date issues_opened issues_closed pull_requests_opened pull_requests_merged pull_requests_closed issues_opened_total issues_closed_total issues_closed_rate_this_window issues_closed_rate_total issues_delta issues_open pull_requests_opened_total pull_requests_closed_total pull_requests_closed_rate_this_window pull_requests_closed_rate_total pull_requests_delta pull_requests :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. 
:param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the associated information about a repo's activity on that specific date """ repoid = self.repoid(owner, repo) issuesFullSQL = s.sql.text(""" SELECT STR_TO_DATE(CONCAT(YEARWEEK(DATE,0),' Sunday'), '%X%V %W') as "date", SUM(issues_opened) AS "issues_opened", SUM(issues_closed) AS "issues_closed", SUM(pull_requests_opened) AS "pull_requests_opened", SUM(pull_requests_merged) AS "pull_requests_merged", SUM(pull_requests_closed) AS "pull_requests_closed" FROM ( SELECT STR_TO_DATE(CONCAT(YEARWEEK(issue_events.created_at,0),' Sunday'), '%X%V %W') as "date", issue_events.action = "closed" AND issues.pull_request = 0 AS issues_closed, 0 AS pull_requests_closed, 0 AS pull_requests_merged, issue_events.action = "reopened" AND issues.pull_request = 0 AS issues_opened, 0 AS pull_requests_opened FROM issues LEFT JOIN issue_events ON issue_events.issue_id = issues.id LEFT JOIN pull_request_history ON pull_request_history.pull_request_id = issues.pull_request_id WHERE issues.repo_id = :repoid AND issue_events.action IN ('closed', 'reopened') UNION ALL SELECT STR_TO_DATE(CONCAT(YEARWEEK(pull_request_history.created_at,0),' Sunday'), '%X%V %W') as "date", 0 AS issues_closed, pull_request_history.action = "closed" AND issues.pull_request = 1 AS pull_requests_closed, pull_request_history.action = "merged" AND issues.pull_request = 1 AS pull_requests_merged, 0 AS issues_opened, pull_request_history.action = "reopened" AND issues.pull_request = 1 AS pull_requests_opened FROM issues LEFT JOIN pull_request_history ON pull_request_history.pull_request_id = issues.pull_request_id WHERE issues.repo_id = :repoid AND pull_request_history.action IN ('closed', 'merged', 'reopened') UNION ALL SELECT STR_TO_DATE(CONCAT(YEARWEEK(issues.created_at ,0),' Sunday'), '%X%V %W') as "date", 0 AS issues_closed, 0 AS pull_requests_closed, 0 AS pull_requests_merged, issues.pull_request = 0 AS issues_opened, issues.pull_request AS pull_requests_opened FROM issues WHERE issues.repo_id = :repoid ) summary GROUP BY YEARWEEK(date, 1) """) counts = pd.read_sql(issuesFullSQL, self.db, params={"repoid": str(repoid)}) counts.drop(0, inplace=True) counts['issues_opened_total'] = counts.issues_opened.cumsum() counts['issues_closed_total'] = counts.issues_closed.cumsum() counts['issues_closed_rate_this_week'] = counts.issues_closed / counts.issues_opened counts['issues_closed_rate_total'] = counts.issues_closed_total / counts.issues_opened_total counts['issues_delta'] = counts.issues_opened - counts.issues_closed counts['issues_open'] = counts['issues_delta'].cumsum() counts['pull_requests_opened_total'] = counts.pull_requests_opened.cumsum() counts['pull_requests_closed_total'] = counts.pull_requests_closed.cumsum() counts['pull_requests_merged_total'] = counts.pull_requests_merged.cumsum() counts['pull_requests_closed_rate_this_week'] = counts.pull_requests_closed / counts.pull_requests_opened counts['pull_requests_merged_rate_this_week'] = counts.pull_requests_merged / counts.pull_requests_opened counts['pull_requests_closed_rate_total'] = counts.pull_requests_closed_total / counts.pull_requests_opened_total counts['pull_requests_merged_rate_total'] = counts.pull_requests_merged_total / counts.pull_requests_opened_total counts['pull_requests_delta'] = counts.pull_requests_opened - counts.pull_requests_closed counts['pull_requests_open'] = counts['pull_requests_delta'].cumsum() return counts @annotate(tag='contributors') def 
contributors(self, owner, repo=None): """ All the contributors to a project and the counts of their contributions :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with user's id and contributions by type, separated by user """ repoid = self.repoid(owner, repo) contributorsSQL = s.sql.text(""" SELECT users.login as name, a.id AS user, SUM(commits) AS commits, SUM(issues) AS issues, SUM(commit_comments) AS commit_comments, SUM(issue_comments) AS issue_comments, SUM(pull_requests) AS pull_requests, SUM(pull_request_comments) AS pull_request_comments, SUM(a.commits + a.issues + a.commit_comments + a.issue_comments + a.pull_requests + a.pull_request_comments) AS total FROM ( (SELECT committer_id AS id, COUNT(*) AS commits, 0 AS issues, 0 AS commit_comments, 0 AS issue_comments, 0 AS pull_requests, 0 AS pull_request_comments FROM commits INNER JOIN project_commits ON project_commits.commit_id = commits.id WHERE project_commits.project_id = :repoid GROUP BY commits.committer_id) UNION ALL (SELECT reporter_id AS id, 0 AS commits, COUNT(*) AS issues, 0 AS commit_comments, 0 AS issue_comments, 0, 0 FROM issues WHERE issues.repo_id = :repoid GROUP BY issues.reporter_id) UNION ALL (SELECT commit_comments.user_id AS id, 0 AS commits, 0 AS commit_comments, COUNT(*) AS commit_comments, 0 AS issue_comments, 0 , 0 FROM commit_comments JOIN project_commits ON project_commits.commit_id = commit_comments.commit_id WHERE project_commits.project_id = :repoid GROUP BY commit_comments.user_id) UNION ALL (SELECT issue_comments.user_id AS id, 0 AS commits, 0 AS commit_comments, 0 AS issue_comments, COUNT(*) AS issue_comments, 0, 0 FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid GROUP BY issue_comments.user_id) UNION ALL (SELECT actor_id AS id, 0, 0, 0, 0, COUNT(*) AS pull_requests, 0 FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.id WHERE pull_request_history.action = 'opened' AND pull_requests.`base_repo_id` = :repoid GROUP BY actor_id) UNION ALL (SELECT user_id AS id, 0, 0, 0, 0, 0, COUNT(*) AS pull_request_comments FROM pull_request_comments JOIN pull_requests ON pull_requests.base_commit_id = pull_request_comments.commit_id WHERE pull_requests.base_repo_id = :repoid GROUP BY user_id) ) a JOIN users ON users.id = a.id WHERE a.id IS NOT NULL GROUP BY a.id ORDER BY total DESC; """) return pd.read_sql(contributorsSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='contributions') def contributions(self, owner, repo=None, userid=None): """ Timeseries of all the contributions to a project, optionally limited to a specific user DataFrame has these columns: date commits pull_requests issues commit_comments pull_request_comments issue_comments tota :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table :param repo: The name of the repo. Unneeded if repository id was passed as owner. :param userid: The id of user if you want to limit the contributions to a specific user. 
:return: DataFrame with all of the contributions separated by day """ repoid = self.repoid(owner, repo) rawContributionsSQL = """ SELECT DATE(coms.created_at) as "date", coms.count as "commits", pulls.count as "pull_requests", iss.count as "issues", comcoms.count as "commit_comments", pullscoms.count as "pull_request_comments", isscoms.count as "issue_comments", coms.count + pulls.count + iss.count + comcoms.count + pullscoms.count + isscoms.count as "total" FROM (SELECT created_at AS created_at, COUNT(*) AS count FROM commits INNER JOIN project_commits ON project_commits.commit_id = commits.id WHERE project_commits.project_id = :repoid[[ AND commits.author_id = :userid]] GROUP BY DATE(created_at)) coms LEFT JOIN (SELECT pull_request_history.created_at AS created_at, COUNT(*) AS count FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = 'merged'[[ AND pull_request_history.actor_id = :userid]] GROUP BY DATE(created_at)) AS pulls ON DATE(pulls.created_at) = DATE(coms.created_at) LEFT JOIN (SELECT issues.created_at AS created_at, COUNT(*) AS count FROM issues WHERE issues.repo_id = :repoid[[ AND issues.reporter_id = :userid]] GROUP BY DATE(created_at)) AS iss ON DATE(iss.created_at) = DATE(coms.created_at) LEFT JOIN (SELECT commit_comments.created_at AS created_at, COUNT(*) AS count FROM commit_comments JOIN project_commits ON project_commits.commit_id = commit_comments.commit_id WHERE project_commits.project_id = :repoid[[ AND commit_comments.user_id = :userid]] GROUP BY DATE(commit_comments.created_at)) AS comcoms ON DATE(comcoms.created_at) = DATE(coms.created_at) LEFT JOIN (SELECT pull_request_comments.created_at AS created_at, COUNT(*) AS count FROM pull_request_comments JOIN pull_requests ON pull_request_comments.pull_request_id = pull_requests.id WHERE pull_requests.base_repo_id = :repoid[[ AND pull_request_comments.user_id = :userid]] GROUP BY DATE(pull_request_comments.created_at)) AS pullscoms ON DATE(pullscoms.created_at) = DATE(coms.created_at) LEFT JOIN (SELECT issue_comments.created_at AS created_at, COUNT(*) AS count FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid[[ AND issue_comments.user_id = :userid]] GROUP BY DATE(issue_comments.created_at)) AS isscoms ON DATE(isscoms.created_at) = DATE(coms.created_at) GROUP BY YEARWEEK(coms.created_at) ORDER BY DATE(coms.created_at) """ if (userid is not None and len(userid) > 0): rawContributionsSQL = rawContributionsSQL.replace('[[', '') rawContributionsSQL = rawContributionsSQL.replace(']]', '') parameterized = s.sql.text(rawContributionsSQL) return pd.read_sql(parameterized, self.db, params={"repoid": str(repoid), "userid": str(userid)}) else: rawContributionsSQL = re.sub(r'\[\[.+?\]\]', '', rawContributionsSQL) parameterized = s.sql.text(rawContributionsSQL) return pd.read_sql(parameterized, self.db, params={"repoid": str(repoid)}) def classify_contributors(self, owner, repo=None): """ Classify everyone who has interacted with a repo into - user - tester - rejected_contributor - contributor - major_contributor - maintainer :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with the id and role of contributors """ repoid = self.repoid(owner, repo) contributors = self.contributors(repoid, repo=None) sums = contributors.sum() def classify(row): role = 'user' ratio = row / sums if (ratio['issue_comments'] > 0.05): role = 'tester' if (row['pull_requests'] >= 1 and row['commits'] == 0): role = 'rejected_contributor' if (row['pull_requests'] >= 1 and row['commits'] >= 1): role = 'contributor' if (ratio['pull_requests'] > 0.10 or ratio['commits'] > 0.01): role = 'major_contributor' if (ratio['commits'] > 0.02 or ratio['pull_request_comments'] > 0.15): role = 'maintainer' return pd.Series({'user': row['user'], 'role': role}) roles = contributors.apply(classify, axis=1) return roles @annotate(tag='project-age') def project_age(self, owner, repo=None): """ Date of the project's creation :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the date of the project's creation """ repoid = self.repoid(owner, repo) projectAgeSQL = s.sql.text(""" SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) AS "date", COUNT(*) AS "{0}" FROM projects WHERE id = :repoid GROUP BY YEARWEEK(created_at) """) return pd.read_sql(projectAgeSQL, self.db, params={"repoid": str(repoid)}) # DEPENDENCY RELATED # OTHER @annotate(tag='fakes') def fakes(self, owner, repo=None): #should this be for users who contribute to the given repo? """ Timeseries of new fake users per week :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with new fake users/week """ repoid = self.repoid(owner, repo) contributorsSQL = s.sql.text(""" SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) AS "date", COUNT(*) AS fakes FROM users WHERE fake = true GROUP BY YEARWEEK(date) """) return pd.read_sql(contributorsSQL, self.db, params={"repoid": str(repoid)}) @annotate(tag='new-watchers') def new_watchers(self, owner, repo=None): """ Timeseries of new watchers per week :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with new watchers/week """ repoid = self.repoid(owner, repo) newWatchersSQL = s.sql.text(""" SELECT SUBDATE(DATE(created_at), WEEKDAY(DATE(created_at))) as "date", COUNT(*) as "watchers" FROM watchers WHERE repo_id = :repoid GROUP BY YEARWEEK(created_at) """) return pd.read_sql(newWatchersSQL, self.db, params={"repoid": str(repoid)}) ##################################### ### Project Information ### ##################################### @annotate(tag='project_information') def project_information(self, owner, repo=None): """ Basic Information about the selected Project :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with the project's row from the projects table
        """
        repoid = self.repoid(owner, repo)

        projectInfoSQL = s.sql.text("""
            SELECT * FROM projects WHERE id = :repoid
        """)

        return pd.read_sql(projectInfoSQL, self.db, params={"repoid": str(repoid)})

    @annotate(tag='get_languages')
    def get_languages(self, owner, repo=None):
        """
        Retrieves the languages that the selected project uses

        :param owner: The name of the project owner or the id of the project in the projects table.
        :param repo: The name of the repo. Unneeded if repository id was passed as owner.
        :return: DataFrame with one row per language used in the project
        """
        repoid = self.repoid(owner, repo)

        projectInfoSQL = s.sql.text("""
            SELECT DISTINCT language FROM project_languages WHERE project_id = :repoid
        """)

        return pd.read_sql(projectInfoSQL, self.db, params={"repoid": str(repoid)})

    @annotate(tag='language_bytes_used')
    def language_bytes_used(self, owner, repo=None):
        """
        Retrieves the languages that the selected project uses and the bytes written in each

        :param owner: The name of the project owner or the id of the project in the projects table.
        :param repo: The name of the repo. Unneeded if repository id was passed as owner.
        :return: DataFrame with one row per language and the bytes of code written in it
        """
        repoid = self.repoid(owner, repo)

        projectInfoSQL = s.sql.text("""
            SELECT DISTINCT language, bytes FROM project_languages WHERE project_id = :repoid
        """)

        return pd.read_sql(projectInfoSQL, self.db, params={"repoid": str(repoid)})

    @annotate(tag='edit_project_information')
    def edit_project_information(self, owner, new_name, new_description, new_url):
        """
        Edit basic information about the selected project

        :param owner: The name of the project owner or the id of the project in the projects table.
        :param new_name: The new name for the project
        :param new_description: The new description for the project
        :param new_url: The new URL for the project
        :return: None
        """
        repoid = self.repoid(owner)

        # Bind every value as a named parameter and execute the UPDATE
        # directly, since it does not return a result set.
        projectInfoSQL = s.sql.text("""
            UPDATE projects
            SET url = :url,
                name = :name,
                description = :description
            WHERE id = :repoid
        """)

        self.db.execute(projectInfoSQL, url=new_url, name=new_name,
                        description=new_description, repoid=str(repoid))
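# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the contributions() method above wraps its optional, user-specific SQL
# clauses in "[[ ... ]]" markers and either unwraps or strips them depending
# on whether a userid was supplied. The standalone snippet below reproduces
# that templating step on a made-up query string.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import re

    template = ("SELECT COUNT(*) FROM commits "
                "WHERE project_id = :repoid[[ AND author_id = :userid]]")

    def _render(sql, userid=None):
        """Keep the optional [[ ... ]] clauses when a userid is given,
        otherwise drop them entirely (mirrors contributions())."""
        if userid is not None:
            return sql.replace('[[', '').replace(']]', '')
        return re.sub(r'\[\[.+?\]\]', '', sql)

    print(_render(template))               # repo-wide query, no user filter
    print(_render(template, userid='42'))  # per-user query keeps the filter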
py
1a45e0cee59a40d5b17bfb2ac203ee94918480aa
"""Console script for cubids.""" import argparse import subprocess import os import sys import re import logging import tempfile import tqdm import shutil import pandas as pd from cubids import CuBIDS from pathlib import Path from .validator import (build_validator_call, run_validator, parse_validator_output, build_subject_paths) from .metadata_merge import merge_json_into_json logging.basicConfig(level=logging.INFO) logger = logging.getLogger('cubids-cli') GIT_CONFIG = os.path.join(os.path.expanduser("~"), '.gitconfig') def cubids_validate(): '''Command Line Interface function for running the bids validator.''' parser = argparse.ArgumentParser( description="cubids-validate: Wrapper around the official " "BIDS Validator", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='the root of a BIDS dataset. It should contain ' 'sub-X directories and dataset_description.json') parser.add_argument('output_prefix', type=Path, action='store', help='file prefix to which tabulated validator output ' 'is written.') parser.add_argument('--sequential', action='store_true', default=False, help='Run the BIDS validator sequentially ' 'on each subject.', required=False) parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.', default=None) parser.add_argument('--ignore_nifti_headers', action='store_true', default=False, help='Disregard NIfTI header content during' ' validation', required=False) parser.add_argument('--ignore_subject_consistency', action='store_true', default=False, help='Skip checking that any given file for one' ' subject is present for all other subjects', required=False) parser.add_argument('--sequential-subjects', action='store', default=None, help='List: Filter the sequential run to only include' ' the listed subjects. e.g. 
--sequential-subjects ' 'sub-01 sub-02 sub-03', nargs='+', required=False) opts = parser.parse_args() # Run directly from python using subprocess if opts.container is None: if not opts.sequential: # run on full dataset call = build_validator_call(str(opts.bids_dir), opts.ignore_nifti_headers, opts.ignore_subject_consistency) ret = run_validator(call) if ret.returncode != 0: logger.error("Errors returned from validator run, parsing now") # parse the string output parsed = parse_validator_output(ret.stdout.decode('UTF-8')) if parsed.shape[1] < 1: logger.info("No issues/warnings parsed, your dataset" " is BIDS valid.") sys.exit(0) else: logger.info("BIDS issues/warnings found in the dataset") if opts.output_prefix: # normally, write dataframe to file in CLI logger.info("Writing issues out to file") parsed.to_csv(str(opts.output_prefix) + "_validation.csv", index=False) sys.exit(0) else: # user may be in python session, return dataframe return parsed else: logger.info("Prepping sequential validator run...") # build a dictionary with {SubjectLabel: [List of files]} subjects_dict = build_subject_paths(opts.bids_dir) logger.info("Running validator sequentially...") # iterate over the dictionary parsed = [] if opts.sequential_subjects: subjects_dict = {k: v for k, v in subjects_dict.items() if k in opts.sequential_subjects} assert len(list(subjects_dict.keys())) > 1, ("No subjects found" " in filter") for subject, files_list in tqdm.tqdm(subjects_dict.items()): logger.info(" ".join(["Processing subject:", subject])) # create a temporary directory and symlink the data with tempfile.TemporaryDirectory() as tmpdirname: for fi in files_list: # cut the path down to the subject label bids_start = fi.find(subject) # maybe it's a single file if bids_start < 1: bids_folder = tmpdirname fi_tmpdir = tmpdirname else: bids_folder = Path(fi[bids_start:]).parent fi_tmpdir = tmpdirname + '/' + str(bids_folder) if not os.path.exists(fi_tmpdir): os.makedirs(fi_tmpdir) output = fi_tmpdir + '/' + str(Path(fi).name) shutil.copy2(fi, output) # run the validator nifti_head = opts.ignore_nifti_headers subj_consist = opts.ignore_subject_consistency call = build_validator_call(tmpdirname, nifti_head, subj_consist) ret = run_validator(call) # parse output if ret.returncode != 0: logger.error("Errors returned " "from validator run, parsing now") # parse the output and add to list if it returns a df decoded = ret.stdout.decode('UTF-8') tmp_parse = parse_validator_output(decoded) if tmp_parse.shape[1] > 1: tmp_parse['subject'] = subject parsed.append(tmp_parse) # concatenate the parsed data and exit, we're goin home fellas if len(parsed) < 1: logger.info("No issues/warnings parsed, your dataset" " is BIDS valid.") sys.exit(0) else: parsed = pd.concat(parsed, axis=0) subset = parsed.columns.difference(['subject']) parsed = parsed.drop_duplicates(subset=subset) logger.info("BIDS issues/warnings found in the dataset") if opts.output_prefix: # normally, write dataframe to file in CLI logger.info("Writing issues out to file") parsed.to_csv(str(opts.output_prefix) + "_validation.csv", index=False) sys.exit(0) else: # user may be in python session, return dataframe return parsed # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro" output_dir_link = str(opts.output_prefix.parent.absolute()) + ":/csv:rw" linked_output_prefix = "/csv/" + opts.output_prefix.name if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '-v', 
GIT_CONFIG+":/root/.gitconfig", '-v', output_dir_link, '--entrypoint', 'cubids-validate', opts.container, '/bids', linked_output_prefix] if opts.ignore_nifti_headers: cmd.append('--ignore_nifti_headers') if opts.ignore_subject_consistency: cmd.append('--ignore_subject_consistency') elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, '-B', output_dir_link, opts.container, 'cubids-validate', '/bids', linked_output_prefix] if opts.ignore_nifti_headers: cmd.append('--ignore_nifti_headers') if opts.ignore_subject_consistency: cmd.append('--ignore_subject_consistency') if opts.sequential: cmd.append('--sequential') print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def bids_sidecar_merge(): parser = argparse.ArgumentParser( description="bids-sidecar-merge: merge critical keys from one " "sidecar to another", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('from_json', type=Path, action='store', help='Source json file.') parser.add_argument('to_json', type=Path, action='store', help='destination json. This file will have data ' 'from `from_json` copied into it.') opts = parser.parse_args() merge_status = merge_json_into_json(opts.from_json, opts.to_json, raise_on_error=False) sys.exit(merge_status) def cubids_group(): '''Command Line Interface function for finding key and param groups.''' parser = argparse.ArgumentParser( description="cubids-group: find key and parameter groups in BIDS", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='the root of a BIDS dataset. It should contain ' 'sub-X directories and dataset_description.json') parser.add_argument('output_prefix', type=Path, action='store', help='file prefix to which a _summary.csv, _files.csv ' 'and _group.csv are written.') parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') parser.add_argument('--use-datalad', action='store_true', help='ensure that there are no untracked changes ' 'before finding groups') parser.add_argument('--acq-group-level', default='subject', action='store', help='Level at which acquisition groups are created ' 'options: "subject" or "session"') parser.add_argument('--config', action='store', type=Path, help='path to a config file for grouping') opts = parser.parse_args() # Run directly from python using if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=opts.use_datalad, acq_group_level=opts.acq_group_level, grouping_config=opts.config) if opts.use_datalad and not bod.is_datalad_clean(): raise Exception("Untracked change in " + str(opts.bids_dir)) bod.get_CSVs(str(opts.output_prefix),) sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids" output_dir_link = str(opts.output_prefix.parent.absolute()) + ":/csv:rw" apply_config = opts.config is not None if apply_config: input_config_dir_link = str( opts.config.parent.absolute()) + ":/in_config:ro" linked_input_config = "/in_config/" + opts.config.name linked_output_prefix = "/csv/" + opts.output_prefix.name if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '-v', GIT_CONFIG+":/root/.gitconfig", '-v', output_dir_link, '--entrypoint', 'cubids-group', opts.container, '/bids', linked_output_prefix] if apply_config: cmd.insert(3, '-v') cmd.insert(4, input_config_dir_link) cmd += 
['--config', linked_input_config]

    elif container_type == 'singularity':
        cmd = ['singularity', 'exec', '--cleanenv',
               '-B', bids_dir_link,
               '-B', output_dir_link,
               opts.container, 'cubids-group',
               '/bids', linked_output_prefix]
        if apply_config:
            cmd.insert(3, '-B')
            cmd.insert(4, input_config_dir_link)
            cmd += ['--config', linked_input_config]

    if opts.use_datalad:
        cmd.append("--use-datalad")
    if opts.acq_group_level:
        cmd.append("--acq-group-level")
        cmd.append(str(opts.acq_group_level))

    print("RUNNING: " + ' '.join(cmd))
    proc = subprocess.run(cmd)
    sys.exit(proc.returncode)


def cubids_apply():
    ''' Command Line Interface function for applying the csv changes.'''

    parser = argparse.ArgumentParser(
        description="cubids-apply: apply the changes specified in a csv "
        "to a BIDS directory",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('bids_dir',
                        type=Path,
                        action='store',
                        help='the root of a BIDS dataset. It should contain '
                        'sub-X directories and dataset_description.json')
    parser.add_argument('edited_summary_csv',
                        type=Path,
                        action='store',
                        help='the _summary.csv that has been edited in the '
                        'MergeInto and RenameKeyGroup columns.')
    parser.add_argument('files_csv',
                        type=Path,
                        action='store',
                        help='the _files.csv that the _summary.csv '
                        'corresponds to.')
    parser.add_argument('new_csv_prefix',
                        type=Path,
                        action='store',
                        help='file prefix for writing the new _summary.csv, '
                        '_files.csv and _group.csv that have been edited.')
    parser.add_argument('--use-datalad',
                        action='store_true',
                        help='ensure that there are no untracked changes '
                        'before finding groups')
    parser.add_argument('--container',
                        action='store',
                        help='Docker image tag or Singularity image file.')
    parser.add_argument('--acq-group-level',
                        default='subject',
                        action='store',
                        help='Level at which acquisition groups are created '
                        'options: "subject" or "session"')
    parser.add_argument('--config',
                        action='store',
                        type=Path,
                        help='path to a config file for grouping')

    opts = parser.parse_args()

    # Run directly from python using subprocess
    if opts.container is None:
        bod = CuBIDS(data_root=str(opts.bids_dir),
                     use_datalad=opts.use_datalad,
                     acq_group_level=opts.acq_group_level,
                     grouping_config=opts.config)
        if opts.use_datalad:
            if not bod.is_datalad_clean():
                raise Exception("Untracked change in " + str(opts.bids_dir))
        bod.apply_csv_changes(str(opts.edited_summary_csv),
                              str(opts.files_csv),
                              str(opts.new_csv_prefix),
                              raise_on_error=False)
        sys.exit(0)

    # Run it through a container
    container_type = _get_container_type(opts.container)
    bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids"
    # Mount the parent directories of the input and output csvs
    input_summary_csv_dir_link = str(
        opts.edited_summary_csv.parent.absolute()) + ":/in_summary_csv:ro"
    input_files_csv_dir_link = str(
        opts.files_csv.parent.absolute()) + ":/in_files_csv:ro"
    output_csv_dir_link = str(
        opts.new_csv_prefix.parent.absolute()) + ":/out_csv:rw"

    apply_config = opts.config is not None
    if apply_config:
        input_config_dir_link = str(
            opts.config.parent.absolute()) + ":/in_config:ro"
        linked_input_config = "/in_config/" + opts.config.name

    linked_input_summary_csv = "/in_summary_csv/" \
        + opts.edited_summary_csv.name
    linked_input_files_csv = "/in_files_csv/" + opts.files_csv.name
    linked_output_prefix = "/out_csv/" + opts.new_csv_prefix.name
    if container_type == 'docker':
        cmd = ['docker', 'run', '--rm',
               '-v', bids_dir_link,
               '-v', GIT_CONFIG+":/root/.gitconfig",
               '-v', input_summary_csv_dir_link,
               '-v', input_files_csv_dir_link,
               '-v', output_csv_dir_link,
'--entrypoint', 'cubids-apply', opts.container, '/bids', linked_input_summary_csv, linked_input_files_csv, linked_output_prefix] if apply_config: cmd.insert(3, '-v') cmd.insert(4, input_config_dir_link) cmd += ['--config', linked_input_config] elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, '-B', input_summary_csv_dir_link, '-B', input_files_csv_dir_link, '-B', output_csv_dir_link, opts.container, 'cubids-apply', '/bids', linked_input_summary_csv, linked_input_files_csv, linked_output_prefix] if apply_config: cmd.insert(3, '-B') cmd.insert(4, input_config_dir_link) cmd += ['--config', linked_input_config] if opts.use_datalad: cmd.append("--use-datalad") if opts.acq_group_level: cmd.append("--acq-group-level") cmd.append(str(opts.acq_group_level)) print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def cubids_datalad_save(): ''' Command Line Interfcae function for performing datalad save.''' parser = argparse.ArgumentParser( description="cubids-datalad-save: perform a DataLad save on a BIDS " "directory", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='the root of a BIDS dataset. It should contain ' 'sub-X directories and dataset_description.json') parser.add_argument('-m', action='store', help='message for this commit') parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') opts = parser.parse_args() # Run directly from python using if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=True) bod.datalad_save(message=opts.m) sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids" if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '-v', GIT_CONFIG+":/root/.gitconfig", '--entrypoint', 'cubids-datalad-save', opts.container, '/bids', '-m', opts.m] elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, opts.container, 'cubids-datalad-save', '/bids', '-m', opts.m] print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def cubids_undo(): ''' Command Line Interface function for reverting a commit.''' parser = argparse.ArgumentParser( description="cubids-undo: revert most recent commit", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='the root of a BIDS dataset. 
It should contain ' 'sub-X directories and dataset_description.json') parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') opts = parser.parse_args() # Run directly from python using if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=True) bod.datalad_undo_last_commit() sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids" if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '-v', GIT_CONFIG+":/root/.gitconfig", '--entrypoint', 'cubids-undo', opts.container, '/bids'] elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, opts.container, 'cubids-undo', '/bids'] print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def cubids_copy_exemplars(): ''' Command Line Interface function for purging scan associations.''' parser = argparse.ArgumentParser( description="cubids-copy-exemplars: create and save a directory with " " one subject from each Acquisition Group in the BIDS dataset", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='absolute path to the root of a BIDS dataset. ' 'It should contain sub-X directories and ' 'dataset_description.json.') parser.add_argument('exemplars_dir', type=Path, action='store', help='absolute path to the root of a BIDS dataset ' 'containing one subject from each Acquisition Group. ' 'It should contain sub-X directories and ' 'dataset_description.json.') parser.add_argument('exemplars_csv', type=Path, action='store', help='absolute path to the .csv file that lists one ' 'subject from each Acqusition Group ' '(*_AcqGrouping.csv from the cubids-group output)') parser.add_argument('--use-datalad', action='store_true', help='ensure that there are no untracked changes ' 'before finding groups') parser.add_argument('--min-group-size', action='store', default=1, help='minimum number of subjects an Acquisition Group ' 'must have in order to be included in the exemplar ' 'dataset ', required=False) # parser.add_argument('--include-groups', # action='store', # nargs='+', # default=[], # help='only include an exemplar subject from these ' # 'listed Acquisition Groups in the exemplar dataset ', # required=False) parser.add_argument('--force-unlock', action='store_true', default=False, help='unlock exemplar subjects before copying ', required=False) parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') opts = parser.parse_args() # Run directly from python using if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=opts.use_datalad) if opts.use_datalad: if not bod.is_datalad_clean(): raise Exception("Untracked changes. 
Need to save " + str(opts.bids_dir) + " before coyping exemplars") bod.copy_exemplars(str(opts.exemplars_dir), str(opts.exemplars_csv), min_group_size=opts.min_group_size, force_unlock=opts.force_unlock, raise_on_error=True) sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro" exemplars_dir_link = str(opts.exemplars_dir.absolute()) + ":/exemplars:ro" exemplars_csv_link = str(opts.exemplars_csv.absolute()) + ":/in_csv:ro" if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '-v', exemplars_dir_link, '-v', GIT_CONFIG+":/root/.gitconfig", '-v', exemplars_csv_link, '--entrypoint', 'cubids-copy-exemplars', opts.container, '/bids', '/exemplars', '/in_csv'] if opts.force_unlock: cmd.append('--force-unlock') if opts.min_group_size: cmd.append('--min-group-size') elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, '-B', exemplars_dir_link, '-B', exemplars_csv_link, opts.container, 'cubids-copy-exemplars', '/bids', '/exemplars', '/in_csv'] if opts.force_unlock: cmd.append('--force-unlock') if opts.min_group_size: cmd.append('--min-group-size') print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def cubids_add_nifti_info(): ''' Command Line Interface function for purging scan associations.''' parser = argparse.ArgumentParser( description="cubids-add-nifti-info: Add information from nifti" "files to the sidecars of each dataset", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='absolute path to the root of a BIDS dataset. ' 'It should contain sub-X directories and ' 'dataset_description.json.') parser.add_argument('--use-datalad', action='store_true', help='ensure that there are no untracked changes ' 'before finding groups') parser.add_argument('--force-unlock', action='store_true', default=False, help='unlock dataset before adding nift info ', required=False) parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') opts = parser.parse_args() # Run directly from python using if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=opts.use_datalad) if opts.use_datalad: if bod.is_datalad_clean() and not opts.force_unlock: raise Exception("Need to unlock " + str(opts.bids_dir)) bod.add_nifti_info(force_unlock=opts.force_unlock, raise_on_error=True) sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro" if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '-v', GIT_CONFIG+":/root/.gitconfig", '--entrypoint', 'cubids-add-nifti-info', opts.container, '/bids'] if opts.force_unlock: cmd.append('--force-unlock') elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, opts.container, 'cubids-add-nifti-info', '/bids'] if opts.force_unlock: cmd.append('--force-unlock') print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def cubids_purge(): ''' Command Line Interface function for purging scan associations.''' parser = argparse.ArgumentParser( description="cubids-purge: purge associations from the dataset", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='absolute path to the 
root of a BIDS dataset. ' 'It should contain sub-X directories and ' 'dataset_description.json.') parser.add_argument('scans', type=Path, action='store', help='absolute path to the txt file of scans whose ' 'associations should be purged.') parser.add_argument('--use-datalad', action='store_true', help='ensure that there are no untracked changes ' 'before finding groups') parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') opts = parser.parse_args() # Run directly from python using if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=opts.use_datalad) if opts.use_datalad: if not bod.is_datalad_clean(): raise Exception("Untracked change in " + str(opts.bids_dir)) bod.purge(str(opts.scans), raise_on_error=False) sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids" input_scans_link = str( opts.scans.parent.absolute()) + ":/in_scans:ro" if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '-v', GIT_CONFIG+":/root/.gitconfig", '-v', input_scans_link, '--entrypoint', 'cubids-purge', opts.container, '/bids', input_scans_link] elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, '-B', input_scans_link, opts.container, 'cubids-purge', '/bids', input_scans_link] print("RUNNING: " + ' '.join(cmd)) if opts.use_datalad: cmd.append("--use-datalad") proc = subprocess.run(cmd) sys.exit(proc.returncode) def cubids_remove_metadata_fields(): ''' Command Line Interface function for deteling fields from metadata.''' parser = argparse.ArgumentParser( description="cubids-remove-metadata-fields: delete fields from " "metadata", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='the root of a BIDS dataset. It should contain ' 'sub-X directories and dataset_description.json') parser.add_argument('--fields', nargs='+', action='store', default=[], help='space-separated list of metadata fields to ' 'remove.') parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') opts = parser.parse_args() # Run directly from python if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=False) bod.remove_metadata_fields(opts.fields) sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:rw" if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '--entrypoint', 'cubids-remove-metadata-fields', opts.container, '/bids', '--fields'] + opts.fields elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, opts.container, 'cubids-remove-metadata-fields', '/bids', '--fields'] + opts.fields print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def cubids_print_metadata_fields(): '''Command Line Interface function that prints unique metadata fields.''' parser = argparse.ArgumentParser( description="cubids-print-metadata-fields: print all unique " "metadata fields", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('bids_dir', type=Path, action='store', help='the root of a BIDS dataset. 
It should contain ' 'sub-X directories and dataset_description.json') parser.add_argument('--container', action='store', help='Docker image tag or Singularity image file.') opts = parser.parse_args() # Run directly from python if opts.container is None: bod = CuBIDS(data_root=str(opts.bids_dir), use_datalad=False) fields = bod.get_all_metadata_fields() print("\n".join(fields)) sys.exit(0) # Run it through a container container_type = _get_container_type(opts.container) bids_dir_link = str(opts.bids_dir.absolute()) + ":/bids:ro" if container_type == 'docker': cmd = ['docker', 'run', '--rm', '-v', bids_dir_link, '--entrypoint', 'cubids-print-metadata-fields', opts.container, '/bids'] elif container_type == 'singularity': cmd = ['singularity', 'exec', '--cleanenv', '-B', bids_dir_link, opts.container, 'cubids-print-metadata-fields', '/bids'] print("RUNNING: " + ' '.join(cmd)) proc = subprocess.run(cmd) sys.exit(proc.returncode) def _get_container_type(image_name): '''Gets and returns the container type.''' # If it's a file on disk, it must be a singularity image if Path(image_name).exists(): return "singularity" # It needs to match a docker tag pattern to be docker if re.match(r"(?:.+\/)?([^:]+)(?::.+)?", image_name): return "docker" raise Exception("Unable to determine the container type of " + image_name)
py
1a45e118eeb6e0db021e246e2d62de536a97e47c
# -*- coding: utf8 -*-

from __future__ import unicode_literals, print_function

from pytest import raises

from lnc.lib.exceptions import ProgramError
from lnc.lib.toc import read_toc, generate_toc, escape


CORRECT_TOC_TEXT = """utf8
1 Chapter 1
* 2 Page 2
* 2 Subchapter 1
** 3 Some other caption
* 4 Subchapter 2
** 5 Page 5
10 Chapter 2
* 12 Another page
15 Not a chapter
100 List of figures
* 1 Figure 1
* 10 Figure 2"""

CORRECT_TOC_PARSED = [
    [1, "Chapter 1",
     [2, "Page 2"],
     [2, "Subchapter 1",
      [3, "Some other caption"]],
     [4, "Subchapter 2",
      [5, "Page 5"]]],
    [10, "Chapter 2",
     [12, "Another page"]],
    [15, "Not a chapter"],
    [100, "List of figures",
     [1, "Figure 1"],
     [10, "Figure 2"]]]


def test_read_toc_correct(tmpdir):
    test_toc = tmpdir.join("test_toc.txt")
    test_toc.write(CORRECT_TOC_TEXT)
    assert read_toc(test_toc.open()) == CORRECT_TOC_PARSED


def check_read_toc_fails(tmpdir, contents):
    test_toc = tmpdir.join("test_toc.txt")
    test_toc.write(contents)
    raises(ProgramError, read_toc, test_toc.open())


def test_read_toc_wrong_encoding(tmpdir):
    check_read_toc_fails(tmpdir, "utff8\n1 Page 1")


_TOC_WRONG_LEVELS = """utf8
1 Chapter 1
** 2 Page 2"""


def test_read_toc_wrong_levels(tmpdir):
    check_read_toc_fails(tmpdir, _TOC_WRONG_LEVELS)


def test_read_toc_malformed_line(tmpdir):
    check_read_toc_fails(tmpdir, "utf8\n1.5 abc\n")
    check_read_toc_fails(tmpdir, "utf8\n1 abc\n* - a\n")
    check_read_toc_fails(tmpdir, "utf8\n-1 abc\n")


def test_read_toc_nonlatin(tmpdir):
    test_toc = tmpdir.join("test_toc.txt")
    test_toc.write_text("utf8\n1 Оглавление\n", encoding="utf8")
    assert read_toc(test_toc.open()) == [[1, "Оглавление"]]


def _write_entry(f, level, entry):
    print("(%d %d %s " % (level, entry[0], entry[1]), file=f, end="")
    for e in entry[2:]:
        _write_entry(f, level + 1, e)
    print(")", file=f, end="")


_TOC_FOR_PRINT_TEST = """utf8
1 P 1
* 2 Ch 1.1
** 3 Pg 3
* 4 Ch 1.2
** 4 P 4
5 P 5
"""


def test_print_toc(tmpdir):
    test_toc = tmpdir.join("test_toc.txt")
    test_toc.write(_TOC_FOR_PRINT_TEST)
    test_output = tmpdir.join("test_output.txt")
    generate_toc(str(test_toc), str(test_output), _write_entry, "[", "]")
    res = "[\n(0 1 P 1 (1 2 Ch 1.1 (2 3 Pg 3 ))(1 4 Ch 1.2 (2 4 P 4 )))(0 5 P 5 )]\n"
    assert test_output.check()
    assert test_output.read() == res


def test_escape():
    assert escape("123", "\"'") == "123"
    assert escape("1\"2'3", "\"'") == "1\\\"2\\'3"
    assert escape("\\12", "") == "\\12"
    assert escape("\\12", "\\") == "\\\\12"
py
1a45e18f19f15843c8baaff593327c30a7711244
from datasource.data_orchestrator import DataOrchestrator
from datasource.factors.factors_processor import FactorsProcessor
from logic.embeddings.spacy_embedder import SpacyEmbedder
from logic.reduction.umap_reducer import UmapReducer
from logic.clustering.hdbscan_clusterer import HDBScanClusterer
from logic.ml_model_dao.ml_model_docker_volume_dao import MLModelDockerVolumeDAO
from elastic.elastic_indices import get_factor_recommendation_index_id


class FactorsOrchestrator(DataOrchestrator):
    UMAP_REDUCER_MODEL_NAME = 'umap-reducer'
    HDBSCAN_CLUSTERER_MODEL_NAME = 'hdbscan-clusterer'

    def __init__(self, data_source, es_host, kb_index,
                 use_saved_reducer=False, use_saved_clusterer=False):
        self.data_source = data_source
        self.load_reducer = use_saved_reducer
        self.load_clusterer = use_saved_clusterer
        self.ml_model_dao = MLModelDockerVolumeDAO(es_host, kb_index)
        self.curation_index_id = get_factor_recommendation_index_id(kb_index)

    def orchestrate(self):
        if self.load_reducer:
            reducer = self.load_model(self.UMAP_REDUCER_MODEL_NAME)
        else:
            reducer = UmapReducer(300, 2, 0.01, 15)

        if self.load_clusterer:
            clusterer = self.load_model(self.HDBSCAN_CLUSTERER_MODEL_NAME)
        else:
            clusterer = HDBScanClusterer(2, 15, 8, 0.01)

        embedder = SpacyEmbedder(normalize=True)
        processor = FactorsProcessor(self.data_source, reducer, clusterer, embedder)
        data = processor.process()

        if not self.load_reducer:
            self.save_model(reducer, self.UMAP_REDUCER_MODEL_NAME)
        if not self.load_clusterer:
            self.save_model(clusterer, self.HDBSCAN_CLUSTERER_MODEL_NAME)

        return data

    def load_model(self, name):
        return self.ml_model_dao.load(name, self.curation_index_id)

    def save_model(self, data, model_name):
        self.ml_model_dao.save(data, model_name, self.curation_index_id)
py
1a45e1b3bd6edd47fdd70de8df4e11cd39b6fd59
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def model(mnist, epoches=1000, batch_size=100, learning_rate=0.003):
    print("Start model")

    with tf.name_scope('X'):
        X = tf.placeholder(tf.float32, [None, 784], name='X')
        x_image = tf.reshape(X, [-1, 28, 28, 1])

    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784, 10]), name='weights')

    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]), name='biases')

    with tf.name_scope('Wx_plus_b'):
        # Model: Y = X.W + b
        Y = tf.nn.softmax(tf.matmul(tf.reshape(X, [-1, 784]), W) + b, name='labels')

    # Placeholder for the correct input labels
    with tf.name_scope('Y_'):
        Y_ = tf.placeholder(tf.float32, [None, 10])

    with tf.name_scope('xentropy'):
        # Loss function: H = Sum(Y_ * log(Y))
        cross_entropy = -tf.reduce_sum(Y_ * tf.log(Y))

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            # Fraction of correct answers found in the batch
            is_correct = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
        with tf.name_scope('xentropy_mean'):
            accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

    with tf.name_scope('train'):
        # Optimize the loss function with gradient descent;
        # 0.003 is the gradient step (a hyperparameter)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        # Minimize the loss
        train_step = optimizer.minimize(cross_entropy)

    tf.summary.image('input', x_image, 10)
    tf.summary.histogram('weights', W)
    tf.summary.histogram('biases', b)
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('cross_entropy', cross_entropy)

    with tf.Session() as sess:
        merged = tf.summary.merge_all()  # Merge all the summaries and write them out
        writer = tf.summary.FileWriter("/tmp/tensorflow/one_layer_nn", sess.graph)
        tf.global_variables_initializer().run()

        for i in range(epoches):
            # Load a batch of images and class labels
            batch_X, batch_Y = mnist.train.next_batch(batch_size)
            train_data = {X: batch_X, Y_: batch_Y}

            # train
            sess.run(train_step, feed_dict=train_data)

            if i % 10 == 0:
                test_data = {X: mnist.test.images, Y_: mnist.test.labels}
                summary, a = sess.run([merged, accuracy], feed_dict=test_data)
                writer.add_summary(summary, i)
                if i % 200 == 0:
                    print("Test: {}".format(a))

        writer.close()


def main():
    print("MNIST single layer NN")
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=True)

    tf.set_random_seed(0)
    tf.reset_default_graph()

    model(mnist, epoches=10000)


if __name__ == '__main__':
    main()
py
1a45e37ca2660efee0a3c7ffb939d1947012b7a8
""" NCL_station_3.py ================ This script illustrates the following concepts: - Drawing station numbers on a map, and removing ones that overlap - Attaching lots of text strings to a map - Using Cartopy's GeoAxes.gridlines as a workaround to adding tick labels on Axes with Mercator (or another) map projection See following URLs to see the reproduced NCL plot & script: - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/station_3.ncl - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/station_3_1_lg.png and https://www.ncl.ucar.edu/Applications/Images/station_3_2_lg.png """ ################################################### # Import packages: import numpy as np import pandas as pd import cartopy.crs as ccrs import cartopy.feature as cfeature from matplotlib import pyplot as plt import matplotlib.ticker as mticker import geocat.datafiles as gdf ################################################### # Read in data: # Open a ascii data file using pandas read_csv and assigning column names ds = pd.read_csv( gdf.get('ascii_files/istasyontablosu_son.txt'), delimiter='\\s+', names=['index', 'station', 'year1', 'year2', 'number', 'lat', 'lon']) # Get number of stations npts = len(ds) # Extract variables no = ds.index + 1 # +1 because Pandas' RangeIndex defaults start with 0 lat = ds.lat lon = ds.lon ############################################################################## # Helper function to add plot elements to the axes def create_axes(maintitle): # Generate figure (set its size (width, height) in inches) fig = plt.figure(figsize=(12, 6.5)) # Generate axes ax = plt.axes(projection=ccrs.Mercator()) # Set extent to show particular area of the map ax.set_extent([25.5, 45.2, 35.5, 42.5], ccrs.PlateCarree()) # Add state boundaries other lake features ax.add_feature(cfeature.LAND, facecolor='none', edgecolor='gray') # Draw gridlines gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, dms=False, x_inline=False, y_inline=False, linewidth=1, color="gray", alpha=0.25) # Set frequency of gridlines in the x and y directions gl.xlocator = mticker.FixedLocator(np.arange(26, 45, 2)) gl.ylocator = mticker.FixedLocator(np.arange(36, 43, 1)) # Turn top/right labels gl.top_labels = False gl.right_labels = False # Set label sizes gl.xlabel_style = {"rotation": 0, "size": 14} gl.ylabel_style = {"rotation": 0, "size": 14} # Manually turn off ticks on top and right spines ax.tick_params(axis='x', top=False) ax.tick_params(axis='y', right=False) # Add title ax.set_title(maintitle, fontweight='bold', fontsize=18, y=1.03) return fig, ax ############################################################################## # Plot with texts overlapping fig, ax = create_axes('Overlapping text strings') # Add all station number texts for i in range(npts): ax.text(lon[i], lat[i], no[i], fontsize=8, fontweight='bold', va='center', ha='center', transform=ccrs.PlateCarree()) # Show the plot plt.tight_layout() plt.show() ############################################################################## # Plot without texts overlapping fig, ax = create_axes('Overlapping text strings removed') # Transpose the array of longitude and latitude for easier access of the location of each station point location = np.transpose(np.array([lon, lat])) # Create an array of booleans denoting if station would be removed remove = np.full(npts, False) # Currently minimum distance is calculated through finding distance between two suitable stations # In the future we would like to find mindist by finding the 
width and height of texts in pixel coordinates mindist = np.sqrt(np.sum(np.square(location[123] - location[124]))) # Tag station to be removed using array `remove` # Loop through every pair of stations and calculate distances between them for i in range(npts): for j in range(npts): # Calculate euclidean distance with numpy functions dist = np.sqrt(np.sum(np.square(location[j] - location[i]))) if dist <= mindist and i != j and not remove[j]: # Tag one of the stations to be removed if distance between them allows for overlap, # they are different stations, and if the other station will not be removed remove[i] = True # Add text if it is not tagged to be removed for i in range(npts): if not remove[i]: ax.text(lon[i], lat[i], no[i], fontsize=8, fontweight='bold', va='center', ha='center', transform=ccrs.PlateCarree()) # Show the plot plt.tight_layout() plt.show()
py
1a45e433c0b47abe7e1d3def5c4338d6666518c1
# Copyright (c) 2014-present PlatformIO <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import os import re import sys from imp import load_source from os.path import basename, dirname, isdir, isfile, join import click import semantic_version from platformio import __version__, app, exception, util from platformio.compat import PY2, hashlib_encode_data, is_bytes from platformio.managers.core import get_core_package_dir from platformio.managers.package import BasePkgManager, PackageManager from platformio.proc import (BuildAsyncPipe, copy_pythonpath_to_osenv, exec_command, get_pythonexe_path) from platformio.project.config import ProjectConfig from platformio.project.helpers import (get_project_boards_dir, get_project_core_dir, get_project_packages_dir, get_project_platforms_dir) try: from urllib.parse import quote except ImportError: from urllib import quote class PlatformManager(BasePkgManager): def __init__(self, package_dir=None, repositories=None): if not repositories: repositories = [ "https://dl.bintray.com/platformio/dl-platforms/manifest.json", "{0}://dl.platformio.org/platforms/manifest.json".format( "https" if app.get_setting("enable_ssl") else "http") ] BasePkgManager.__init__(self, package_dir or get_project_platforms_dir(), repositories) @property def manifest_names(self): return ["platform.json"] def get_manifest_path(self, pkg_dir): if not isdir(pkg_dir): return None for name in self.manifest_names: manifest_path = join(pkg_dir, name) if isfile(manifest_path): return manifest_path return None def install(self, name, requirements=None, with_packages=None, without_packages=None, skip_default_package=False, after_update=False, silent=False, force=False, **_): # pylint: disable=too-many-arguments, arguments-differ platform_dir = BasePkgManager.install(self, name, requirements, silent=silent, force=force) p = PlatformFactory.newPlatform(platform_dir) # don't cleanup packages or install them after update # we check packages for updates in def update() if after_update: return True p.install_packages(with_packages, without_packages, skip_default_package, silent=silent, force=force) return self.cleanup_packages(list(p.packages)) def uninstall(self, package, requirements=None, after_update=False): if isdir(package): pkg_dir = package else: name, requirements, url = self.parse_pkg_uri(package, requirements) pkg_dir = self.get_package_dir(name, requirements, url) if not pkg_dir: raise exception.UnknownPlatform(package) p = PlatformFactory.newPlatform(pkg_dir) BasePkgManager.uninstall(self, pkg_dir, requirements) # don't cleanup packages or install them after update # we check packages for updates in def update() if after_update: return True return self.cleanup_packages(list(p.packages)) def update( # pylint: disable=arguments-differ self, package, requirements=None, only_check=False, only_packages=False): if isdir(package): pkg_dir = package else: name, requirements, url = self.parse_pkg_uri(package, requirements) pkg_dir = self.get_package_dir(name, requirements, url) 
if not pkg_dir: raise exception.UnknownPlatform(package) p = PlatformFactory.newPlatform(pkg_dir) pkgs_before = list(p.get_installed_packages()) missed_pkgs = set() if not only_packages: BasePkgManager.update(self, pkg_dir, requirements, only_check) p = PlatformFactory.newPlatform(pkg_dir) missed_pkgs = set(pkgs_before) & set(p.packages) missed_pkgs -= set(p.get_installed_packages()) p.update_packages(only_check) self.cleanup_packages(list(p.packages)) if missed_pkgs: p.install_packages(with_packages=list(missed_pkgs), skip_default_package=True) return True def cleanup_packages(self, names): self.cache_reset() deppkgs = {} for manifest in PlatformManager().get_installed(): p = PlatformFactory.newPlatform(manifest['__pkg_dir']) for pkgname, pkgmanifest in p.get_installed_packages().items(): if pkgname not in deppkgs: deppkgs[pkgname] = set() deppkgs[pkgname].add(pkgmanifest['version']) pm = PackageManager(get_project_packages_dir()) for manifest in pm.get_installed(): if manifest['name'] not in names: continue if (manifest['name'] not in deppkgs or manifest['version'] not in deppkgs[manifest['name']]): try: pm.uninstall(manifest['__pkg_dir'], after_update=True) except exception.UnknownPackage: pass self.cache_reset() return True @util.memoized(expire="5s") def get_installed_boards(self): boards = [] for manifest in self.get_installed(): p = PlatformFactory.newPlatform(manifest['__pkg_dir']) for config in p.get_boards().values(): board = config.get_brief_data() if board not in boards: boards.append(board) return boards @staticmethod def get_registered_boards(): return util.get_api_result("/boards", cache_valid="7d") def get_all_boards(self): boards = self.get_installed_boards() know_boards = ["%s:%s" % (b['platform'], b['id']) for b in boards] try: for board in self.get_registered_boards(): key = "%s:%s" % (board['platform'], board['id']) if key not in know_boards: boards.append(board) except (exception.APIRequestError, exception.InternetIsOffline): pass return sorted(boards, key=lambda b: b['name']) def board_config(self, id_, platform=None): for manifest in self.get_installed_boards(): if manifest['id'] == id_ and (not platform or manifest['platform'] == platform): return manifest for manifest in self.get_registered_boards(): if manifest['id'] == id_ and (not platform or manifest['platform'] == platform): return manifest raise exception.UnknownBoard(id_) class PlatformFactory(object): @staticmethod def get_clsname(name): name = re.sub(r"[^\da-z\_]+", "", name, flags=re.I) return "%s%sPlatform" % (name.upper()[0], name.lower()[1:]) @staticmethod def load_module(name, path): module = None try: module = load_source("platformio.managers.platform.%s" % name, path) except ImportError: raise exception.UnknownPlatform(name) return module @classmethod def newPlatform(cls, name, requirements=None): pm = PlatformManager() platform_dir = None if isdir(name): platform_dir = name name = pm.load_manifest(platform_dir)['name'] elif name.endswith("platform.json") and isfile(name): platform_dir = dirname(name) name = util.load_json(name)['name'] else: name, requirements, url = pm.parse_pkg_uri(name, requirements) platform_dir = pm.get_package_dir(name, requirements, url) if platform_dir: name = pm.load_manifest(platform_dir)['name'] if not platform_dir: raise exception.UnknownPlatform( name if not requirements else "%s@%s" % (name, requirements)) platform_cls = None if isfile(join(platform_dir, "platform.py")): platform_cls = getattr( cls.load_module(name, join(platform_dir, "platform.py")), 
cls.get_clsname(name)) else: platform_cls = type(str(cls.get_clsname(name)), (PlatformBase, ), {}) _instance = platform_cls(join(platform_dir, "platform.json")) assert isinstance(_instance, PlatformBase) return _instance class PlatformPackagesMixin(object): def install_packages( # pylint: disable=too-many-arguments self, with_packages=None, without_packages=None, skip_default_package=False, silent=False, force=False): with_packages = set(self.find_pkg_names(with_packages or [])) without_packages = set(self.find_pkg_names(without_packages or [])) upkgs = with_packages | without_packages ppkgs = set(self.packages) if not upkgs.issubset(ppkgs): raise exception.UnknownPackage(", ".join(upkgs - ppkgs)) for name, opts in self.packages.items(): version = opts.get("version", "") if name in without_packages: continue elif (name in with_packages or not (skip_default_package or opts.get("optional", False))): if ":" in version: self.pm.install("%s=%s" % (name, version), silent=silent, force=force) else: self.pm.install(name, version, silent=silent, force=force) return True def find_pkg_names(self, candidates): result = [] for candidate in candidates: found = False # lookup by package types for _name, _opts in self.packages.items(): if _opts.get("type") == candidate: result.append(_name) found = True if (self.frameworks and candidate.startswith("framework-") and candidate[10:] in self.frameworks): result.append(self.frameworks[candidate[10:]]['package']) found = True if not found: result.append(candidate) return result def update_packages(self, only_check=False): for name, manifest in self.get_installed_packages().items(): requirements = self.packages[name].get("version", "") if ":" in requirements: _, requirements, __ = self.pm.parse_pkg_uri(requirements) self.pm.update(manifest['__pkg_dir'], requirements, only_check) def get_installed_packages(self): items = {} for name in self.packages: pkg_dir = self.get_package_dir(name) if pkg_dir: items[name] = self.pm.load_manifest(pkg_dir) return items def are_outdated_packages(self): for name, manifest in self.get_installed_packages().items(): requirements = self.packages[name].get("version", "") if ":" in requirements: _, requirements, __ = self.pm.parse_pkg_uri(requirements) if self.pm.outdated(manifest['__pkg_dir'], requirements): return True return False def get_package_dir(self, name): version = self.packages[name].get("version", "") if ":" in version: return self.pm.get_package_dir( *self.pm.parse_pkg_uri("%s=%s" % (name, version))) return self.pm.get_package_dir(name, version) def get_package_version(self, name): pkg_dir = self.get_package_dir(name) if not pkg_dir: return None return self.pm.load_manifest(pkg_dir).get("version") class PlatformRunMixin(object): LINE_ERROR_RE = re.compile(r"(^|\s+)error:?\s+", re.I) @staticmethod def encode_scons_arg(value): data = base64.urlsafe_b64encode(hashlib_encode_data(value)) return data.decode() if is_bytes(data) else data @staticmethod def decode_scons_arg(data): value = base64.urlsafe_b64decode(data) return value.decode() if is_bytes(value) else value def run( # pylint: disable=too-many-arguments self, variables, targets, silent, verbose, jobs): assert isinstance(variables, dict) assert isinstance(targets, list) config = ProjectConfig.get_instance(variables['project_config']) options = config.items(env=variables['pioenv'], as_dict=True) if "framework" in options: # support PIO Core 3.0 dev/platforms options['pioframework'] = options['framework'] self.configure_default_packages(options, targets) 
self.install_packages(silent=True) self.silent = silent self.verbose = verbose or app.get_setting("force_verbose") if "clean" in targets: targets = ["-c", "."] variables['platform_manifest'] = self.manifest_path if "build_script" not in variables: variables['build_script'] = self.get_build_script() if not isfile(variables['build_script']): raise exception.BuildScriptNotFound(variables['build_script']) result = self._run_scons(variables, targets, jobs) assert "returncode" in result return result def _run_scons(self, variables, targets, jobs): args = [ get_pythonexe_path(), join(get_core_package_dir("tool-scons"), "script", "scons"), "-Q", "--warn=no-no-parallel-support", "--jobs", str(jobs), "--sconstruct", join(util.get_source_dir(), "builder", "main.py") ] # yapf: disable args.append("PIOVERBOSE=%d" % (1 if self.verbose else 0)) # pylint: disable=protected-access args.append("ISATTY=%d" % (1 if click._compat.isatty(sys.stdout) else 0)) args += targets # encode and append variables for key, value in variables.items(): args.append("%s=%s" % (key.upper(), self.encode_scons_arg(value))) def _write_and_flush(stream, data): try: stream.write(data) stream.flush() except IOError: pass copy_pythonpath_to_osenv() result = exec_command( args, stdout=BuildAsyncPipe( line_callback=self._on_stdout_line, data_callback=lambda data: _write_and_flush(sys.stdout, data)), stderr=BuildAsyncPipe( line_callback=self._on_stderr_line, data_callback=lambda data: _write_and_flush(sys.stderr, data))) return result def _on_stdout_line(self, line): if "`buildprog' is up to date." in line: return self._echo_line(line, level=1) def _on_stderr_line(self, line): is_error = self.LINE_ERROR_RE.search(line) is not None self._echo_line(line, level=3 if is_error else 2) a_pos = line.find("fatal error:") b_pos = line.rfind(": No such file or directory") if a_pos == -1 or b_pos == -1: return self._echo_missed_dependency(line[a_pos + 12:b_pos].strip()) def _echo_line(self, line, level): if line.startswith("scons: "): line = line[7:] assert 1 <= level <= 3 if self.silent and (level < 2 or not line): return fg = (None, "yellow", "red")[level - 1] if level == 1 and "is up to date" in line: fg = "green" click.secho(line, fg=fg, err=level > 1, nl=False) @staticmethod def _echo_missed_dependency(filename): if "/" in filename or not filename.endswith((".h", ".hpp")): return banner = """ {dots} * Looking for {filename_styled} dependency? Check our library registry! 
* * CLI > platformio lib search "header:{filename}" * Web > {link} * {dots} """.format(filename=filename, filename_styled=click.style(filename, fg="cyan"), link=click.style( "https://platformio.org/lib/search?query=header:%s" % quote(filename, safe=""), fg="blue"), dots="*" * (56 + len(filename))) click.echo(banner, err=True) class PlatformBase( # pylint: disable=too-many-public-methods PlatformPackagesMixin, PlatformRunMixin): PIO_VERSION = semantic_version.Version(util.pepver_to_semver(__version__)) _BOARDS_CACHE = {} def __init__(self, manifest_path): self.manifest_path = manifest_path self.silent = False self.verbose = False self._BOARDS_CACHE = {} self._manifest = util.load_json(manifest_path) self._custom_packages = None self.pm = PackageManager(get_project_packages_dir(), self.package_repositories) # if self.engines and "platformio" in self.engines: # if self.PIO_VERSION not in semantic_version.Spec( # self.engines['platformio']): # raise exception.IncompatiblePlatform(self.name, # str(self.PIO_VERSION)) @property def name(self): return self._manifest['name'] @property def title(self): return self._manifest['title'] @property def description(self): return self._manifest['description'] @property def version(self): return self._manifest['version'] @property def homepage(self): return self._manifest.get("homepage") @property def vendor_url(self): return self._manifest.get("url") @property def docs_url(self): return self._manifest.get("docs") @property def repository_url(self): return self._manifest.get("repository", {}).get("url") @property def license(self): return self._manifest.get("license") @property def frameworks(self): return self._manifest.get("frameworks") @property def engines(self): return self._manifest.get("engines") @property def package_repositories(self): return self._manifest.get("packageRepositories") @property def manifest(self): return self._manifest @property def packages(self): packages = self._manifest.get("packages", {}) for item in (self._custom_packages or []): name = item version = "*" if "@" in item: name, version = item.split("@", 2) name = name.strip() if name not in packages: packages[name] = {} packages[name].update({ "version": version.strip(), "optional": False }) return packages def get_dir(self): return dirname(self.manifest_path) def get_build_script(self): main_script = join(self.get_dir(), "builder", "main.py") if isfile(main_script): return main_script raise NotImplementedError() def is_embedded(self): for opts in self.packages.values(): if opts.get("type") == "uploader": return True return False def get_boards(self, id_=None): def _append_board(board_id, manifest_path): config = PlatformBoardConfig(manifest_path) if "platform" in config and config.get("platform") != self.name: return if "platforms" in config \ and self.name not in config.get("platforms"): return config.manifest['platform'] = self.name self._BOARDS_CACHE[board_id] = config bdirs = [ get_project_boards_dir(), join(get_project_core_dir(), "boards"), join(self.get_dir(), "boards"), ] if id_ is None: for boards_dir in bdirs: if not isdir(boards_dir): continue for item in sorted(os.listdir(boards_dir)): _id = item[:-5] if not item.endswith(".json") or _id in self._BOARDS_CACHE: continue _append_board(_id, join(boards_dir, item)) else: if id_ not in self._BOARDS_CACHE: for boards_dir in bdirs: if not isdir(boards_dir): continue manifest_path = join(boards_dir, "%s.json" % id_) if isfile(manifest_path): _append_board(id_, manifest_path) break if id_ not in self._BOARDS_CACHE: raise 
exception.UnknownBoard(id_) return self._BOARDS_CACHE[id_] if id_ else self._BOARDS_CACHE def board_config(self, id_): return self.get_boards(id_) def get_package_type(self, name): return self.packages[name].get("type") def configure_default_packages(self, options, targets): # override user custom packages self._custom_packages = options.get("platform_packages") # enable used frameworks for framework in options.get("framework", []): if not self.frameworks: continue framework = framework.lower().strip() if not framework or framework not in self.frameworks: continue _pkg_name = self.frameworks[framework].get("package") if _pkg_name: self.packages[_pkg_name]['optional'] = False # enable upload tools for upload targets if any(["upload" in t for t in targets] + ["program" in targets]): for name, opts in self.packages.items(): if opts.get("type") == "uploader": self.packages[name]['optional'] = False # skip all packages in "nobuild" mode # allow only upload tools and frameworks elif "nobuild" in targets and opts.get("type") != "framework": self.packages[name]['optional'] = True def get_lib_storages(self): storages = [] for opts in (self.frameworks or {}).values(): if "package" not in opts: continue pkg_dir = self.get_package_dir(opts['package']) if not pkg_dir or not isdir(join(pkg_dir, "libraries")): continue libs_dir = join(pkg_dir, "libraries") storages.append({"name": opts['package'], "path": libs_dir}) libcores_dir = join(libs_dir, "__cores__") if not isdir(libcores_dir): continue for item in os.listdir(libcores_dir): libcore_dir = join(libcores_dir, item) if not isdir(libcore_dir): continue storages.append({ "name": "%s-core-%s" % (opts['package'], item), "path": libcore_dir }) return storages class PlatformBoardConfig(object): def __init__(self, manifest_path): self._id = basename(manifest_path)[:-5] assert isfile(manifest_path) self.manifest_path = manifest_path try: self._manifest = util.load_json(manifest_path) except ValueError: raise exception.InvalidBoardManifest(manifest_path) if not set(["name", "url", "vendor"]) <= set(self._manifest): raise exception.PlatformioException( "Please specify name, url and vendor fields for " + manifest_path) def get(self, path, default=None): try: value = self._manifest for k in path.split("."): value = value[k] # pylint: disable=undefined-variable if PY2 and isinstance(value, unicode): # cast to plain string from unicode for PY2, resolves issue in # dev/platform when BoardConfig.get() is used in pair with # os.path.join(file_encoding, unicode_encoding) try: value = value.encode("utf-8") except UnicodeEncodeError: pass return value except KeyError: if default is not None: return default raise KeyError("Invalid board option '%s'" % path) def update(self, path, value): newdict = None for key in path.split(".")[::-1]: if newdict is None: newdict = {key: value} else: newdict = {key: newdict} util.merge_dicts(self._manifest, newdict) def __contains__(self, key): try: self.get(key) return True except KeyError: return False @property def id(self): return self._id @property def id_(self): return self.id @property def manifest(self): return self._manifest def get_brief_data(self): return { "id": self.id, "name": self._manifest['name'], "platform": self._manifest.get("platform"), "mcu": self._manifest.get("build", {}).get("mcu", "").upper(), "fcpu": int("".join([ c for c in str( self._manifest.get("build", {}).get("f_cpu", "0L")) if c.isdigit() ])), "ram": self._manifest.get("upload", {}).get("maximum_ram_size", 0), "rom": self._manifest.get("upload", 
{}).get("maximum_size", 0), "connectivity": self._manifest.get("connectivity"), "frameworks": self._manifest.get("frameworks"), "debug": self.get_debug_data(), "vendor": self._manifest['vendor'], "url": self._manifest['url'] } def get_debug_data(self): if not self._manifest.get("debug", {}).get("tools"): return None tools = {} for name, options in self._manifest['debug']['tools'].items(): tools[name] = {} for key, value in options.items(): if key in ("default", "onboard"): tools[name][key] = value return {"tools": tools} def get_debug_tool_name(self, custom=None): debug_tools = self._manifest.get("debug", {}).get("tools") tool_name = custom if tool_name == "custom": return tool_name if not debug_tools: raise exception.DebugSupportError(self._manifest['name']) if tool_name: if tool_name in debug_tools: return tool_name raise exception.DebugInvalidOptions( "Unknown debug tool `%s`. Please use one of `%s` or `custom`" % (tool_name, ", ".join(sorted(list(debug_tools))))) # automatically select best tool data = {"default": [], "onboard": [], "external": []} for key, value in debug_tools.items(): if value.get("default"): data['default'].append(key) elif value.get("onboard"): data['onboard'].append(key) data['external'].append(key) for key, value in data.items(): if not value: continue return sorted(value)[0] assert any(item for item in data)
py
1a45e443399606f1708de5139675abea1c95cdb9
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for form validation.""" import json import unittest from werkzeug import MultiDict import webcompat from webcompat import form FIREFOX_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:48.0) Gecko/20100101 Firefox/48.0' # nopep8 class TestForm(unittest.TestCase): """Module for testing the form.""" def setUp(self): """Set up.""" self.maxDiff = None webcompat.app.config['TESTING'] = True self.maxDiff = None self.app = webcompat.app.test_client() def tearDown(self): """Tear down.""" pass def test_normalize_url(self): """Check that URL is normalized.""" r = form.normalize_url('http://example.com') self.assertEqual(r, 'http://example.com') r = form.normalize_url(u'愛') self.assertEqual(r, u'http://愛') r = form.normalize_url(u'http://愛') self.assertEqual(r, u'http://愛') r = form.normalize_url('https://example.com') self.assertEqual(r, 'https://example.com') r = form.normalize_url('example.com') self.assertEqual(r, 'http://example.com') r = form.normalize_url('http:/example.com') self.assertEqual(r, 'http://example.com') r = form.normalize_url('https:/example.com') self.assertEqual(r, 'https://example.com') r = form.normalize_url('http:example.com') self.assertEqual(r, 'http://example.com') r = form.normalize_url('https:example.com') self.assertEqual(r, 'https://example.com') r = form.normalize_url('//example.com') self.assertEqual(r, 'http://example.com') r = form.normalize_url('http://https://bad.example.com') self.assertEqual(r, 'https://bad.example.com') r = form.normalize_url('http://param.example.com/?q=foo#bar') self.assertEqual(r, 'http://param.example.com/?q=foo#bar') r = form.normalize_url('') self.assertIsNone(r) def test_domain_name(self): """Check that domain name is extracted.""" r = form.domain_name('http://example.com') self.assertEqual(r, 'example.com') r = form.domain_name('https://example.com') self.assertEqual(r, 'example.com') r = form.normalize_url('') self.assertIsNone(r) def test_metadata_wrapping(self): """Check that metadata is processed and wrapped.""" TEST_DICT = {'cool': 'dude', 'wow': 'ok'} EXPECTED_SINGLE = '<!-- @cool: dude -->\n' EXPECTED_SINGLE_COMMA = '<!-- @cool: dude, wow -->\n' EXPECTED_MULTIPLE = '<!-- @cool: dude -->\n<!-- @wow: ok -->\n' r = form.wrap_metadata(('cool', 'dude')) self.assertEqual(r, EXPECTED_SINGLE) r = form.wrap_metadata(('cool', 'dude, wow')) self.assertEqual(r, EXPECTED_SINGLE_COMMA) r = form.get_metadata(('cool', 'wow'), TEST_DICT) self.assertEqual(r, EXPECTED_MULTIPLE) def test_radio_button_label(self): """Check that appropriate radio button label is returned.""" TEST_LABELS_LIST = [ (u'detection_bug', u'Desktop site instead of mobile site'), (u'unknown_bug', u'Something else') ] r = form.get_radio_button_label('unknown_bug', TEST_LABELS_LIST) self.assertEqual(r, u'Something else') r = form.get_radio_button_label(u'detection_bug', TEST_LABELS_LIST) self.assertEqual(r, u'Desktop site instead of mobile site') r = form.get_radio_button_label(None, TEST_LABELS_LIST) self.assertEqual(r, u'Unknown') r = form.get_radio_button_label('failme', TEST_LABELS_LIST) self.assertEqual(r, u'Unknown') def test_get_form(self): """Checks we return the right form with the appropriate data.""" with webcompat.app.test_request_context('/'): actual = form.get_form(FIREFOX_UA) expected_browser = 'Firefox 48.0' expected_os = 'Mac OS X 10.11' self.assertIsInstance(actual, form.IssueForm) self.assertEqual(actual.browser.data, expected_browser) self.assertEqual(actual.os.data, expected_os) def 
test_get_metadata(self): """HTML comments need the right values depending on the keys.""" metadata_keys = ('sky', 'earth') form_object = {'blah': 'goo', 'hello': 'moshi', 'sky': 'blue'} actual = form.get_metadata(metadata_keys, form_object) expected = u'<!-- @sky: blue -->\n<!-- @earth: None -->\n' self.assertEqual(actual, expected) form_object = MultiDict([ ('reported_with', u'desktop-reporter'), ('url', u'http://localhost:5000/issues/new'), ('extra_labels', [u'type-stylo', u'type-webrender-enabled']), ('ua_header', u'Mozilla/5.0...Firefox 59.0'), ('browser', u'Firefox 59.0')]) metadata_keys = ['browser', 'ua_header', 'reported_with', 'extra_labels'] actual = form.get_metadata(metadata_keys, form_object) expected = u'<!-- @browser: Firefox 59.0 -->\n<!-- @ua_header: Mozilla/5.0...Firefox 59.0 -->\n<!-- @reported_with: desktop-reporter -->\n<!-- @extra_labels: type-stylo, type-webrender-enabled -->\n' # nopep8 self.assertEqual(actual, expected) def test_normalize_metadata(self): """Avoid some type of strings.""" cases = [('blue sky -->', 'blue sky'), ('blue sky ---->>', 'blue sky'), ('', ''), ('blue sky ', 'blue sky'), ('bad_bird <script>', ''), ('bad_bird <script-->>', ''), ('a' * 300, ''), (None, None), ] for meta_value, expected in cases: self.assertEqual(form.normalize_metadata(meta_value), expected) def test_build_formdata(self): """The data body sent to GitHub API.""" # we just need to test that nothing breaks # even if the data are empty form_object = {'foo': 'bar'} actual = form.build_formdata(form_object) expected = {'body': u'<!-- @browser: None -->\n<!-- @ua_header: None -->\n<!-- @reported_with: None -->\n\n**URL**: None\n\n**Browser / Version**: None\n**Operating System**: None\n**Tested Another Browser**: Unknown\n\n**Problem type**: Unknown\n**Description**: None\n**Steps to Reproduce**:\nNone\n\n\n\n_From [webcompat.com](https://webcompat.com/) with \u2764\ufe0f_', 'title': 'None - unknown'} # nopep8 self.assertIs(type(actual), dict) self.assertEqual(actual, expected) # testing for double URL Schemes. form_object = {'url': 'http://https://example.com/'} actual = form.build_formdata(form_object) expected = {'body': u'<!-- @browser: None -->\n<!-- @ua_header: None -->\n<!-- @reported_with: None -->\n\n**URL**: https://example.com/\n\n**Browser / Version**: None\n**Operating System**: None\n**Tested Another Browser**: Unknown\n\n**Problem type**: Unknown\n**Description**: None\n**Steps to Reproduce**:\nNone\n\n\n\n_From [webcompat.com](https://webcompat.com/) with \u2764\ufe0f_', 'title': 'example.com - unknown'} # nopep8 self.assertEqual(actual, expected) # testing with unicode strings. 
form_object = {'url': u'愛'} actual = form.build_formdata(form_object) expected = {'body': u'<!-- @browser: None -->\n<!-- @ua_header: None -->\n<!-- @reported_with: None -->\n\n**URL**: http://\u611b\n\n**Browser / Version**: None\n**Operating System**: None\n**Tested Another Browser**: Unknown\n\n**Problem type**: Unknown\n**Description**: None\n**Steps to Reproduce**:\nNone\n\n\n\n_From [webcompat.com](https://webcompat.com/) with \u2764\ufe0f_', 'title': u'\u611b - unknown'} # nopep8 self.assertEqual(actual, expected) def test_get_details(self): """Assert we handle valid JSON and other values.""" actual_string_arg = form.get_details('cool') expected_string_arg = 'cool' self.assertEqual(actual_string_arg, expected_string_arg) actual_json_arg = form.get_details(json.dumps({'a': 'b', 'c': False})) expected_json_arg = '<li>a: b</li><li>c: false</li>' self.assertEqual(actual_json_arg, expected_json_arg) def test_build_details(self): """Assert we return the expected HTML, for a json object or a string. """ actual_json_arg = form.build_details(json.dumps( {'a': 'b', 'c': False})) expected_json_arg = '<details>\n<summary>Browser Configuration</summary>\n<ul>\n <li>a: b</li><li>c: false</li>\n</ul>\n</details>' # nopep8 self.assertEqual(actual_json_arg, expected_json_arg) actual_string_arg = form.build_details("cool") expected_string_arg = '<details>\n<summary>Browser Configuration</summary>\n<ul>\n cool\n</ul>\n</details>' # nopep8 self.assertEqual(actual_string_arg, expected_string_arg)
py
1a45e6dbb89d99728197fcedb20f0bcde7bba037
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RoaRequest


class DeleteBranchRequest(RoaRequest):

    def __init__(self):
        RoaRequest.__init__(self, 'codeup', '2020-04-14', 'DeleteBranch')
        self.set_uri_pattern('/api/v3/projects/[ProjectId]/repository/branches/delete')
        self.set_method('DELETE')

    def get_OrganizationId(self):
        return self.get_query_params().get('OrganizationId')

    def set_OrganizationId(self, OrganizationId):
        self.add_query_param('OrganizationId', OrganizationId)

    def get_SubUserId(self):
        return self.get_query_params().get('SubUserId')

    def set_SubUserId(self, SubUserId):
        self.add_query_param('SubUserId', SubUserId)

    def get_AccessToken(self):
        return self.get_query_params().get('AccessToken')

    def set_AccessToken(self, AccessToken):
        self.add_query_param('AccessToken', AccessToken)

    def get_ProjectId(self):
        return self.get_path_params().get('ProjectId')

    def set_ProjectId(self, ProjectId):
        self.add_path_param('ProjectId', ProjectId)

    def get_BranchName(self):
        return self.get_query_params().get('BranchName')

    def set_BranchName(self, BranchName):
        self.add_query_param('BranchName', BranchName)
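# --- Illustrative usage sketch (added for clarity; not part of the original SDK file) ---
# A request object like the one above is normally dispatched through
# aliyunsdkcore's AcsClient. The credentials, region, project id, organization
# id, and branch name below are placeholders/assumptions, not values taken
# from the original code.
from aliyunsdkcore.client import AcsClient


def delete_branch_example():
    # Placeholder credentials and region -- replace with real values.
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

    request = DeleteBranchRequest()
    request.set_ProjectId('12345')              # hypothetical Codeup project id
    request.set_BranchName('feature/obsolete')  # hypothetical branch to delete
    request.set_OrganizationId('<organization-id>')

    # Send the request; the SDK returns the raw response body.
    response = client.do_action_with_exception(request)
    print(response)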
py
1a45e6e5381df4d75fdfba13106afed85a01941e
from typing import List, Union

import warnings

import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted

from feature_engine.dataframe_checks import (
    _is_dataframe,
    _check_contains_na,
    _check_input_matches_training_df,
)
from feature_engine.variable_manipulation import _find_or_check_categorical_variables


class BaseCategoricalTransformer(BaseEstimator, TransformerMixin):
    """shared set-up checks and methods across categorical transformers"""

    def _check_fit_input_and_variables(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Checks that input is a dataframe, finds categorical variables, or
        alternatively checks that the variables entered by the user are of type
        object (categorical). Checks absence of NA.

        Parameters
        ----------
        X : Pandas DataFrame

        Raises
        ------
        TypeError
            If the input is not a Pandas DataFrame.
            If any user provided variable is not categorical
        ValueError
            If there are no categorical variables in the df or the df is empty
            If the variable(s) contain null values

        Returns
        -------
        X : Pandas DataFrame
            The same dataframe entered as parameter

        variables : list
            list of categorical variables
        """

        # check input dataframe
        X = _is_dataframe(X)

        # find categorical variables or check variables entered by user are object
        self.variables: List[Union[str, int]] = _find_or_check_categorical_variables(
            X, self.variables
        )

        # check if dataset contains na
        _check_contains_na(X, self.variables)

        return X

    def _check_transform_input_and_state(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Checks that the input is a dataframe and of the same size than the one used
        in the fit method. Checks absence of NA.

        Parameters
        ----------
        X : Pandas DataFrame

        Raises
        ------
        TypeError
            If the input is not a Pandas DataFrame
        ValueError
            If the variable(s) contain null values.
            If the dataframe is not of same size as that used in fit()

        Returns
        -------
        X : Pandas DataFrame
            The same dataframe entered by the user.
        """

        # Check method fit has been called
        check_is_fitted(self)

        # check that input is a dataframe
        X = _is_dataframe(X)

        # check if dataset contains na
        _check_contains_na(X, self.variables)

        # Check input data contains same number of columns as df used to fit
        _check_input_matches_training_df(X, self.input_shape_[1])

        return X

    def _check_encoding_dictionary(self):
        """After fit(), the encoders should return a dictionary with the original
        values to numerical mappings as key, values. This function checks that
        the dictionary was created and is not empty.
        """

        # check that dictionary is not empty
        if len(self.encoder_dict_) == 0:
            raise ValueError(
                "Encoder could not be fitted. Check the parameters and the variables "
                "in your dataframe."
            )

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Replace categories with the learned parameters.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features].
            The dataset to transform.

        Raises
        ------
        TypeError
            If the input is not a Pandas DataFrame
        ValueError
            - If the variable(s) contain null values
            - If dataframe is not of same size as that used in fit()

        Warning
            If after encoding, NAN were introduced.

        Returns
        -------
        X : pandas dataframe of shape = [n_samples, n_features].
            The dataframe containing the categories replaced by numbers.
        """

        X = self._check_transform_input_and_state(X)

        # replace categories by the learned parameters
        for feature in self.encoder_dict_.keys():
            X[feature] = X[feature].map(self.encoder_dict_[feature])

        # check if NaN values were introduced by the encoding
        if X[self.encoder_dict_.keys()].isnull().sum().sum() > 0:
            warnings.warn(
                "NaN values were introduced in the returned dataframe by the encoder."
                "This means that some of the categories in the input dataframe were "
                "not present in the training set used when the fit method was called. "
                "Thus, mappings for those categories do not exist. Try using the "
                "RareLabelCategoricalEncoder to remove infrequent categories before "
                "calling this encoder."
            )

        return X

    def inverse_transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Convert the encoded variable back to the original values.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features].
            The transformed dataframe.

        Raises
        ------
        TypeError
            - If the input is not a Pandas DataFrame
        ValueError
            - If the variable(s) contain null values
            - If the dataframe is not of same size as that used in fit()

        Returns
        -------
        X : pandas dataframe of shape = [n_samples, n_features].
            The un-transformed dataframe, with the categorical variables containing
            the original values.
        """

        X = self._check_transform_input_and_state(X)

        # replace encoded categories by the original values
        for feature in self.encoder_dict_.keys():
            inv_map = {v: k for k, v in self.encoder_dict_[feature].items()}
            X[feature] = X[feature].map(inv_map)

        return X
py
1a45e72500052e1f2211837a0299cf8571b276d6
import os

from serde.json import from_json

from edge.command.common.precommand_check import precommand_checks
from edge.config import EdgeConfig
from edge.exception import EdgeException
from edge.state import EdgeState
from edge.train import TrainedModel
from edge.tui import TUI, StepTUI, SubStepTUI
from edge.vertex_deploy import vertex_deploy
from edge.path import get_model_dvc_pipeline, get_vertex_model_json


def model_deploy(model_name: str):
    intro = f"Deploying model '{model_name}' on Vertex AI"
    success_title = "Model deployed successfully"
    success_message = "Success"
    failure_title = "Model deployment failed"
    failure_message = "See the errors above. See README for more details."

    with EdgeConfig.context() as config:
        with TUI(
                intro,
                success_title,
                success_message,
                failure_title,
                failure_message
        ) as tui:
            precommand_checks(config)
            with EdgeState.context(config, to_lock=True, to_save=True) as state:
                with StepTUI("Checking model configuration", emoji="🐏"):
                    with SubStepTUI("Checking that the model is initialised"):
                        if model_name not in config.models:
                            raise EdgeException("Model has not been initialised. "
                                                f"Run `./edge.sh model init {model_name}` to initialise.")
                        if state.models is None or state.models[model_name] is None:
                            raise EdgeException("Model is missing from vertex:edge state. "
                                                "This might mean that the model has not been initialised. "
                                                f"Run `./edge.sh model init {model_name}` to initialise.")
                        endpoint_resource_name = state.models[model_name].endpoint_resource_name
                    with SubStepTUI("Checking that the model has been trained"):
                        if not os.path.exists(get_vertex_model_json(model_name)):
                            raise EdgeException(f"{get_vertex_model_json(model_name)} does not exist. "
                                                "This means that the model has not been trained")
                        with open(get_vertex_model_json(model_name)) as file:
                            model = from_json(TrainedModel, file.read())
                        if model.is_local:
                            raise EdgeException("This model was trained locally, and hence cannot be deployed "
                                                "on Vertex AI")
                        model_resource_name = model.model_name

                vertex_deploy(endpoint_resource_name, model_resource_name, model_name)

                state.models[model_name].deployed_model_resource_name = model_resource_name

                short_endpoint_resource_name = "/".join(endpoint_resource_name.split("/")[2:])
                tui.success_message = (
                    "You can see the deployed model at "
                    f"https://console.cloud.google.com/vertex-ai/"
                    f"{short_endpoint_resource_name}?project={config.google_cloud_project.project_id}\n\n"
                    "Happy herding! 🐏"
                )
py
1a45e786a8d47afa0d6be1b66b3cf696985890d6
#!/usr/bin/env python
# encoding: utf-8

"""
@author: zhanghe
@software: PyCharm
@file: customer_invoice.py
@time: 2018-07-05 17:54
"""

from __future__ import unicode_literals

from flask_restful import fields


# Customer invoicing information
fields_item_customer_invoice = {
    'cid': fields.Integer,
    'company_name': fields.String,
    'company_tax_id': fields.String,
    'company_address': fields.String,
    'company_tel': fields.String,
    'company_bank_name': fields.String,
    'company_bank_account': fields.String,
    'status_delete': fields.Boolean,
    'delete_time': fields.DateTime(dt_format=b'iso8601'),
    'create_time': fields.DateTime(dt_format=b'iso8601'),
    'update_time': fields.DateTime(dt_format=b'iso8601'),
}
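For context, a field map like this is typically consumed by flask_restful's marshalling helpers. The sketch below is illustrative only: the sample record and its values are invented, and in the real application the data would come from the ORM rather than a literal dict.

```python
import datetime

from flask_restful import marshal

# Invented sample record; any object or mapping exposing these keys works.
sample = {
    'cid': 1,
    'company_name': 'Acme Ltd.',
    'company_tax_id': '913100000000000000',
    'company_address': '1 Example Road',
    'company_tel': '021-00000000',
    'company_bank_name': 'Example Bank',
    'company_bank_account': '000000000000',
    'status_delete': False,
    'delete_time': None,
    'create_time': datetime.datetime(2018, 7, 5, 17, 54),
    'update_time': datetime.datetime(2018, 7, 5, 17, 54),
}

# Keys outside the field map are dropped and values are formatted per field
# type (for example, datetimes rendered as ISO 8601 strings).
print(marshal(sample, fields_item_customer_invoice))
```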
bzl
1a45e87aa3e724e8980bae1381de4c9b703ceb82
"""Interop with cc_* rules These rules are temporary and will be deprecated in the future. """ load(":private/providers.bzl", "HaskellBuildInfo", "HaskellLibraryInfo", "HaskellBinaryInfo", "CcSkylarkApiProviderHacked", ) load(":private/set.bzl", "set") load("@bazel_skylib//:lib.bzl", "paths") load(":private/path_utils.bzl", "ln") CcInteropInfo = provider( doc = "Information needed for interop with cc rules.", fields = { "hdrs": "CC headers", "cpp_flags": "Preprocessor flags", "include_args": "Extra include dirs", } ) def cc_headers(ctx): """Bring in scope the header files of dependencies, if any. *Internal function - do not use.* """ hdrs = depset() # XXX There's gotta be a better way to test the presence of # CcSkylarkApiProvider. ccs = [dep.cc for dep in ctx.attr.deps if hasattr(dep, "cc")] hdrs = depset(transitive = [cc.transitive_headers for cc in ccs]) hdrs = depset(transitive = [hdrs] + [ # XXX cc_import doesn't produce a cc field, so we emulate it with a # custom provider. dep[CcSkylarkApiProviderHacked].transitive_headers for dep in ctx.attr.deps if CcSkylarkApiProviderHacked in dep ]) include_directories = set.to_list(set.from_list( [f for cc in ccs for f in cc.include_directories] + [f for dep in ctx.attr.deps if CcSkylarkApiProviderHacked in dep for f in dep[CcSkylarkApiProviderHacked].include_directories])) quote_include_directories = set.to_list(set.from_list( [f for cc in ccs for f in cc.quote_include_directories])) system_include_directories = set.to_list(set.from_list( [f for cc in ccs for f in cc.system_include_directories])) cpp_flags = ( ["-D" + define for cc in ccs for define in cc.defines] + [f for include in quote_include_directories for f in ["-iquote", include]] + [f for include in system_include_directories for f in ["-isystem", include]]) include_args = ["-I" + include for include in include_directories] return CcInteropInfo( hdrs = hdrs.to_list(), cpp_flags = cpp_flags, include_args = include_args, ) def _cc_import_impl(ctx): strip_prefix = ctx.attr.strip_include_prefix # cc_library's strip_include_prefix attribute accepts both absolute and # relative paths. For simplicity we currently only implement absolute # paths. if strip_prefix.startswith("/"): prefix = strip_prefix[1:] else: prefix = paths.join(ctx.label.workspace_root, ctx.label.package, strip_prefix) roots = set.empty() for f in ctx.files.hdrs: # If it's a generated file, strip off the bin or genfiles prefix. path = f.path if path.startswith(ctx.bin_dir.path): path = paths.relativize(path, ctx.bin_dir.path) elif path.startswith(ctx.genfiles_dir.path): path = paths.relativize(path, ctx.genfiles_dir.path) if not path.startswith(prefix): fail("Header {} does not have expected prefix {}".format( path, prefix)) roots = set.insert(roots, f.root.path if f.root.path else ".") include_directories = [paths.join(root, prefix) for root in set.to_list(roots)] return [ DefaultInfo(files = depset(ctx.attr.shared_library.files)), CcSkylarkApiProviderHacked( transitive_headers = depset(transitive = [l.files for l in ctx.attr.hdrs]), include_directories = include_directories), ] # XXX This is meant as a drop-in replacement for the native cc_import, # but it's a temporary hack. It's only necessary because the native # cc_import does not provide CcSkylarkApiProvider. So we write our own # rule that does just that. See # https://github.com/bazelbuild/bazel/issues/4369. 
haskell_cc_import = rule(
  _cc_import_impl,
  attrs = {
    "shared_library": attr.label(
      # NOTE We do not list all extensions here because .so libraries may
      # have numeric suffixes like foo.so.1.2.3, and if they also have
      # SONAME with numeric suffix, matching file must be provided, so this
      # attribute must accept libraries with almost arbitrary extensions.
      # It would be easier if Skylark supported regexps.
      allow_files = True,
      doc = """A single precompiled shared library. Bazel ensures it is
available to the binary that depends on it during runtime.
""",
    ),
    "hdrs": attr.label_list(
      allow_files = [".h"],
      doc = """
The list of header files published by this precompiled library to be
directly included by sources in dependent rules.
""",
    ),
    "strip_include_prefix": attr.string(
      doc = """
The prefix to strip from the paths of the headers of this rule.

When set, the headers in the `hdrs` attribute of this rule are accessible
at their path (relative to the repository) with this prefix cut off.

If it's a relative path, it's taken as a package-relative one. If it's an
absolute one, it's understood as a repository-relative path.
"""),
  },
)
"""Imports a prebuilt shared library.

Use this to make `.so`, `.dll`, `.dylib` files residing in external
[external repositories][bazel-ext-repos] available to Haskell rules.

*This rule is a temporary replacement for [cc_import][cc_import] and will
be deprecated in the future.*

Example:
  ```bzl
  haskell_cc_import(name = "zlib", shared_library = "@zlib//:lib")

  haskell_binary(
    name = "crc32sum",
    srcs = ["Main.hs"],
    deps = [":zlib"],
    prebuilt_dependencies = ["base"],
  )
  ```

[bazel-ext-repos]: https://docs.bazel.build/versions/master/external.html
[cc_import]: https://docs.bazel.build/versions/master/be/c-cpp.html#cc_import
"""

def _cc_haskell_import(ctx):
  dyn_libs = set.empty()

  if HaskellBuildInfo in ctx.attr.dep:
    set.mutable_union(dyn_libs, ctx.attr.dep[HaskellBuildInfo].dynamic_libraries)
  else:
    fail("{0} has to provide `HaskellBuildInfo`".format(ctx.attr.dep.label.name))

  if HaskellBinaryInfo in ctx.attr.dep:
    bin = ctx.attr.dep[HaskellBinaryInfo].binary
    dyn_lib = ctx.actions.declare_file("lib{0}.so".format(bin.basename))
    ln(ctx, bin, dyn_lib)
    set.mutable_insert(dyn_libs, dyn_lib)

  return [
    DefaultInfo(
      files = set.to_depset(dyn_libs),
      default_runfiles = ctx.runfiles(
        files = ctx.attr.dep.default_runfiles.files.to_list(),
        collect_default = True,
      ),
      data_runfiles = ctx.runfiles(
        files = ctx.attr.dep.data_runfiles.files.to_list(),
        collect_data = True,
      ),
    )
  ]

  # NOTE: the code below is unreachable; the function always returns above.
  if HaskellBinaryInfo in ctx.attr.dep:
    dbin = ctx.attr.dep[HaskellBinaryInfo].dynamic_bin
    if dbin != None:
      set.mutable_insert(dyn_libs, dbin)

  return [
    DefaultInfo(
      files = set.to_depset(dyn_libs)
    )
  ]

cc_haskell_import = rule(
  _cc_haskell_import,
  attrs = {
    "dep": attr.label(
      doc = """
Target providing a `HaskellLibraryInfo` or `HaskellBinaryInfo`, such as
`haskell_library` or `haskell_binary`.
"""
    ),
  },
  toolchains = ["@io_tweag_rules_haskell//haskell:toolchain"],
)
"""Exports a Haskell library as a CC library.

Given a [haskell_library](#haskell_library) or [haskell_binary](#haskell_binary)
input, outputs the shared object files produced as well as the object files it
depends on directly and transitively. This is very useful if you want to link
in a Haskell shared library from `cc_library`.
There is a caveat: this will not provide any shared libraries that aren't explicitly given to it. This means that if you're using `prebuilt_dependencies` and relying on GHC to provide those objects, they will not be present here. You will have to provide those separately to your `cc_library`. If you're getting `prebuilt_dependencies` from your toolchain, you will likely want to extract those and pass them in as well. *This rule is temporary and only needed until the Bazel C/C++ "sandwich" (see [bazelbuild/bazel#2163][bazel-cpp-sandwich]) is implemented. This rule will be deprecated in the future.* Example: ```bzl haskell_library( name = "my-lib", ... ) cc_haskell_import( name = "my-lib-objects", dep = ":my-lib", ) cc_library( name = "my-cc", srcs = ["main.c", ":my-lib-objects"], ) ``` [bazel-cpp-sandwich]: https://github.com/bazelbuild/bazel/issues/2163 """
py
1a45e93e7be277b2b2beffbdba16b7d1d0ae75fb
from .cmaboss_1024n import *
py
1a45e977ffe541e824dc06caca0a2602c2d73838
""" WSGI config for blue_rice_29798 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blue_rice_29798.settings') application = get_wsgi_application()
py
1a45ea4922487e00920a2bface789e21cd9918cf
# --------------
##File path for the file
file_path

def read_file(path):
    # Read only the first line of the file.
    file = open(path, mode='r')
    sentence = file.readline()
    file.close()
    return sentence

sample_message = read_file(file_path)
#Code starts here


# --------------
#Code starts here
message_1 = read_file(file_path_1)
message_2 = read_file(file_path_2)
print(message_1)
print(message_2)

def fuse_msg(message_a, message_b):
    # Integer-divide the second number by the first and return it as text.
    message_a = int(message_a)
    message_b = int(message_b)
    quotient = message_b // message_a
    return str(quotient)

secret_msg_1 = fuse_msg(message_1, message_2)


# --------------
#Code starts here
message_3 = read_file(file_path_3)
print(message_3)

def substitute_msg(message_c):
    # Map a colour code word to its substitute phrase.
    sub = " "
    if message_c == 'Red':
        sub = 'Army General'
    elif message_c == 'Green':
        sub = 'Data Scientist'
    elif message_c == 'Blue':
        sub = 'Marine Biologist'
    return sub

secret_msg_2 = substitute_msg(message_3)


# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4 = read_file(file_path_4)
message_5 = read_file(file_path_5)
print(message_4)
print(message_5)

def compare_msg(message_d, message_e):
    # Keep the words of the first message that do not appear in the second.
    a_list = message_d.split()
    b_list = message_e.split()
    c_list = [i for i in a_list if i not in b_list]
    final_msg = ' '.join(c_list)
    return final_msg

secret_msg_3 = compare_msg(message_4, message_5)


# --------------
#Code starts here
message_6 = read_file(file_path_6)
print(message_6)

def extract_msg(message_f):
    # Keep only the words with an even number of characters from the given message.
    a_list = message_f.split()
    even_word = lambda x: len(x) % 2 == 0
    b_list = filter(even_word, a_list)
    final_msg = ' '.join(b_list)
    return final_msg

secret_msg_4 = extract_msg(message_6)


# --------------
#Secret message parts in the correct order
message_parts = [secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]

final_path = user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = " ".join(message_parts)

def write_file(secret_msg, path):
    # Append the assembled message to the output file.
    file = open(path, mode='a+')
    file.write(secret_msg)
    file.close()

write_file(secret_msg, final_path)
print(secret_msg)
py
1a45ed120c3c38f1b5c4e23ac12c9d7effcdccb4
#!/usr/bin/env python import pytest import random import os import filecmp from devtools_shorthand_sql import core random.seed(1234) @pytest.fixture def functionbuilder_basic(): fields = [core.IDField('id', 'test'), core.TextField('COL2', 'test2'), core.IntegerField('col1', 'test')] sql_writer = core.SQLiteWriter() x = core.FunctionBuilder('my_table', fields, sql_writer) return x def test_base_function(): name, text = 'name', 'text' base = core.BaseFunction(name, text) assert base.name == name assert base.text == text assert base.__str__() == text def test_sql_builder_properties(): fields = [core.IntegerField('col1', 'test'), core.TextField('COL2', 'test2')] sql_writer = core.SQLiteWriter x = core.FunctionBuilder('my_table', fields, sql_writer) assert x.arguments == 'col1: int, col2: str' assert x.field_names == 'col1, COL2' assert x.params == 'col1, col2' assert x.function_name_stem == 'my_table' assert x.has_idfield is False assert x.kwargs == 'col1=902, col2="ED73BYDMA9"' def test_sql_builder_create_table_statement(functionbuilder_basic): x = functionbuilder_basic result = x.create_table_statement() assert result == 'CREATE TABLE IF NOT EXISTS my_table (\nid test,\nCOL2 test2,\ncol1 test\n);' def test_sql_builder_create_insert_function_with_id(functionbuilder_basic): x = functionbuilder_basic result = x.create_insert_function_with_id() assert result.text == '\ndef insert_my_table(id: int, col2: str, col1: int) -> int:\n params = (id, col2, col1)\n id = YOUR_CONNECTOR_EXECUTOR("""INSERT INTO my_table (id, COL2, col1) VALUES(?,?,?);""",\n params)\n return id\n' def test_sql_builder_create_insert_function_without_id(functionbuilder_basic): x = functionbuilder_basic result = x.create_insert_function_without_id() assert result.text == '\ndef insert_my_table(id: int, col2: str, col1: int) -> None:\n params = (id, col2, col1)\n YOUR_CONNECTOR_EXECUTOR("""INSERT INTO my_table (id, COL2, col1) VALUES(?,?,?);""",\n params)\n return\n' def test_sql_builder_create_insert_function_with_id_test(functionbuilder_basic): expected = """ def test_insert_my_table(YOUR_CLEAN_DB_FIXTURE): expected = (1, 'AXRQDZ4S5I', 954) new_id = YOUR_MODULE.insert_my_table(col2="AXRQDZ4S5I", col1=954) result = YOUR_CONNECTOR_QUERY('SELECT * FROM my_table').fetchall()[0] assert result == expected assert new_id == 1 """ x = functionbuilder_basic result = x.create_insert_function_with_id_test() assert result.text == expected def test_sql_builder_create_insert_function_without_id_test(functionbuilder_basic): expected = """ def test_insert_my_table(YOUR_CLEAN_DB_FIXTURE): expected = (1, 'CYSB3CK4JX', 409) YOUR_MODULE.insert_my_table(col2="CYSB3CK4JX", col1=409) result = YOUR_CONNECTOR_QUERY('SELECT * FROM my_table').fetchall()[0] assert result == expected """ x = functionbuilder_basic result = x.create_insert_function_without_id_test() assert result.text == expected @pytest.mark.parametrize("source,sql_name_format,fixture_file", [ # Show none leaves sql columns unchange ("""# photo id,id SIZE,int filename,text date_taken,int""", 'none', 'basic_output.txt'), # Show upper makes sql columns upper ("""# photo id,id size,int filename,text date_taken,int""", 'upper', 'basic_output_upper.txt'), # Show lower makes sql columns lower ("""# photo ID,id SIZE,int FILENAME,text DATE_TAKEN,int""", 'lower', 'basic_output_lower.txt'), # Show proper makes sql columns proper ("""# photo ID,id SIZE,int FILENAME,text DATE_TAKEN,int""", 'proper', 'basic_output_proper.txt'), ]) def test_main_pass(tmpdir, source, sql_name_format, fixture_file): 
expected = os.path.join('tests', 'fixtures', fixture_file) filename = os.path.join(tmpdir, 'shorthand.txt') with open(filename, 'w') as f: f.write(source) output_filename = os.path.join(tmpdir, 'output.txt') core.main(filename, 'sqlite', output_filename, sql_name_format) if not filecmp.cmp(expected, output_filename): import shutil shutil.copy(output_filename, 'test_result.txt') assert filecmp.cmp(expected, output_filename)
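The parametrised test above doubles as documentation for the shorthand format and for core.main's arguments (input path, SQL dialect, output path, column-name casing). A sketch of driving it outside pytest follows; the file paths are illustrative and assume the package is installed.

```python
from devtools_shorthand_sql import core

# Same shorthand layout the test feeds in: a "# table" name line, then
# "column,type" pairs, one per line.
shorthand = """# photo
id,id
size,int
filename,text
date_taken,int"""

with open("shorthand.txt", "w") as f:
    f.write(shorthand)

# Mirrors the call in test_main_pass: input file, dialect, output file,
# and how SQL column names should be cased.
core.main("shorthand.txt", "sqlite", "output.txt", "lower")
```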
py
1a45edd6956df3ed3eff4ddf3dcd6b6a1500632b
#!/usr/bin/env python import os from hummingbot.client.config.global_config_map import connector_keys import logging as _logging _logger = _logging.getLogger(__name__) master_host = "***REMOVED***" master_user = "***REMOVED***" master_password = "***REMOVED***" master_db = "***REMOVED***" slave_host = "127.0.0.1" slave_user = "reader" slave_password = "falcon" slave_db = "falcon" mysql_master_server = "***REMOVED***" mysql_slave_server = "***REMOVED***" mysql_user = "***REMOVED***" mysql_password = "***REMOVED***" mysql_db = "***REMOVED***" order_book_db = "***REMOVED***" sparrow_db = "***REMOVED***" order_books_db_2 = { "host": "***REMOVED***", "user": "***REMOVED***", "password": "***REMOVED***", "db": "**REMOVED***", } kafka_bootstrap_server = "***REMOVED***" # whether to enable api mocking in unit test cases mock_api_enabled = os.getenv("MOCK_API_ENABLED") # ALL TEST KEYS for key in connector_keys().keys(): locals()[key] = os.getenv(key.upper()) """ # AscendEX Tests ascend_ex_api_key = os.getenv("ASCEND_EX_KEY") ascend_ex_secret_key = os.getenv("ASCEND_EX_SECRET") # Binance Tests binance_api_key = os.getenv("BINANCE_API_KEY") binance_api_secret = os.getenv("BINANCE_API_SECRET") # Binance Perpetuals Tests binance_perpetuals_api_key = os.getenv("BINANCE_PERPETUALS_API_KEY") binance_perpetuals_api_secret = os.getenv("BINANCE_PERPETUALS_API_SECRET") # Coinbase Pro Tests coinbase_pro_api_key = os.getenv("COINBASE_PRO_API_KEY") coinbase_pro_secret_key = os.getenv("COINBASE_PRO_SECRET_KEY") coinbase_pro_passphrase = os.getenv("COINBASE_PRO_PASSPHRASE") # Huobi Tests huobi_api_key = os.getenv("HUOBI_API_KEY") huobi_secret_key = os.getenv("HUOBI_SECRET_KEY") # Loopring Tests loopring_accountid = os.getenv("LOOPRING_ACCOUNTID") loopring_exchangeid = os.getenv("LOOPRING_EXCHANGEID") loopring_api_key = os.getenv("LOOPRING_API_KEY") loopring_private_key = os.getenv("LOOPRING_PRIVATE_KEY") # Bittrex Tests bittrex_api_key = os.getenv("BITTREX_API_KEY") bittrex_secret_key = os.getenv("BITTREX_SECRET_KEY") # KuCoin Tests kucoin_api_key = os.getenv("KUCOIN_API_KEY") kucoin_secret_key = os.getenv("KUCOIN_SECRET_KEY") kucoin_passphrase = os.getenv("KUCOIN_PASSPHRASE") test_web3_provider_list = [os.getenv("WEB3_PROVIDER")] # Liquid Tests liquid_api_key = os.getenv("LIQUID_API_KEY") liquid_secret_key = os.getenv("LIQUID_SECRET_KEY") # Kraken Tests kraken_api_key = os.getenv("KRAKEN_API_KEY") kraken_secret_key = os.getenv("KRAKEN_SECRET_KEY") # OKEx Test okex_api_key = os.getenv("OKEX_API_KEY") okex_secret_key = os.getenv("OKEX_SECRET_KEY") okex_passphrase = os.getenv("OKEX_PASSPHRASE") # BitMart Test bitmart_api_key = os.getenv("BITMART_API_KEY") bitmart_secret_key = os.getenv("BITMART_SECRET_KEY") bitmart_memo = os.getenv("BITMART_MEMO") # CryptoCom Test crypto_com_api_key = os.getenv("CRYPTO_COM_API_KEY") crypto_com_secret_key = os.getenv("CRYPTO_COM_SECRET_KEY") # HitBTC Tests hitbtc_api_key = os.getenv("HITBTC_API_KEY") hitbtc_secret_key = os.getenv("HITBTC_SECRET_KEY") # Gate.io Tests gate_io_api_key = os.getenv("GATE_IO_API_KEY") gate_io_secret_key = os.getenv("GATE_IO_SECRET_KEY") # CoinZoom Test coinzoom_api_key = os.getenv("COINZOOM_API_KEY") coinzoom_secret_key = os.getenv("COINZOOM_SECRET_KEY") coinzoom_username = os.getenv("COINZOOM_USERNAME") # Wazirx Test wazirx_api_key = os.getenv("WAZIRX_API_KEY") wazirx_secret_key = os.getenv("WAZIRX_SECRET_KEY") # AltMarkets.io Test altmarkets_api_key = os.getenv("ALTMARKETS_API_KEY") altmarkets_secret_key = os.getenv("ALTMARKETS_SECRET_KEY") # CoinFLEX 
Test coinflex_api_key = os.getenv("COINFLEX_API_KEY") coinflex_api_secret = os.getenv("COINFLEX_API_SECRET") # Wallet Tests test_erc20_token_address = os.getenv("TEST_ERC20_TOKEN_ADDRESS") web3_test_private_key_a = os.getenv("TEST_WALLET_PRIVATE_KEY_A") web3_test_private_key_b = os.getenv("TEST_WALLET_PRIVATE_KEY_B") web3_test_private_key_c = os.getenv("TEST_WALLET_PRIVATE_KEY_C") coinalpha_order_book_api_username = "***REMOVED***" coinalpha_order_book_api_password = "***REMOVED***" """ kafka_2 = { "bootstrap_servers": "***REMOVED***", "zookeeper_servers": "***REMOVED***" }
py
1a45ee3a135a36d6f8ee7876e666174b67b84918
from django.db import models from django.contrib.postgres.fields import JSONField class Log(models.Model): started_on = models.DateTimeField(auto_now_add=True) finished_on = models.DateTimeField(blank=True, null=True) finished_successfully = models.NullBooleanField() command_name = models.TextField() args = JSONField(blank=True, null=True) stdout = models.TextField(blank=True, null=True) stderr = models.TextField(blank=True, null=True) traceback = models.TextField(blank=True, null=True) def save( self, force_insert=False, force_update=False, using=None, update_fields=None ): return super().save( force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields, ) def __str__(self): return f'Results of command "{self.command_name}" ran on {self.started_on}'
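One plausible way this model gets used is to wrap a management command and record its outcome. The helper below is a hedged sketch; the import path and helper name are assumptions, not part of the app.

```python
from django.utils import timezone

# from yourapp.models import Log  # import path assumed

def run_logged(command_name, args, func):
    """Run func(), recording the outcome in a Log row (illustrative helper)."""
    log = Log.objects.create(command_name=command_name, args=args)
    try:
        log.stdout = func()
        log.finished_successfully = True
    except Exception as exc:
        log.traceback = repr(exc)
        log.finished_successfully = False
        raise
    finally:
        # Always stamp the finish time and persist, even on failure.
        log.finished_on = timezone.now()
        log.save()
    return log
```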