blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
045849a6dcf37e6dfd8deaa91796aebe1f3f2334 | c6609c161df66949656ca91d8a3d9f4d27a4c399 | /rates_project_04122021/rates_client/rates_client/rate_client.py | 71771ba20ad4886144213eb414be5cd5d7451817 | [
"MIT"
] | permissive | t4d-classes/advanced-python_04122021 | b93ea38c5b35af2b1eb06bc1d5fe6d3f0c1cf39f | 07b27aea8ac3c7170eb66d5243c5cd841f41322c | refs/heads/master | 2023-04-11T11:45:18.114381 | 2021-04-20T12:36:04 | 2021-04-20T12:36:04 | 357,016,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | """ rate client module """
import socket
import sys
import pathlib
import yaml
from rates_shared.utils import read_config
def main() -> None:
"""Main Function"""
try:
config = read_config()
host = config["server"]["host"]
port = int(config["server"]["port"])
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
client_socket.connect((host, port))
print(client_socket.recv(2048).decode("UTF-8"))
while True:
command = input("> ")
if command == "exit":
break
else:
client_socket.sendall(command.encode("UTF-8"))
print(client_socket.recv(2048).decode("UTF-8"))
client_socket.close()
except ConnectionResetError:
print("Server connection was closed.")
except ConnectionRefusedError:
print("Server is not running.")
except KeyboardInterrupt:
pass
sys.exit(0)
if __name__ == '__main__':
main()
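# --- Editor's note: usage sketch (not part of the original module) ---
# main() expects a config read via rates_shared.utils.read_config() containing
# config["server"]["host"] and config["server"]["port"], plus a server listening there.
# A minimal stand-in server for exercising this client locally might look like the
# following; the host, port and echo behaviour are hypothetical, not the real rates_server.
def run_fake_rates_server(host="127.0.0.1", port=5000):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
        server_socket.bind((host, port))
        server_socket.listen(1)
        conn, _ = server_socket.accept()
        with conn:
            conn.sendall(b"Connected to fake rates server")
            while True:
                command = conn.recv(2048)
                if not command:
                    break
                conn.sendall(b"echo: " + command)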
| [
"[email protected]"
] | |
6b542f6ede6cde3cb763bfadd5e6b97c989bfb7f | 051293c8772878ec04d9e7a81839ac1b4b7b01ee | /samples/sample_cli.py | dae2abcd1f8a8089cb4c7e33b9935f299b292489 | [
"Apache-2.0"
] | permissive | siliconjesus/TCGstorageAPI | 055b70b8db1de313137dc40017ed68f745f363f3 | 7fd3096d152e604bb836921f7cf3a03ffc987cab | refs/heads/master | 2022-04-25T02:26:33.950835 | 2020-04-27T17:16:13 | 2020-04-27T17:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,386 | py | #----------------------------------------------------------------------------
# Do NOT modify or remove this copyright
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#****************************************************************************
# \file sample_cli.py
# \brief Sample script showing how to use the TCGstorageAPI
# Note: this script is an example only and uses hardcoded passwords; please change them.
#--------------------------------------------------------------------------------------------------
import os
import sys
import logging
import logging.handlers
import argparse
import shlex
import struct
import uuid
from TCGstorageAPI.tcgapi import PskCipherSuites
from TCGstorageAPI.tcgapi import Sed
from TCGstorageAPI import keymanager as keymanager
import TCGstorageAPI.tcgSupport as tcgSupport
import helper as verifyidentity
import datetime
class Sedcfg(object):
'''
This is a class for performing operations on the SED drive
Attributes:
dev: Device handle of the drive.
'''
#
# WARNING! WARNING! WARNING!
# This sample script uses hardcoded values for the drive credentials.
# This is not a good security practice.
# Change these credential values to something more secure (up to 32-bytes in length)!
#
cred_table = {
'SID': 'ADMIN',
'C_PIN_Admin1': 'ADMIN1',
'Admin1': 'ADMIN1',
'C_PIN_User1': 'USER1',
'User1' : 'USER1',
'User2' : 'USER2',
'C_PIN_User2': 'USER2',
'EraseMaster': 'ERASEMASTER',
'BandMaster0': 'BANDMASTER0',
'BandMaster1': 'BANDMASTER1',
'BandMaster2': 'BANDMASTER2'
}
def __init__(self, dev):
'''
The constructor for the class.
Parameters:
dev:Device handle of the drive.
'''
os_type = {'linux2':self.linux_platform,'linux':self.linux_platform, 'win32':self.windows_platform, 'freebsd12':self.freebsd_platform}
os_type[sys.platform](dev)
logging.basicConfig(
filename=self.log_filename,
format="%(asctime)s %(name)s (%(threadName)s) - %(message)s",
level=logging.DEBUG
)
self.logger = logging.getLogger(self.log_filename)
self.logger.debug('Start sedcfg Logger')
self.psk = None
self.keymanager = keymanager.KeyManager()
# Build the SED object for the drive
self.sed = Sed(self.devname, callbacks=self)
for key, val in list(self.cred_table.items()):
self.keymanager.setKey(key, val)
self.BandLayout = sedbandlayout()
self.BandLayout.bandauth(self)
self.initial_cred = self.sed.mSID
def linux_platform(self, devname):
'''
The function to initialize parameters for the linux platform.
Parameters:
devname:Device handle of the drive.
'''
self.log_filename = os.path.join(os.path.dirname(__file__), 'sedcfg.log')
self.devname = devname
def windows_platform(self, devname):
'''
The function to initialize parameters for the windows platform.
Parameters:
devname:Device handle of the drive.
'''
if getattr(sys, 'frozen', False):
# frozen
self.log_filename = os.path.join(os.path.dirname(sys.executable), 'sedcfg.log')
else:
# unfrozen
self.log_filename = os.path.join(os.path.dirname(__file__), 'sedcfg.log')
# For Windows we need to modify the input value from PD to the physical volume
# Extract PD from string and take the number value to be used and extrapolate into \\.\PhysicalDrive#
if ("PD" not in devname):
print("Please pass drive in as PD<drive number>")
print("Example: Disk 1 is PD1")
exit (1)
drive_number = devname[-1:]
self.devname = "\\\\.\\PhysicalDrive" + drive_number
def freebsd_platform(self, devname):
'''
        The function to initialize parameters for the BSD platform.
Parameters:
        devname: Device handle of the drive.
'''
self.log_filename = os.path.join(os.path.dirname(__file__), 'sedcfg.log')
self.devname = devname
def TlsOperation(self, args=None):
'''
The function to enable and disable TLS on the drive.
Parameters:
            args - Commandline arguments, i.e. enable/disable
'''
if sys.platform=="win32":
print("Tls support not provided for Windows")
return False
        if self.BandLayout.authority[1] == 'Admin1' and self.sed.checkPIN(self.BandLayout.authority[0], self.sed.mSID) == True:
print("Please perform operation changecreds before Tls enable")
return False
auth = (self.BandLayout.authority[0], self.BandLayout.authority[1])
key = tcgSupport.getPsk(self.sed)
if key == None:
print("Pre-Shared Key not generated")
return False
toUse = self.sed.getPskEntry(0)
for entryId in range(4):
psk = self.sed.getPskEntry(entryId)
if psk is None:
print("Drive doesn't support TLS")
return True
if psk.Enabled == True and int(psk.CipherSuite,16) == PskCipherSuites.Value(self.sed.cipherSuite):
if args.enabledisable == 'enable':
print("Tls already enabled")
return True
if args.enabledisable == 'disable':
return self.sed.setPskEntry(auth, toUse, Enabled=False, CipherSuite=self.sed.cipherSuite, PSK=key)
if args.enabledisable == 'enable':
return self.sed.setPskEntry(auth, toUse, Enabled=True, CipherSuite=self.sed.cipherSuite, PSK=key)
elif args.enabledisable == 'disable':
print(" TLS already disabled on the drive")
return True
else:
print("Please enter your input to either enable or disable Tls on the drive")
return False
def device_identification(self):
'''
The function to perform device identity attestation by validating the device certificate and digital signature
Uses Tpersign method to sign an input string to return the signature.
Succeeds if a drive is Seagate specific,fails otherwise
'''
self.sed.fipsCompliance = self.sed.fipsCompliance()
if self.sed.fipsCompliance != None:
print("Drive being tested is a FIPS drive, device identification not supported")
return
# Pull the drive certificate
self.logger.debug('Obtaining Drive certificate')
device_cert = self.sed.get_tperSign_cert()
# Validate the drive_certificate against the root certificate
identity = verifyidentity.VerifyIdentity(device_cert)
identity.validate_drive_cert()
# Send a string to obtain the device signature
string = str(datetime.datetime.today())
self.logger.debug('Performing digital signing operation')
signature = self.sed.tperSign(bytes(string,encoding='utf8'))
# Validate drive signature
verify = identity.validate_signature(string, signature)
if verify == True:
print("Device identification successfull, drive being tested is a Seagate drive")
else:
print("Drive being tested is not a Seagate drive")
return
def take_ownership(self, args=None):
'''
        The function to take ownership of the drive by changing the default Admin credentials, creating band authorities,
        and changing the credentials of the created band authorities.
Parameters:
args - Commandline arguments
Returns:
True: Successful completion of taking drive ownership.
False: Failure of taking drive ownership.
'''
self.logger.debug('Taking ownership of the drive')
if self.sed.checkPIN(self.BandLayout.authority[0], bytes(self.sed.mSID,encoding='utf8')) == False:
print("Revert the drive to factory state,Drive ownership already taken")
return False
# Change PIN of Admin to a new PIN from default value
good = self.sed.changePIN(self.BandLayout.authority[0], self.keymanager.getKey(self.BandLayout.authority[0]), (None, self.initial_cred))
if good is True:
if self.BandLayout.authority[1] == 'Admin1':
# Activate the Locking SP of the drive only for OPAL case
if self.sed.activate(self.BandLayout.authority[0]) == False:
return False
self.initial_cred = tcgSupport.getCred(self.keymanager,'SID')
# Change PIN of Admin of Locking SP
if self.sed.changePIN(self.BandLayout.authority[1], self.keymanager.getKey(self.BandLayout.authority[1]), (None, self.initial_cred), self.BandLayout.auth_objs[0]) == False:
return False
if self.enable_authority() is True:
print('Credentials of the drive are changed successfully')
return True
return False
def enable_authority(self):
'''
The function to enable authorities and change their credentials.
Returns:
            True: Enable Authority successful.
False: Failure to Enable Authority.
'''
self.logger.debug('Enable Authority on the drive')
# Enable two users User1 and User2 and change their password to USER1 and USER2, Bandmaster1 is enabled by default in case of Enterprise.
for obj in self.BandLayout.auth_objs[3:]:
if self.sed.enableAuthority(self.BandLayout.authority[1], True, obj) is True:
continue
else:
return False
        # By default the global range is enabled in Enterprise drives
if self.BandLayout.enabled_bands:
if self.sed.changePIN(self.BandLayout.enabled_bands[0], self.keymanager.getKey(self.BandLayout.enabled_bands[0]), (None, self.initial_cred), self.BandLayout.enabled_bands[0])!= True:
return False
# Change pin of band authorities to a new value
for (obj, auth) in zip(self.BandLayout.auth_objs[1:], self.BandLayout.authority[2:]):
if self.BandLayout.authority[1] == 'Admin1':
auth = 'Admin1'
self.initial_cred = self.keymanager.getKey(auth)
if self.sed.changePIN(auth, self.keymanager.getKey(obj), (None, self.initial_cred), obj) == False:
return False
else:
continue
return True
def configure_bands(self, args):
'''
The function to configure bands on the drive and assign bands to authorities.
Parameters:
args - Commandline arguments:
Bandno: Bandnumber to be configured
RangeStart: RangeStart value
Rangelength:Rangelength value
LockOnReset: True or False
Returns:
            True: Successful completion of configuring bands.
False: Failure to configure bands.
'''
self.logger.debug('Configuring bands on the drive')
if self.sed.checkPIN(self.BandLayout.authority[0], self.sed.mSID) == True:
print("Take ownership of the drive before configuring the drive")
return False
# Enable band and set ranges for band
if self.BandLayout.authority[1] == 'Admin1':
auth = 'Admin1'
else:
auth = 'BandMaster' + args.Bandno
if auth == 'Admin1' and args.Bandno == '0':
print("Global range not present in Opal drives")
return False
elif args.Bandno == '0' and args.RangeStart != None:
print("Can't change range for global locking range")
return False
        elif args.Bandno != '0' and args.RangeStart == None:
print("Please provide RangeStart and RangeLength values")
return False
configure = self.sed.setRange(auth, int(args.Bandno), authAs=(auth, self.keymanager.getKey(auth)), RangeStart=int(args.RangeStart) if args.RangeStart is not None else None, RangeLength=int(args.RangeLength) if args.RangeLength is not None else None,
ReadLockEnabled=1, WriteLockEnabled=1, LockOnReset=args.LockOnReset,
ReadLocked=0, WriteLocked=0)
if auth == 'Admin1' and configure is True:
# Give access to users to read and write unlock range only in OPAL case, Bands are assigned to authorities by default in case of Enterprise.
range_objs = ['ACE_Locking_Range1_Set_RdLocked', 'ACE_Locking_Range1_Set_WrLocked',
'ACE_Locking_Range2_Set_RdLocked', 'ACE_Locking_Range2_Set_WrLocked']
if args.Bandno == '1':
range_obj = range_objs[:2]
else:
range_obj = range_objs[2:]
for objts in range_obj:
ret = self.sed.enable_range_access(objts, 'User' + args.Bandno, auth)
if ret == False:
return False
if configure == True:
print('Band{} is configured'.format(args.Bandno))
return True
return False
def enable_fipsmode(self, args=None):
'''
The function to enable FIPS mode on the drive.
Returns:
            True: Successful completion of enabling FIPS mode.
            False: Failure to enable FIPS mode.
'''
self.logger.debug('Enabling FIPS mode')
# Retrieve FIPS status
if self.fips_status(self.sed) is True:
return True
# Check the credentials of authorities to confirm ownership
for auth in self.BandLayout.authority:
if self.sed.checkPIN(auth, self.sed.mSID) is True:
print("Please take the ownership of the drive before FIPS enable operation")
return False
# Check whether Locking is enabled for any of the bands
if self.BandLayout.authority[1] == 'Admin1':
auth, start = 'Admin1', 1
else:
auth, start = 'Anybody', 0
lock_enabled = False
for bandnumber in range (start, 3):
locking_info, status = self.sed.getRange(bandnumber, auth)
if status is True and locking_info is not None:
if getattr(locking_info, 'ReadLockEnabled') == True or getattr(locking_info, 'WriteLockEnabled') == True:
lock_enabled = True
break
if lock_enabled == False:
print("Please set ReadLockEnabled and WriteLockEnabled to True for any of the enabled bands by performing configure operation")
return False
# Disable Makers Authority
if self.sed.enableAuthority('SID', False, 'C_PIN_Makers') == False:
print("Failed to disable Makers Authority")
return False
# Disable Firmware Download
for uid in self.sed.ports.keys():
p = self.sed.getPort(uid)
if p is not None and hasattr(p, 'Name') and p.Name == 'FWDownload':
if p.PortLocked != True:
if self.sed.setPort(uid, PortLocked=True, LockOnReset=True) == False:
print("Failed to disable firmware download port")
return False
print("FIPS mode of the drive enabled successfully")
return True
def lock_unlock_bands(self, args):
'''
The function to lock and unlock the bands present on the drive
Parameters:
args - Command line arguments:
lock/unlock: Lock/Unlock the band
bandno: Bandnumber
Returns:
            True : Successful completion of the operation.
False: Failure of the operation
'''
if self.sed.checkPIN(self.BandLayout.authority[0], self.sed.mSID) == True:
print("Take ownership of the drive and configure band before lock/unlock")
return False
if args.bandno == '0' and self.BandLayout.authority[1] == 'Admin1':
print("Global range not present in Opal drives")
return False
Range_info = self.sed.getRange(int(args.bandno), self.BandLayout.authority[1])
if Range_info == False:
return False
print("Band state before lock/unlock =\n{}".format(Range_info[0]))
self.logger.debug('Locking/Unlocking bands on the drive')
if(args.lockunlock == "lock"):
lock_unlock = 1
if (Range_info[0].ReadLocked == 1):
print("Band{} already in locked state".format(args.bandno))
return True
elif(args.lockunlock == "unlock"):
lock_unlock = 0
if (Range_info[0].ReadLocked == 0):
print("Band{} already in unlocked state".format(args.bandno))
return True
# Perform a lock-unlock on the range
auth = 'User' + args.bandno if self.BandLayout.authority[1] == 'Admin1' else 'BandMaster' + args.bandno
lock_unlock = self.sed.setRange(auth, int(args.bandno), authAs=(auth, self.keymanager.getKey(auth)), ReadLocked=lock_unlock, WriteLocked=lock_unlock)
if lock_unlock == True:
print("Band{} {}ed successfully by {}".format(args.bandno, args.lockunlock, auth))
#print(self.sed.getRange(int(args.bandno), self.BandLayout.authority[1])[0])
return True
print("Range not configured properly")
return False
def datastore(self, args):
'''
The function to read/write small amount of data to the datastore on the drive.
Returns:
            True: Successful completion of read/write data.
False: Failure to read/write data.
'''
auth = self.BandLayout.authority[1]
self.table_number = 0
if auth == 'Admin1' and self.sed.checkPIN('SID', self.sed.mSID):
print("Please perform operation changecreds before using the datastore")
return False
for entryId in range(4):
psk = self.sed.getPskEntry(entryId)
if psk is None:
break
if psk.Enabled == True and psk.CipherSuite == self.sed.cipherSuite:
print("Please disable Tls")
return False
self.data = nvdata = {
'fips': self.sed.fipsCompliance , # Store the FIPS status of the drive.
'iv': uuid.uuid4().bytes, # initialization vector used for hashes/wrappings
'Ids': [None, None, None, None], # keyID for each credential
}
self.sed.data_length = (len(tcgSupport.serialize(self.data)))
self.logger.debug('Reading/Writing data to the datastore on the drive')
if args.readwrite == "write":
if auth == 'Admin1':
if self.sed.writeaccess('User1', self.table_number) == False:
return False
if self.sed.writeData(self.BandLayout.authority[2], self.data) == True:
return True
return False
if args.readwrite == "read":
if auth == 'Admin1':
if self.sed.readaccess('User1', self.table_number) == False:
return False
readData = self.sed.readData(self.BandLayout.authority[2])
if readData == None:
print("DataStore is empty, no data to read")
return True
elif readData == False:
return False
print(readData)
return True
def erase_drive(self, args):
'''
The function to revert the drive back to factory state.
Parameters:
            args - Commandline arguments.
psid: PSID number of the drive
Returns:
            True : Successful completion of the operation.
False: Failure of the operation
'''
self.logger.debug('Erasing the drive')
result = self.sed.revert(args.psid)
if (result == True):
return True
else:
print("Wrong PSID")
return False
@staticmethod
def fips_status(sed):
'''
The function to retrieve the FIPS compliance and FIPS operating mode from the drive
Parameters:
sed - SED object
Returns:
            True - if the drive does not support FIPS, or is already operating in FIPS mode (nothing to enable).
            False - if the drive supports FIPS but is not yet operating in FIPS mode.
'''
# Checking Fips Compliance Descriptor
if sed.fipsCompliance == None or sed.fipsCompliance["standard"] != "FIPS 140-2" and sed.fipsCompliance["standard"] != "FIPS 140-3":
print("Drive doesn't support FIPS 140-2 or FIPS 140-3 Standard")
return True
# Checking FIPS approved mode
if sed.fipsApprovedMode is True:
print("Drive operating in FIPS mode")
return True
class sedbandlayout(object):
'''
This a class defining the band Layout of the drive.
'''
# Class can be modified to add multiple users in a dynamic fashion
def __init__(self):
'''
The function defines parameters for the BandLayout of the drive.
'''
self.Ent_auth = ['SID', 'EraseMaster', 'BandMaster1', 'BandMaster2']
self.Opal_auth = ['SID', 'Admin1', 'User1', 'User2']
self.Ent_objs = ['EraseMaster', 'BandMaster1', 'BandMaster2', 'C_PIN_BandMaster1', 'C_PIN_BandMaster2']
self.Opal_objs = ['C_PIN_Admin1', 'C_PIN_User1', 'C_PIN_User2', 'User1', 'User2']
def bandauth(self, sedbandcfg):
'''
The function to choose between Enterprise and Opal band layout.
'''
if sedbandcfg.sed.SSC == 'Enterprise':
self.authority = self.Ent_auth
self.auth_objs = self.Ent_objs
self.enabled_bands = ['BandMaster0']
else:
self.authority = self.Opal_auth
self.auth_objs = self.Opal_objs
self.enabled_bands = None
class argParser(object):
'''
This is a class to parse the command line arguments.
'''
prog = 'sample_cli'
description = 'Sample CLI that implements TCG protocol for SED operations'
def getParser(self):
'''
The Function to parse command line arguments and initialize operations.
'''
main = self.main = argparse.ArgumentParser(
prog=self.prog,
description=self.description,
)
main.add_argument('device', help='Specific wwn or device names of drives to operate on')
subparser = main.add_subparsers(title='subcommand')
enableTls = subparser.add_parser('Tls', help='EnableTls on the Drive')
enableTls.add_argument('enabledisable', help='enable or disable Tls communication')
enableTls.set_defaults(operation=Sedcfg.TlsOperation)
datastore = subparser.add_parser('store', help='Use the DataStore on the Drive')
datastore.add_argument('readwrite', help='Read/Write the data from the DataStore')
datastore.set_defaults(operation=Sedcfg.datastore)
revert = subparser.add_parser('revert', help='Revert the drive back to factory state')
revert.add_argument('psid', help='PSID of the drive used to revert the drive back to factory state')
revert.set_defaults(operation=Sedcfg.erase_drive)
changecreds = subparser.add_parser('changecreds', help='Change the drive default credentials')
changecreds.set_defaults(operation=Sedcfg.take_ownership)
configure = subparser.add_parser('configure', help='Configure the bands by setting new band ranges')
configure.add_argument('Bandno', help='Band number to configure')
configure.add_argument('--RangeStart', help='Rangestart value, Default(4097)')
configure.add_argument('--RangeLength', help='RangeLength value, Default(219749770)')
configure.add_argument('LockOnReset', help='True or False value for LockOnReset')
configure.set_defaults(operation=Sedcfg.configure_bands)
enablefips = subparser.add_parser('enablefips', help='Enable FIPS mode on the fips drive')
enablefips.set_defaults(operation=Sedcfg.enable_fipsmode)
bandops = subparser.add_parser('bandops', help='Perform a lock or an unlock on the band')
bandops.add_argument('lockunlock', help='Lock, Unlock the band')
bandops.add_argument('bandno', help='band number to be locked unlocked')
bandops.set_defaults(operation=Sedcfg.lock_unlock_bands)
return main
def doParse(self, args):
'''
The function to obtain arguments.
'''
if args is not None:
args = shlex.split(args)
else:
args = sys.argv[1:]
namespace = self.getParser().parse_args(args)
return namespace
def main(args=None):
drive_namespace = argParser().doParse(args)
sedcfg = Sedcfg(drive_namespace.device)
if sedcfg.sed.SSC != 'Enterprise' and sedcfg.sed.SSC != 'Opalv2':
print("Unable to retrieve SED functionality of the device. Enable OS to allow secure commands ")
return 1
sedcfg.device_identification()
rv = drive_namespace.operation(sedcfg, drive_namespace)
if rv is not True:
print("Operation failed")
return 1
else:
print("Operation completed successfully")
if __name__ == "__main__":
sys.exit(main())
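# --- Editor's note: invocation sketch (not part of the original sample) ---
# The CLI is normally driven from a shell, e.g. "python sample_cli.py PD1 changecreds".
# Because doParse() passes a non-None argument string through shlex.split(), the same
# operations can also be scripted; the device handle below is hypothetical, and these
# calls change drive security state, so only run them against a disposable test drive.
def _demo_invocations():
    main('PD1 changecreds')                 # take ownership (Windows-style device name)
    main('PD1 configure 1 --RangeStart 4097 --RangeLength 219749770 True')
    main('PD1 bandops lock 1')              # lock band 1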
| [
"[email protected]"
] | |
58ef79ce89e1f6bd6f29e494daffced31fdb85b4 | f51f978026c9b04f78ede0a7465cf0db0a863777 | /Packages/numerical.py | cb77cf4829203f873f68cd4cb8dbfdb50c2bd828 | [] | no_license | TurakaSrilakshmi123/Problem-Solving-Programming-in-Python-June-2019 | ef7397caa5aad561155784e48e5dedfe82f46e1f | de943b797e2345b9d3b187dd588b2d689097618e | refs/heads/master | 2020-06-03T08:34:32.422311 | 2019-06-27T04:16:35 | 2019-06-27T04:16:35 | 191,511,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | def isPrime(n):
    # numbers below 2 are not prime
    if n < 2:
        return False
    if n == 2:
        return True
    for i in range(2, n//2 + 1):
        if n % i == 0:
            return False
    return True
def numberOfPrimeFactors(n):
if isPrime(n):
return 1
count = 0
for i in range(2,n//2+1):
if(isPrime(i) and n % i == 0):
count = count + 1
return count | [
"[email protected]"
] | |
6ebdda91bfa552e387b3967c6aef821e345699a3 | 97e6c6c61154f7d40846c01b34e6cec7bb09d980 | /quickdb/sql2mapreduce/agg.py | 465fef43f1a3fc8014a4bf6c6e5b462102ac9185 | [] | no_license | michitaro/quickdb | f975f0a979517299911f797c519fea78f9c48168 | 6444795ddb869545bf166c870f5ba0d1f0d56944 | refs/heads/master | 2020-06-15T14:00:38.701760 | 2020-02-19T01:13:07 | 2020-02-19T01:13:07 | 195,318,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,521 | py | import abc
from quickdb.datarake.safeevent import SafeEvent
from typing import (
Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Type, Union, cast)
import numpy
from quickdb.datarake.interface import Progress, ProgressCB, RunMakeEnv
from quickdb.sql2mapreduce.numpy_context import NumpyContext
from quickdb.sql2mapreduce.sqlast.sqlast import (
ColumnRefExpression, Context, Expression, FuncCallExpression, Select,
SqlError)
from quickdb.sspcatalog.patch import Patch
class AggCall(metaclass=abc.ABCMeta): # pragma: no cover
@abc.abstractmethod
def __init__(self, args: List[Expression], named_args: Dict[str, Expression], agg_star: bool):
...
@abc.abstractmethod
def mapper(self, context: Context):
...
@abc.abstractmethod
def reducer(self, a, b):
...
@abc.abstractmethod
def finalizer(self, a):
...
@property
def subaggrs(self) -> List['AggCall']:
return []
def result(self, context: 'AggContext'):
return context._agg_results[self][context._group_value]
class AggContext(NumpyContext):
def __init__(self, patch: Patch, agg_results: Dict, group_value, shared: Dict = None):
super().__init__(patch, shared=shared)
self._group_value = group_value
self._agg_results = agg_results
def evaluate_FuncCallExpression(self, e: FuncCallExpression):
if e in self._agg_results:
return self._agg_results[e][self._group_value]
return super().evaluate_FuncCallExpression(e)
def sliced_context(self, slice: Union[slice, numpy.ndarray], group_value):
return AggContext(self._patch[slice], self._agg_results, group_value, self._shared)
@property
def size(self):
return self._patch.size
class FinalizeContext(AggContext):
def __init__(self, agg_results: Dict, group_value, shared: Dict = None):
super().__init__(None, agg_results, group_value, shared=shared) # type: ignore
class AggQueryResult(NamedTuple):
group_by: Dict[Any, List]
target_names: List[str]
class PickOneAggCall(AggCall):
def __init__(self, args: List[Expression], named_args: Dict[str, Expression]):
self.a = args[0]
def mapper(self, context: Context):
a = self.a(context)
if len(numpy.unique(a)) >= 2: # pragma: no cover
raise SqlError(f'Non unique values in {self.a}')
return a[0]
def reducer(self, a, b):
if a != b: # pragma: no cover
raise SqlError(f'Non unique values in {self.a}')
return a
def finalizer(self, a):
return a
def run_agg_query(select: Select, run_make_env: RunMakeEnv, shared: Dict = None, progress: ProgressCB = None, interrupt_notifiyer: SafeEvent = None):
from .agg_functions import agg_functions
make_env = '''
from quickdb.sql2mapreduce.agg import agg1_env
rerun, mapper, reducer, finalizer = agg1_env(agg, select, agg_results, shared)
'''
check_select(select)
aggs: List[Tuple[Optional[Expression], AggCall]] = []
def pick_aggs(e: Expression):
if isinstance(e, FuncCallExpression) and e.name in agg_functions:
cls = cast(Type[AggCall], agg_functions[e.name]) # We need `cast` due to pyright's bug
a = cls(e.args, e.named_args, e.agg_star)
walk_subaggrs(a, lambda sa: aggs.append((None, sa)))
aggs.append((e, a))
for target in select.target_list:
target.val.walk(pick_aggs)
if is_context_dependent(target.val):
aggs.append((target.val, PickOneAggCall([target.val], {})))
if len(aggs) == 0:
raise SqlError(f'No aggregation operation')
# run aggregation queries
agg_results: Dict[Union[Expression, AggCall], Any] = {}
for i, (e, agg) in enumerate(aggs):
def progress1(p1: Progress):
if progress:
progress(Progress(done=p1.done + i * p1.total, total=p1.total * len(aggs)))
env_context = {'agg': agg, 'select': select, 'agg_results': agg_results, 'shared': shared}
result = run_make_env(make_env, env_context, progress1, interrupt_notifiyer)
agg_results[agg] = result
if e:
agg_results[e] = result
group_values = next(iter(agg_results.values())).keys()
target_list = {}
for gv in group_values:
context = FinalizeContext(agg_results, gv, shared=shared)
target_list[gv] = [
agg_results[t.val][gv] if t.val in agg_results else t.val(context)
for t in select.target_list
]
return AggQueryResult(
target_list,
[t.name or f'col{i}' for i, t in enumerate(select.target_list)],
)
def is_context_dependent(root: Expression):
from .agg_functions import agg_functions
context_dependent_expressions: List[Expression] = []
def probe(e: Expression):
if isinstance(e, ColumnRefExpression):
context_dependent_expressions.append(e)
def is_agg(e: Expression):
return isinstance(e, FuncCallExpression) and e.name in agg_functions
root.walk(probe, is_agg)
return len(context_dependent_expressions) > 0
def check_select(select: Select): # pragma: no cover
if select.sort_clause:
raise SqlError(f'ORDER clause is not allowed in aggregation query')
if select.limit_count:
raise SqlError('LIMIT clause is not allowed in aggregation query')
if select.limit_offset is not None:
raise SqlError('OFFSET is not supported')
def walk_subaggrs(a: AggCall, f: Callable[[AggCall], None]):
q = a.subaggrs
while len(q) > 0:
a = q.pop(0)
f(a)
q += a.subaggrs
MapperResult = Dict
def agg1_env(agg: AggCall, select: Select, agg_results: Dict, shared: Dict):
rerun = select.from_clause.relname
def mapper(patch: Patch) -> MapperResult:
context = AggContext(patch, agg_results, group_value=None, shared=shared)
if select.where_clause:
context = context.sliced_context(select.where_clause(context), None)
if select.group_clause:
mapped_values = {}
group_values = [gc(context) for gc in select.group_clause]
gvs, gi = multi_column_unique(group_values)
for i, gv in enumerate(gvs):
mapped_values[gv] = agg.mapper(context.sliced_context(gi == i, gv))
return mapped_values
else:
if context.size > 0:
return {None: agg.mapper(context)}
else:
return {}
def reducer(a: MapperResult, b: MapperResult):
for k, v in b.items():
if k in a:
a[k] = agg.reducer(a[k], v)
else:
a[k] = v
return a
def finalizer(a):
return {k: agg.finalizer(v) for k, v in a.items()}
return rerun, mapper, reducer, finalizer
def multi_column_unique(arr: List[numpy.ndarray]) -> Tuple[List[Tuple], numpy.ndarray]:
'''
Returns V, I
V: list of group values
I: group index
'''
if len(arr) == 1: # just for performance
V, I = numpy.unique(arr[0], return_inverse=True)
V = [(v,) for v in V]
else:
u = [numpy.unique(a, return_inverse=True) for a in arr]
ii = numpy.array([i for v, i in u]).T
vv = [v for v, i in u]
II, I = numpy.unique(ii, axis=0, return_inverse=True)
V = [tuple(vv[k][l] for k, l in enumerate(j)) for j in II]
return V, I # type: ignore
| [
"[email protected]"
] | |
abe2dcd4cac371c04bab58961142da554d221fc3 | 6cc14ff36ef4a6f46000e2b5b1ecbe9d9ffa289d | /Python/Regular_Expressions.py | 57420cb310a87f716551312c2c8dc9f51ab1d162 | [] | no_license | mominpasha/Practice-Code-Samples | bc45b5b49b68252c67b57a7a031b27d1018c6474 | f241a4e35a35d414fee0ee529dd090a438d0dbee | refs/heads/master | 2021-08-28T14:14:30.519680 | 2017-12-12T12:42:24 | 2017-12-12T12:42:24 | 103,299,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | #%%
import re
result = re.match("Momin","My name is Momin Pasha")
print(result)
#%%
result = re.search("Momin","My name is Momin Pasha")
print(result)
#%%
result.group()
result.start()
result.end()
result.span()
#%%
result = re.findall("the","The history of the great king of the kingdom of the apes lies in the east of the west of the world")
print(result)
#%%
result = re.search("\d","My name is Momin Pasha. My ID is 3301")
print(result.group(),result.span())
result = re.search("\D","My name is Momin Pasha")
print(result.group(),result.span())
result = re.search("\s","My name is Momin Pasha")
print(result.group(),result.span())
result = re.search("\S","My name is Momin Pasha")
print(result.group(),result.span())
result = re.search("\w"," My name is Momin Pasha")
print(result.group(),result.span())
result = re.search("\W","My name is Momin Pasha")
print(result.group(),result.span())
#%%
result = re.search(".","My name is Momin Pasha")#matches any charcter other than newline character
print(result.group(),result.span())
result = re.search(".","\nMy name is Momin Pasha")
print(result.group(),result.span())
#%%
#[] specifies a character class one wishes to match
result = re.search("[1$]","My name is Momin Pasha")
print(result.group(),result.span())
#%%
po = re.compile("momin") #returns a pattern object which can be used anywhere
print(re.search(po,"my name is momin pasha"))
#%%
po = re.compile("wipro")
string = "wipro wipro wipro wipro wipro"
re.split(po,string) | [
"[email protected]"
] | |
f9ed10bc581a959ecacc6f7e395dd6fef7ea68b0 | 016f96e528141db111f15a4c00a0fc46e61cdff6 | /lib/emailses/urls.py | 5fb0d6077dd92bf4cd7ecabb113f5b0498156de9 | [
"BSD-2-Clause"
] | permissive | hdknr/emailqueue | 3d02407b06a492cdf9b89fde2b06c766cd500555 | 05e108562f4fb612440f769973b9a3d02c11afcd | refs/heads/master | 2021-01-23T20:13:04.807258 | 2015-10-08T08:41:51 | 2015-10-08T08:41:51 | 20,243,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.conf.urls import url
import views
urlpatterns = [
url(r'(?P<topic>.+)', views.notify, name='emailses_notify'),
]
| [
"[email protected]"
] | |
193e2e78f289aa1bb05e4b344b5c7d17b61c984e | 6b0161214e4db57a81d3b4432d82c874c7106f13 | /couchbase/_pyport.py | 20700a813860e5ed162b08ac93b31aa54775c92d | [
"Apache-2.0"
] | permissive | neilalbrock/couchbase-python-client | de9d6115d1240f56f4cb7b57aee7e8765c5c7d1f | 95789e3d49c42613fe719bbd02e6d9ad30216334 | refs/heads/master | 2021-01-15T18:51:31.311163 | 2013-10-14T13:58:28 | 2013-10-14T13:58:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | #
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module contains various mappings for modules which have had
# their names changed across Python major versions
try:
import urllib.parse as ulp
from urllib.request import urlopen
from urllib.parse import parse_qs
except ImportError:
import urllib as ulp
from urllib2 import urlopen
from urlparse import parse_qs
try:
long = long
except NameError:
long = int
try:
xrange = xrange
except NameError:
xrange = range
try:
basestring = basestring
except NameError:
basestring = str
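# --- Editor's note: usage sketch (not part of the original module) ---
# Callers can import these shimmed names (e.g. "from couchbase._pyport import ulp, long")
# and run unchanged on Python 2 and 3; the values below are only illustrative.
def _pyport_demo():
    encoded = ulp.urlencode({'stale': 'false', 'limit': long(10)})
    print(parse_qs(encoded))
    for i in xrange(3):
        assert isinstance(str(i), basestring)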
| [
"[email protected]"
] | |
bf90be02d2fafe1561a49ed955d60d56bb46aba3 | 466114a10af7b2e148cc39a4af1461fd169d42af | /apps/movies/urls.py | f40b4e2921541aaa75d58c8c657d193fa9fef670 | [] | no_license | peter-zsn/api_test | 0c46b91d7ed5f98d15c67c5caa9cee994ea2e880 | 4bc4e866a79810affa5020d9da8fbabcec2c7bb3 | refs/heads/master | 2023-02-24T10:32:46.156718 | 2021-01-25T08:02:08 | 2021-01-25T08:02:08 | 209,009,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | # coding: utf-8
from django.conf.urls import url, include
from apps.movies import views
urlpatterns = [
    url(r'^getList$', views.getList), # endpoint for fetching the movie list
] | [
"[email protected]"
] | |
3055ffae2a22fc351a17eea8579fba30bbc3769e | d6f67251c016e7d105f8ce2fd3a94af449657e8e | /2021/day_02/__init__.py | 9ca25cf3a85cf0452b140ce915fa65f028fe97ab | [] | no_license | DanaMC18/advent_of_code | 6f8dedec0907f34bd99a4b50798cdbb852f800b6 | 32ab8170e25bf375db8ddc5e42fb6cc9379fce4a | refs/heads/master | 2022-12-15T14:27:16.524364 | 2022-12-08T04:14:52 | 2022-12-08T04:14:52 | 225,783,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | """Init 2021 day 2."""
| [
"[email protected]"
] | |
7e768dbc4e7dc51e1a08a60fbd0065f7bdc79ec2 | c2ebcd88d1ba2324530ab9e6ff89a63a5242900f | /plugins/gavanna_plugin.py | c535bd0920161def32ca6b0f86235ac54ba25cc1 | [
"MIT"
] | permissive | joel-wright/DDRPi | 83ddc326f7be4871f46a076909452be8ed4bcf8b | 9d51f0f8702af0fba068f0346e051d6579e05e16 | refs/heads/master | 2016-09-06T15:08:38.974215 | 2013-02-17T16:49:24 | 2013-02-17T16:49:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | __authors__ = ['Andrew Taylor']
import logging
import pygame
import random
import time
from datetime import datetime
from DDRPi import DDRPiPlugin
class GavannaPlugin(DDRPiPlugin):
pulse_rate = 2000
pulse_increasing = 1
pulse_last_ratio = 0
def post_invalidate(self):
self.changed = 1
def configure(self, config, image_surface):
"""
This is an example of an end user module - need to make sure we can get
the main image surface and config to write to them both...
"""
self.ddrpi_config = config
self.ddrpi_surface = image_surface
self.clock = pygame.time.Clock()
def __name__(self):
return 'Text Plugin'
def start(self):
"""
Start writing to the surface
"""
# Setup recurring events
return None
def stop(self):
"""
Stop writing to the surface and clean up
"""
# Stop recurring events
return None
def pause(self):
return None
def resume(self):
self.post_invalidate()
return None
def handle(self, event):
"""
Handle the pygame event sent to the plugin from the main loop
"""
return None
def draw_heart(self, colour, x_pos, y_pos, fill):
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
heart = (0x06, 0x09, 0x11, 0x22, 0x11, 0x09, 0x06);
if (fill > 0):
heart = (0x06, 0x0F, 0x1F, 0x3E, 0x1F, 0x0F, 0x06);
heart_height = 6
heart_width = len(heart)
for x in range(0, heart_width):
for y in range(0, heart_height):
pixel_value = (heart[x] >> y) & 0x01
if (pixel_value == 1):
self.ddrpi_surface.draw_tuple_pixel(x+x_pos,y+y_pos, colour)
return None
def update_surface(self):
"""
Write the updated plugin state to the dance surface and blit
"""
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
for x in range(0,w):
for y in range(0,h):
self.ddrpi_surface.draw_tuple_pixel(x,y, (0,0,0))
self.ddrpi_surface.draw_text("Gav", (0xFF,0xFF,0xFF), 3, 0)
self.ddrpi_surface.draw_text("Anna", (0xFF,0xFF,0xFF), 0, 11)
# Calculate the red value for the heart's centre
ratio = int(255.0 * (float(pygame.time.get_ticks() % self.pulse_rate) / float(self.pulse_rate)))
# Increase then decrease the value
self.pulse_increasing = 1
pulse_mod = pygame.time.get_ticks() % (2*self.pulse_rate)
		# Work out whether the pulse is currently in its rising or falling half
		if (pulse_mod > self.pulse_rate):
self.pulse_increasing = -1
# Work out the red value
red_value = ratio
if (self.pulse_increasing == -1):
red_value = 255 - ratio
# Draw the fading heart...
self.draw_heart((red_value, 0x00, 0x00), w/2 -4, h/2 - 2, 1)
# .. and a solid outline
self.draw_heart((0xFF, 0x00, 0x00), w/2 -4, h/2 - 2, 0)
		# Push the rendered frame to the dance surface
self.ddrpi_surface.blit()
# Rate limit it
self.clock.tick(25)
def display_preview(self):
"""
Construct a splash screen suitable to display for a plugin selection menu
"""
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
# Background is black
for x in range(0,w):
for y in range(0,h):
self.ddrpi_surface.draw_tuple_pixel(x,y, (0,0,0))
# Draw a solid red heart in the middle (ish)
self.draw_heart((0xFF, 0x00, 0x00), w/2 -4, h/2 - 2, 1)
self.ddrpi_surface.blit()
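# --- Editor's note: illustration (not part of the original plugin) ---
# draw_heart() stores the sprite as seven column bitmasks, where bit y of heart[x]
# selects the pixel at (x, y). A standalone way to visualise that encoding:
def _print_heart_bitmap():
	heart = (0x06, 0x0F, 0x1F, 0x3E, 0x1F, 0x0F, 0x06)  # the filled variant used by draw_heart()
	for y in range(6):
		print(''.join('#' if (heart[x] >> y) & 0x01 else '.' for x in range(len(heart))))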
| [
"[email protected]"
] | |
6c1b90f51e34233524e5e26a5c9a8f7430194532 | 30e7adc325de1f52c624047dfd8775d9713ef74f | /bin/rename_contigs.py | fbfd52a6e67c1443ba7e599e61f23f7daa20e904 | [
"MIT"
] | permissive | Arcadia-Science/metagenomics | f44572e256baf7125d48c7984d2399754605c866 | 3f49be3a1706a9561a68051d34065c2464acf726 | refs/heads/main | 2023-06-11T08:31:48.251809 | 2023-05-25T18:28:00 | 2023-05-25T18:28:00 | 550,378,025 | 27 | 2 | MIT | 2023-05-24T21:51:22 | 2022-10-12T16:57:58 | Nextflow | UTF-8 | Python | false | false | 1,926 | py | #!/usr/bin/env python3
import argparse
import os
import sys
import gzip
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
# Arguments
def parse_args(args=None):
Description = "Rename contigs in a FASTA file after assembly."
Epilog = "Example usage: rename_contigs.py <FASTA> <ASSEMBLER> <OUTPUT>"
parser = argparse.ArgumentParser(description="Rename assembled contig headers produced from assembly programs")
parser.add_argument("--input", metavar="FASTA", help="Assembly file in FASTA format")
parser.add_argument(
"--assembler",
metavar="ASSEMBLER",
nargs="?",
help="Assembly algorithm that produced the FASTA file for propagating in contig names",
)
parser.add_argument(
"--output",
metavar="OUTPUT",
help="Output name of reconfigured assembly FASTA file with new contig header names",
)
return parser.parse_args(args)
# Read in fasta file and rename contigs
def rename_contigs(fasta, assembler, output):
contig_id = 0
name = os.path.basename(fasta).replace(".fasta.gz", "").strip().splitlines()[0]
with gzip.open(output, "wb") as outfile:
with gzip.open(fasta, "rt") as handle:
for seq_record in SeqIO.parse(handle, "fasta"):
contig_id = contig_id + 1
newid = str(contig_id).zfill(7)
if assembler is not None:
header = ">" + assembler + "_" + name + "_contig_" + str(newid) + "\n"
else:
header = ">" + name + "_contig_" + str(newid) + "\n"
seq = str(seq_record.seq) + "\n"
outfile.write(header.encode())
outfile.write(seq.encode())
handle.close()
outfile.close()
def main(args=None):
args = parse_args(args)
rename_contigs(args.input, args.assembler, args.output)
if __name__ == "__main__":
sys.exit(main())
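# --- Editor's note: usage sketch (not part of the original script) ---
# main() takes an argument list, so the renaming step can be driven from Python as well
# as from the command line; the file and assembler names below are hypothetical.
def _demo_rename():
    main([
        "--input", "sample1.fasta.gz",
        "--assembler", "megahit",
        "--output", "sample1_renamed.fasta.gz",
    ])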
| [
"[email protected]"
] | |
219d2f333e3269a900b48b27c08cc48a24363b80 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02707/s167454120.py | 27b53e15e36617817a3cd3b043a523410132faad | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | import math
n = int(input())
c= [0]*n
s = map(int,input().split())
for i in s:
c[i-1]+=1
for i in c:
print(i) | [
"[email protected]"
] | |
d0be37c6a1df802e6cf7ddcc9ab9bcf87b07dd64 | 3aed304ea163b9b910133bb8e33c29028a5ca8c0 | /setup.py | 8d64c26b2060fb79e23fda1bcf245264cc5fab5a | [
"ISC"
] | permissive | arnaudrenaud/django-djaffar | cce4e3cb6922783d9e2dc92079dedd46cb0c20bd | 068851951729d4a1de0b8beca4fbde510517ac59 | refs/heads/master | 2022-07-12T22:06:52.456444 | 2017-01-04T11:28:21 | 2017-01-04T11:28:21 | 77,291,688 | 1 | 0 | ISC | 2019-11-15T09:28:17 | 2016-12-24T16:37:59 | Python | UTF-8 | Python | false | false | 2,512 | py | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
package = 'djaffar'
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
setup(
name='django-djaffar',
version='0.1.10',
packages=get_packages(package),
package_data=get_package_data(package),
include_package_data=True,
license='BSD License',
description='An asynchronous user activity tracking API for Django.',
long_description=README,
url='https://github.com/arnaudrenaud/django-djaffar',
download_url='https://github.com/arnaudrenaud/django-djaffar/tarball/0.1.10',
author='Arnaud Renaud',
author_email='[email protected]',
install_requires=[
'Django>=1.8',
'djangorestframework>=3.3',
'python-dateutil>=2.6',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
],
test_suite="runtests.runtests",
)
| [
"[email protected]"
] | |
2f0c54950c0ab91c97cc12a6797f81d91b85ec0b | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/stmt_for_str_same_var-9.py | f87144cd7e69039ad5482181a2f622e922dcd6a8 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | x:str = $STRING
for x in x:
print(x)
| [
"[email protected]"
] | |
84c1a89c806b2cba9d39dadc938e2b05f0a7bd4f | e7ce192fb7a4c980af71b42a3a89383f4a3f1396 | /EmployeeWage.py | b48614e6adcc9a741540e97e4906d3b0d41c2965 | [] | no_license | Archana-Bhamare/Employee_Wage_Python | 112c2152c9f97b42ddc5b93da1d40e8ae66ae213 | 7ece8ccdcbbc4ba1ba0237e4d3d1d4341a41760a | refs/heads/master | 2023-01-19T00:45:09.222132 | 2020-11-29T13:11:18 | 2020-11-29T13:11:18 | 316,681,224 | 0 | 0 | null | 2020-11-29T13:11:19 | 2020-11-28T07:33:44 | Python | UTF-8 | Python | false | false | 1,685 | py | import random
class EmployeeWage:
IS_FULL_TIME = 1
IS_PART_TIME = 2
empHours = 0
EMP_WAGE_PER_HOUR = 20
NUM_OF_WORKING_DAYS = 20
MAX_HRS_IN_MONTH = 100
dailyWages = []
day = {}
def checkEmpAttendance(self):
attendance = random.randint(0, 2)
if attendance == EmployeeWage.IS_FULL_TIME:
EmployeeWage.empHours = 8
print("Employee is present for Full Time")
elif attendance == EmployeeWage.IS_PART_TIME:
EmployeeWage.empHours = 4
print("Employee is present for Part Time")
else:
EmployeeWage.empHours = 0
print("Employee is Absent")
def calculateMonthlyWages(self):
totalSalary = 0
totalEmpHours = 0
totalWorkingDays = 0
while totalEmpHours < EmployeeWage.MAX_HRS_IN_MONTH and totalWorkingDays < EmployeeWage.NUM_OF_WORKING_DAYS:
totalWorkingDays += 1
self.checkEmpAttendance()
totalEmpHours += EmployeeWage.empHours
            dailyWage = EmployeeWage.EMP_WAGE_PER_HOUR * EmployeeWage.empHours
print(f"Day : {totalWorkingDays}\tEmployee Hours : {EmployeeWage.empHours}")
print(f"Employee Daily Wage : {dailyWage}")
EmployeeWage.dailyWages.append(dailyWage)
totalSalary = EmployeeWage.EMP_WAGE_PER_HOUR * totalEmpHours
for (i,item) in enumerate(EmployeeWage.dailyWages,start=1):
print("Day : " + str(i) + "\tDailyWage : " + str(item))
print(f"Employee Wage for Month is : {totalSalary}")
if __name__ == "__main__":
employee = EmployeeWage()
employee.calculateMonthlyWages()
| [
"[email protected]"
] | |
b055872ca036b87a5294a9e1ff3502871bb0dbd5 | 6b7efc1fbc4ec9fa47e9b91876a8abd319fa762a | /manage.py | 1b022f576275907781a701bd98e87bf82c21ccf1 | [] | no_license | hectwor/API_sneapp-DJango | 6cd6bbca12c4c337c44edfdac141c7ba65a93fb9 | a5a3cc177050b23dbf6232929db215542e24b434 | refs/heads/master | 2020-03-23T06:46:53.795678 | 2018-07-17T03:59:43 | 2018-07-17T03:59:43 | 141,229,235 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "API_sneapp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
b274c5b6d77f22cc1da6e46b67d0d9571653bfef | b82c7b44c28e27519626e29bb74ec0d7dfae1177 | /snmpdesk/traffic.py | a6c7339e4b456920bcee3eb3fa0567dfd51bab73 | [] | no_license | uralbash/snmpdesk | 77f59f80ce550390cb7689cb737f4899333d0b4c | afde63a3060d4405e1033092900229b205bf9e6d | refs/heads/master | 2020-04-06T04:22:12.387805 | 2012-03-15T06:24:23 | 2012-03-15T06:24:23 | 3,039,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | import sys
import collections
from pysnmp.entity.rfc3413.oneliner import cmdgen
'''def datafrommib(mib, community, ip):
value = tuple([int(i) for i in mib.split('.')])
generator = cmdgen.CommandGenerator()
comm_data = cmdgen.CommunityData('server', community, 1) # 1 means version SNMP v2c
transport = cmdgen.UdpTransportTarget((ip, 161))
real_fun = getattr(generator, 'nextCmd')
res = (errorIndication, errorStatus, errorIndex, varBindTable)\
= real_fun(comm_data, transport, value)
if not errorIndication is None or errorStatus is True:
print "Error: %s %s %s %s" % res
yield None
else:
for varBindTableRow in varBindTable:
# varBindTableRow:
# in: [(ObjectName(1.3.6.1.2.1.2.2.1.10.8), Counter32(180283794))]
data = varBindTableRow[0]
port = data[0]._value[len(value):]
octets = data[1]
yield {'port': port[0], 'octets': octets}
'''
def datafrommib(mib, community, conn):
value = tuple([int(i) for i in mib.split('.')])
#res = (errorIndication, errorStatus, errorIndex, varBindTable)\
# = real_fun(comm_data, transport, value)
res = (errorIndication, errorStatus, errorIndex, varBindTable)\
= conn[3](conn[1], conn[2], value)
if not errorIndication is None or errorStatus is True:
print "Error: %s %s %s %s" % res
yield None
else:
for varBindTableRow in varBindTable:
# varBindTableRow:
# in: [(ObjectName(1.3.6.1.2.1.2.2.1.10.8), Counter32(180283794))]
data = varBindTableRow[0]
port = data[0]._value[len(value):]
octets = data[1]
yield {'port': port[0], 'octets': octets}
def load(ip, community):
    # To fetch the same counters with the net-snmp command-line tools, try:
# In: snmpwalk -c mymypub -v2c <ip> 1.3.6.1.2.1.2.2.1.10.2
# Out: snmpwalk -c mymypub -v2c <ip> 1.3.6.1.2.1.2.2.1.16.2
# e.t.c...
generator = cmdgen.CommandGenerator()
comm_data = cmdgen.CommunityData('server', community, 1) # 1 means version SNMP v2c
transport = cmdgen.UdpTransportTarget((ip, 161))
real_fun = getattr(generator, 'nextCmd')
conn = (generator, comm_data, transport, real_fun)
mibs = [('1.3.6.1.2.1.2.2.1.16', 'out'),
('1.3.6.1.2.1.2.2.1.10', 'in'),
('1.3.6.1.2.1.2.2.1.11', 'ucast'),
('1.3.6.1.2.1.2.2.1.12', 'nucast'),
('1.3.6.1.2.1.2.2.1.13', 'discards'),
('1.3.6.1.2.1.2.2.1.14', 'errors')]
ports = collections.defaultdict(dict)
for mib in mibs:
data = datafrommib(mib[0], community, conn)
for row in data:
if row:
ports[row['port']][mib[1]] = int(row['octets'])
else:
return None
return ports
if __name__ == '__main__':
try:
ip = sys.argv[1]
community = sys.argv[2]
except IndexError:
print "Please run command like:"
print "python %s <ip> <community>" % __file__
sys.exit(0)
# == debug ==
#import profile
#profile.run("load('%s', '%s')" % (ip, community))
ports = load(ip, community)
if ports:
for key, value in ports.items():
print key, ('in: %(in)s out: %(out)s ucast: %(ucast)s' +\
' nucast: %(nucast)s discards: %(discards)s' +\
' errors: %(errors)s') % value
| [
"[email protected]"
] | |
1bfe7aea8483c772458514da7fd037f980ab3784 | 9a77adbac4aa41a3a230d551369f613bda796473 | /codes/MyPoisson.py | 66390b21deeb57354e857e98850ffb9646fa13f4 | [
"MIT"
] | permissive | statcompute/py_countreg | 6d74565ff6042cd1cf621c1a4cd0c0837b064a66 | 3f62b8f16b95be5be46cacb93f544bbca6b1ec55 | refs/heads/main | 2023-03-01T15:59:06.107550 | 2021-02-07T04:21:30 | 2021-02-07T04:21:30 | 328,307,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | import numpy, scipy
from statsmodels.base.model import GenericLikelihoodModel
def _negll_poisson(y, X, beta):
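    # Per-observation Poisson likelihood with mean mu = exp(X @ beta); the value returned
    # below is the negative log-likelihood, -(y*log(mu) - mu - log(y!))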
mu = numpy.exp(numpy.dot(X, beta))
pr = numpy.exp(-mu) * numpy.power(mu, y) / scipy.special.factorial(y)
ll = numpy.log(pr)
return(-ll)
class StdPoisson(GenericLikelihoodModel):
def __init__(self, endog, exog, **kwds):
super(StdPoisson, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
beta = params
ll = _negll_poisson(self.endog, self.exog, beta)
return(ll)
def fit(self, start_params = None, maxiter = 10000, maxfun = 5000, **kwds):
        if start_params is None:
start_params = numpy.zeros(self.exog.shape[1])
start_params[-1] = numpy.log(self.endog.mean())
return(super(StdPoisson, self).fit(start_params = start_params,
maxiter = maxiter, maxfun = maxfun, **kwds))
import pandas
df = pandas.read_csv("data/credit_count.csv")
y = df.MAJORDRG
xnames = ['AGE', 'ACADMOS', 'MINORDRG', 'OWNRENT']
X = df.loc[:, xnames]
X["constant"] = 1
mdl = StdPoisson(y, X)
mdl.fit().summary()
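# --- Editor's note: optional sanity check (not part of the original script) ---
# The custom likelihood can be validated against statsmodels' built-in Poisson model;
# the coefficient estimates from the two fits should agree closely.
import statsmodels.api as sm
print(sm.Poisson(y, X).fit(disp=0).params)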
| [
"[email protected]"
] | |
2ce770cc188a83a86cae96a14bc9779493541139 | 2053a90c97b277cf8f816efc938c16482a68bcb2 | /3-1线性回归.py | f743e4bfbbccd8392e6b938521cfd9489326967d | [] | no_license | xxNB/tensorflow_study | 703365e3a47b73364131d6ed969ca44918a7169b | 3749574e9d7bd0f67b1f57be6ff71c3c8ffed3f0 | refs/heads/master | 2021-03-30T15:37:09.235052 | 2017-11-23T08:04:50 | 2017-11-23T08:04:50 | 110,522,570 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | # -*- coding: utf-8 -*-
"""
Created on 2017/11/13 at 4:37 PM
@author: SimbaZhang
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Use numpy to generate 200 random points
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
# print(x_data)
noise = np.random.normal(0, 0.02, x_data.shape)
y_data = np.square(x_data) + noise
# Define two placeholders, dtype float32
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
# Define the hidden (middle) layer of the neural network
Weight_L1 = tf.Variable(tf.random_normal([1, 10]))
biase_L1 = tf.Variable(tf.zeros([1, 10]))
Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + biase_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)
# Define the output layer of the neural network
Weight_L2 = tf.Variable(tf.random_normal([10, 1]))
biase_L2 = tf.Variable(tf.zeros([1, 1]))
Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + biase_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
# Define a quadratic (mean squared error) cost function; reduce_mean computes the mean
loss = tf.reduce_mean(tf.square(prediction - y))
# Define a gradient descent optimizer for training
optimizer = tf.train.GradientDescentOptimizer(0.1)
# Minimize the cost function
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for _ in range(2000):
sess.run(train, feed_dict={x:x_data, y:y_data})
    # Get the predicted values
prediction_value = sess.run(prediction, feed_dict={x:x_data})
plt.figure()
plt.scatter(x_data, y_data)
plt.plot(x_data, prediction_value, 'r-', lw=5)
plt.show()
| [
"[email protected]"
] | |
2d9971c4c44a021182d7e06c30a1b9cfe7dff063 | cfb08ae8db8f20985ce3f6aa6426642f9891f67d | /apps/users/auth.py | 1fced72c4d1c82603d506af2eeddd3ec9e94abac | [] | no_license | mylove132/web_backed | 707b7df236c2ffaddf2720a0f5df45530629273b | f30a32d33bd0eabcb2cae515ac1c57897cd4bc21 | refs/heads/master | 2022-05-01T16:46:21.597813 | 2019-06-26T09:55:43 | 2019-06-26T09:55:43 | 188,396,244 | 0 | 0 | null | 2022-04-22T21:19:48 | 2019-05-24T09:44:45 | Python | UTF-8 | Python | false | false | 1,545 | py | from rest_framework.authentication import BaseAuthentication
from rest_framework import exceptions
from .models import Token
class TokenAuthentication(BaseAuthentication):
def authenticate(self, request):
token = request.META.get("HTTP_TOKEN")
if not token:
raise exceptions.AuthenticationFailed('请传入token值')
else:
token_obj = Token.objects.filter(token=token).first()
if not token_obj:
raise exceptions.AuthenticationFailed('token验证失败,请检查')
else:
update_time = token_obj.update_time
import datetime
update_time = update_time.strftime('%Y-%m-%d %H:%M:%S')
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
delta = datetime.datetime.strptime(now_time, '%Y-%m-%d %H:%M:%S') - datetime.datetime.strptime(
update_time, '%Y-%m-%d %H:%M:%S')
import web_backed.settings as setting
                if delta.total_seconds() > setting.TOKEN_EFFETIVE_TIME:  # total_seconds() also counts whole days
raise exceptions.AuthenticationFailed('token失效')
else:
return token_obj.user, token_obj.token
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
pass
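# To activate this scheme, it would typically be registered in the project settings,
# e.g. (a sketch; the dotted path follows this repo's layout and is an assumption):
#   REST_FRAMEWORK = {
#       "DEFAULT_AUTHENTICATION_CLASSES": ["apps.users.auth.TokenAuthentication"],
#   }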
| [
"[email protected]"
] | |
4b2f0957666496d3fd757c2ee95c3310fcc03314 | 173624dde6173ac15d2bf5b9737d3934116dc6cd | /Leetcode题解/31. 下一个排列.py | 363ce4f96f1300d4c01ee006cda5cf4f4e012a70 | [] | no_license | jiufang7/git_repository | 79bb780e563f1aad5fe91ce53b091abaaf5b9a31 | e3d69ec8640f1caab74f1a4473888022f72de644 | refs/heads/master | 2023-01-21T01:31:18.265359 | 2020-12-01T05:21:31 | 2020-12-01T05:21:31 | 304,177,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | class Solution:
def nextPermutation(self, nums):
"""
Do not return anything, modify nums in-place instead.
"""
if len(nums) < 2:
return
i = len(nums) - 1
while i > 0 and nums[i] <= nums[i-1]:
i -= 1
a, b = i, len(nums)-1
while a < b:
nums[a],nums[b] = nums[b],nums[a]
a += 1
b -= 1
j = i-1
for k in range(i, len(nums)):
if nums[k] > nums[j]:
nums[j], nums[k] = nums[k], nums[j]
break
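if __name__ == "__main__":
    # Quick sanity check of the example walked through in the comments below
    nums = [3, 4, 5, 2, 1]
    Solution().nextPermutation(nums)
    print(nums)  # expected: [3, 5, 1, 2, 4]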
# 2020.11.16
# Algorithm from "Discrete Mathematics and Its Applications" (worked example: 3 4 5 2 1)
# Scan from the back for the first ascending adjacent pair (here 4, 5).
# Everything from 5 onward is descending, so reversing that suffix makes it ascending: 3 4 1 2 5
# The slot holding 4 should then receive the smallest value to its right that is larger than 4 (i.e. 5).
# Swapping those two values gives 3 5 1 2 4.
# For a sequence that is already fully descending, the reversal step alone produces the answer. | [
"[email protected]"
] | |
392cb84f7fb97fc10dd40ee30b52ea4dccc43d0f | 990ae6fd62d25eefe033003716827951c5fdac16 | /HW3/hw2/attention.py | 332eee2b39e92c972e19bf9936dc50d33ae61f42 | [] | no_license | wwzoe/MT | 3df537b609bd028392868597cffee16c3cb78dee | acd437b23bc1d756f621692d41c6c41a98253d35 | refs/heads/master | 2021-01-01T06:19:25.100293 | 2017-07-16T20:12:12 | 2017-07-16T20:12:12 | 97,404,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,689 | py | # coding: utf-8
#---------------------------------------------------------------------
'''
Neural Machine Translation - Encoder Decoder model
Chainer implementation of an encoder-decoder sequence to sequence
model using bi-directional LSTM encoder
'''
#---------------------------------------------------------------------
# In[]:
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
from chainer.functions.array import concat
# In[]:
# Import configuration file
from nmt_config import *
# In[]:
class EncoderDecoder(Chain):
'''
Constructor to initialize model
Params:
vsize_enc - vocabulary size for source language (fed into encoder)
vsize_dec - vocabulary size for target language (fed into decoder)
n_units - size of the LSTMs
attn - specifies whether to use attention
'''
def __init__(self, vsize_enc, vsize_dec,
nlayers_enc, nlayers_dec,
n_units, gpuid, attn=False):
super(EncoderDecoder, self).__init__()
#--------------------------------------------------------------------
# add encoder layers
#--------------------------------------------------------------------
# add embedding layer
self.add_link("embed_enc", L.EmbedID(vsize_enc, n_units))
'''
___QUESTION-1-DESCRIBE-A-START___
- Explain the following lines of code
- Think about what add_link() does and how can we access Links added in Chainer.
- Why are there two loops or adding links?
'''
self.lstm_enc = ["L{0:d}_enc".format(i) for i in range(nlayers_enc)]
for lstm_name in self.lstm_enc:
self.add_link(lstm_name, L.LSTM(n_units, n_units))
self.lstm_rev_enc = ["L{0:d}_rev_enc".format(i) for i in range(nlayers_enc)]
for lstm_name in self.lstm_rev_enc:
self.add_link(lstm_name, L.LSTM(n_units, n_units))
'''
___QUESTION-1-DESCRIBE-A-END___
'''
#--------------------------------------------------------------------
# add decoder layers
#--------------------------------------------------------------------
# add embedding layer
'''
___QUESTION-1-DESCRIBE-B-START___
Comment on the input and output sizes of the following layers:
- L.EmbedID(vsize_dec, 2*n_units)
- L.LSTM(2*n_units, 2*n_units)
- L.Linear(2*n_units, vsize_dec)
Why are we using multipliers over the base number of units (n_units)?
'''
self.add_link("embed_dec", L.EmbedID(vsize_dec, 2*n_units))
# add LSTM layers
self.lstm_dec = ["L{0:d}_dec".format(i) for i in range(nlayers_dec)]
for lstm_name in self.lstm_dec:
self.add_link(lstm_name, L.LSTM(2*n_units, 2*n_units))
if attn > 0:
# __QUESTION Add attention
pass
self.add_link("attention", L.Linear(2*2*n_units, 2*n_units))
# Save the attention preference
# __QUESTION you should use this flag to check if attention
# has been selected. Your code should work with and without attention
self.attn = attn
# add output layer
self.add_link("out", L.Linear(2*n_units, vsize_dec))
'''
___QUESTION-1-DESCRIBE-B-END___
'''
# Store GPU id
self.gpuid = gpuid
self.n_units = n_units
def reset_state(self):
# reset the state of LSTM layers
for lstm_name in self.lstm_enc + self.lstm_rev_enc + self.lstm_dec:
self[lstm_name].reset_state()
self.loss = 0
'''
___QUESTION-1-DESCRIBE-C-START___
Describe what the function set_decoder_state() is doing. What are c_state and h_state?
'''
def set_decoder_state(self):
xp = cuda.cupy if self.gpuid >= 0 else np
c_state = F.concat((self[self.lstm_enc[-1]].c, self[self.lstm_rev_enc[-1]].c))
h_state = F.concat((self[self.lstm_enc[-1]].h, self[self.lstm_rev_enc[-1]].h))
self[self.lstm_dec[0]].set_state(c_state, h_state)
'''___QUESTION-1-DESCRIBE-C-END___'''
'''
Function to feed an input word through the embedding and lstm layers
args:
embed_layer: embeddings layer to use
lstm_layer: list of names of lstm layers to use
'''
def feed_lstm(self, word, embed_layer, lstm_layer_list, train):
# get embedding for word
embed_id = embed_layer(word)
embed_id=F.dropout(embed_id,ratio=0.3)
# feed into first LSTM layer
hs = self[lstm_layer_list[0]](embed_id)
hs=F.dropout(hs,ratio=0.3)
# feed into remaining LSTM layers
for lstm_layer in lstm_layer_list[1:]:
hs = self[lstm_layer](hs)
hs=F.dropout(hs,ratio=0.3)
# Function to encode an source sentence word
def encode(self, word, lstm_layer_list, train):
self.feed_lstm(word, self.embed_enc, lstm_layer_list, train)
# Function to decode a target sentence word
def decode(self, word, train):
self.feed_lstm(word, self.embed_dec, self.lstm_dec, train)
'''
'''
def encode_list(self, in_word_list, train=True):
xp = cuda.cupy if self.gpuid >= 0 else np
# convert list of tokens into chainer variable list
var_en = (Variable(xp.asarray(in_word_list, dtype=np.int32).reshape((-1,1)),
volatile=(not train)))
var_rev_en = (Variable(xp.asarray(in_word_list[::-1], dtype=np.int32).reshape((-1,1)),
volatile=(not train)))
# array to store hidden states for each word
# enc_states = xp.empty((0,2*self.n_units), dtype=xp.float32)
first_entry = True
# encode tokens
for f_word, r_word in zip(var_en, var_rev_en):
'''
___QUESTION-1-DESCRIBE-D-START___
- Explain why we are performing two encode operations
'''
self.encode(f_word, self.lstm_enc, train)
self.encode(r_word, self.lstm_rev_enc, train)
'''___QUESTION-1-DESCRIBE-D-END___'''
# __QUESTION -- Following code is to assist with ATTENTION
# enc_states stores the hidden state vectors of the encoder
# this can be used for implementing attention
if first_entry == False:
forward_states = F.concat((forward_states, self[self.lstm_enc[-1]].h), axis=0)
backward_states = F.concat((self[self.lstm_rev_enc[-1]].h, backward_states), axis=0)
else:
forward_states = self[self.lstm_enc[-1]].h
backward_states = self[self.lstm_rev_enc[-1]].h
first_entry = False
enc_states = F.concat((forward_states, backward_states), axis=1)
return enc_states
# Select a word from a probability distribution
# should return a chainer variable
def select_word(self, prob, train=True, sample=False):
xp = cuda.cupy if self.gpuid >= 0 else np
if not sample:
indx = xp.argmax(prob.data[0])
pred_word = Variable(xp.asarray([indx], dtype=np.int32), volatile=not train)
else:
            # Sample the next word id from the predicted distribution
            prob_data = prob.data[0]
            prob_data = prob_data / prob_data.sum()  # renormalise against float32 rounding
            indx = xp.random.choice(len(prob_data), p=prob_data)
            pred_word = Variable(xp.asarray([indx], dtype=np.int32), volatile=not train)
'''
___QUESTION-2-SAMPLE
- Add code to sample from the probability distribution to
choose the next word
'''
pass
return pred_word
def encode_decode_train(self, in_word_list, out_word_list, train=True, sample=False):
xp = cuda.cupy if self.gpuid >= 0 else np
self.reset_state()
# Add GO_ID, EOS_ID to decoder input
decoder_word_list = [GO_ID] + out_word_list + [EOS_ID]
# encode list of words/tokens
enc_states = self.encode_list(in_word_list, train=train)
# initialize decoder LSTM to final encoder state
self.set_decoder_state()
# decode and compute loss
# convert list of tokens into chainer variable list
var_dec = (Variable(xp.asarray(decoder_word_list, dtype=np.int32).reshape((-1,1)),
volatile=not train))
# Initialise first decoded word to GOID
pred_word = Variable(xp.asarray([GO_ID], dtype=np.int32), volatile=not train)
# compute loss
self.loss = 0
# decode tokens
for next_word_var in var_dec[1:]:
self.decode(pred_word, train=train)
if self.attn == NO_ATTN:
predicted_out = self.out(self[self.lstm_dec[-1]].h)
else:
# __QUESTION Add attention
                # Dot-product attention: score the current decoder state against every
                # encoder state, build the context vector, and combine it with the
                # decoder state before the output projection.
                c = F.matmul(self[self.lstm_dec[-1]].h, enc_states, transb=True)
                score = F.softmax(c)
                ct = F.matmul(score, enc_states)
                s = F.concat((ct, self[self.lstm_dec[-1]].h))
                hs = F.tanh(s)
                predict = self.attention(hs)
                predicted_out = self.out(predict)
# compute loss
prob = F.softmax(predicted_out)
pred_word = self.select_word(prob, train=train, sample=False)
# pred_word = Variable(xp.asarray([pred_word.data], dtype=np.int32), volatile=not train)
'''
___QUESTION-1-DESCRIBE-E-START___
Explain what loss is computed with an example
What does this value mean?
'''
self.loss += F.softmax_cross_entropy(predicted_out, next_word_var)
'''___QUESTION-1-DESCRIBE-E-END___'''
report({"loss":self.loss},self)
return self.loss
def decoder_predict(self, start_word, enc_states, max_predict_len=MAX_PREDICT_LEN, sample=False):
xp = cuda.cupy if self.gpuid >= 0 else np
# __QUESTION -- Following code is to assist with ATTENTION
# alpha_arr should store the alphas for every predicted word
alpha_arr = xp.empty((0,enc_states.shape[0]), dtype=xp.float32)
# return list of predicted words
predicted_sent = []
# load start symbol
pred_word = Variable(xp.asarray([start_word], dtype=np.int32), volatile=True)
pred_count = 0
# start prediction loop
while pred_count < max_predict_len and (int(pred_word.data) != (EOS_ID)):
self.decode(pred_word, train=False)
if self.attn == NO_ATTN:
prob = F.softmax(self.out(self[self.lstm_dec[-1]].h))
else:
# __QUESTION Add attention
                # Same dot-product attention as in training; the attention weights
                # are also recorded in alpha_arr for the visualisation code.
                c = F.matmul(self[self.lstm_dec[-1]].h, enc_states, transb=True)
                score = F.softmax(c)
                alpha = score  # already normalised; a second softmax would distort the weights
                alpha_arr = xp.append(alpha_arr, alpha.data, axis=0)
                ct = F.matmul(score, enc_states)
                s = F.concat((ct, self[self.lstm_dec[-1]].h))
                hs = F.tanh(s)
                predict = self.attention(hs)
                predicted_out = self.out(predict)
prob = F.softmax(predicted_out)
pred_word = self.select_word(prob, train=False, sample=sample)
# add integer id of predicted word to output list
predicted_sent.append(int(pred_word.data))
pred_count += 1
# __QUESTION Add attention
# When implementing attention, make sure to use alpha_array to store
# your attention vectors.
# The visualisation function in nmt_translate.py assumes such an array as input.
return predicted_sent, alpha_arr
def encode_decode_predict(self, in_word_list, max_predict_len=20, sample=False):
xp = cuda.cupy if self.gpuid >= 0 else np
self.reset_state()
# encode list of words/tokens
in_word_list_no_padding = [w for w in in_word_list if w != PAD_ID]
enc_states = self.encode_list(in_word_list, train=False)
# initialize decoder LSTM to final encoder state
self.set_decoder_state()
# decode starting with GO_ID
predicted_sent, alpha_arr = self.decoder_predict(GO_ID, enc_states,
max_predict_len, sample=sample)
return predicted_sent, alpha_arr
# In[]:
| [
"[email protected]"
] | |
b49a99e04713d520e0e7809f8c87bf5075857b17 | 059f13774530ba14cac8da46baf60aecabf48770 | /lang/preprocess_lm.py | e12fbc1233c8728b48a43ecd8ef569a2a5e23912 | [
"BSD-3-Clause"
] | permissive | rainyrainyguo/ARAE | 76796501ee8180d20dac44225e6bb9ddf891a43a | f12499d70f5fe61ca36bae4d2f57ccae28b1c5bc | refs/heads/master | 2020-03-27T05:45:18.550942 | 2018-08-24T23:41:41 | 2018-08-24T23:41:41 | 146,047,307 | 0 | 1 | BSD-3-Clause | 2018-08-24T23:04:36 | 2018-08-24T23:04:36 | null | UTF-8 | Python | false | false | 8,788 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create the data for the LSTM.
"""
import os
import sys
import argparse
import numpy as np
import h5py
import itertools
from collections import defaultdict
class Indexer:
def __init__(self, symbols = ["<pad>","<unk>","<s>","</s>"]):
self.vocab = defaultdict(int)
self.PAD = symbols[0]
self.UNK = symbols[1]
self.BOS = symbols[2]
self.EOS = symbols[3]
self.d = {self.PAD: 0, self.UNK: 1, self.BOS: 2, self.EOS: 3}
self.idx2word = {}
def add_w(self, ws):
for w in ws:
if w not in self.d:
self.d[w] = len(self.d)
def convert(self, w):
return self.d[w] if w in self.d else self.d[self.UNK]
def convert_sequence(self, ls):
return [self.convert(l) for l in ls]
def write(self, outfile):
out = open(outfile, "w")
items = [(v, k) for k, v in self.d.items()]
items.sort()
for v, k in items:
out.write(" ".join([k, str(v)]) + "\n")
out.close()
def prune_vocab(self, k, cnt = False):
vocab_list = [(word, count) for word, count in self.vocab.items()]
if cnt:
self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list if pair[1] > k}
else:
vocab_list.sort(key = lambda x: x[1], reverse=True)
k = min(k, len(vocab_list))
self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list[:k]}
for word in self.pruned_vocab:
if word not in self.d:
self.d[word] = len(self.d)
for word, idx in self.d.items():
self.idx2word[idx] = word
def load_vocab(self, vocab_file):
self.d = {}
for line in open(vocab_file, 'r'):
v, k = line.strip().split()
self.d[v] = int(k)
for word, idx in self.d.items():
self.idx2word[idx] = word
def pad(ls, length, symbol):
if len(ls) >= length:
return ls[:length]
return ls + [symbol] * (length -len(ls))
def get_data(args):
indexer = Indexer(["<pad>","<unk>","<s>","</s>"])
def make_vocab(textfile, seqlength, train=1):
num_sents = 0
for sent in open(textfile, 'r'):
sent = sent.strip().split()
if len(sent) > seqlength or len(sent) < 1:
continue
num_sents += 1
if train == 1:
for word in sent:
indexer.vocab[word] += 1
return num_sents
def convert(textfile, batchsize, seqlength, outfile, num_sents, max_sent_l=0,shuffle=0):
newseqlength = seqlength + 2 #add 2 for EOS and BOS
sents = np.zeros((num_sents, newseqlength), dtype=int)
sent_lengths = np.zeros((num_sents,), dtype=int)
dropped = 0
sent_id = 0
for sent in open(textfile, 'r'):
sent = [indexer.BOS] + sent.strip().split() + [indexer.EOS]
max_sent_l = max(len(sent), max_sent_l)
if len(sent) > seqlength + 2 or len(sent) < 3:
dropped += 1
continue
sent_pad = pad(sent, newseqlength, indexer.PAD)
sents[sent_id] = np.array(indexer.convert_sequence(sent_pad), dtype=int)
sent_lengths[sent_id] = (sents[sent_id] != 0).sum()
sent_id += 1
if sent_id % 100000 == 0:
print("{}/{} sentences processed".format(sent_id, num_sents))
print(sent_id, num_sents)
if shuffle == 1:
rand_idx = np.random.permutation(sent_id)
sents = sents[rand_idx]
sent_lengths = sent_lengths[rand_idx]
#break up batches based on source lengths
sent_lengths = sent_lengths[:sent_id]
sent_sort = np.argsort(sent_lengths)
sents = sents[sent_sort]
sent_l = sent_lengths[sent_sort]
curr_l = 1
l_location = [] #idx where sent length changes
for j,i in enumerate(sent_sort):
if sent_lengths[i] > curr_l:
curr_l = sent_lengths[i]
l_location.append(j)
l_location.append(len(sents))
#get batch sizes
curr_idx = 0
batch_idx = [0]
nonzeros = []
batch_l = []
batch_w = []
for i in range(len(l_location)-1):
while curr_idx < l_location[i+1]:
curr_idx = min(curr_idx + batchsize, l_location[i+1])
batch_idx.append(curr_idx)
for i in range(len(batch_idx)-1):
batch_l.append(batch_idx[i+1] - batch_idx[i])
batch_w.append(sent_l[batch_idx[i]])
# Write output
f = h5py.File(outfile, "w")
f["source"] = sents
f["batch_l"] = np.array(batch_l, dtype=int)
f["source_l"] = np.array(batch_w, dtype=int)
f["sents_l"] = np.array(sent_l, dtype = int)
f["batch_idx"] = np.array(batch_idx[:-1], dtype=int)
f["vocab_size"] = np.array([len(indexer.d)])
print("Saved {} sentences (dropped {} due to length/unk filter)".format(
len(f["source"]), dropped))
f.close()
return max_sent_l
print("First pass through data to get vocab...")
num_sents_train = make_vocab(args.trainfile, args.seqlength)
print("Number of sentences in training: {}".format(num_sents_train))
num_sents_valid = make_vocab(args.valfile, args.seqlength, 0)
print("Number of sentences in valid: {}".format(num_sents_valid))
num_sents_test = make_vocab(args.testfile, args.seqlength, 0)
print("Number of sentences in test: {}".format(num_sents_test))
if args.vocabminfreq >= 0:
indexer.prune_vocab(args.vocabminfreq, True)
else:
indexer.prune_vocab(args.vocabsize, False)
if args.vocabfile != '':
print('Loading pre-specified source vocab from ' + args.vocabfile)
indexer.load_vocab(args.vocabfile)
indexer.write(args.outputfile + ".dict")
print("Vocab size: Original = {}, Pruned = {}".format(len(indexer.vocab),
len(indexer.d)))
max_sent_l = 0
max_sent_l = convert(args.valfile, args.batchsize, args.seqlength,
args.outputfile + "-val.hdf5", num_sents_valid,
max_sent_l, args.shuffle)
max_sent_l = convert(args.testfile, args.batchsize, args.seqlength,
args.outputfile + "-test.hdf5", num_sents_test,
max_sent_l, args.shuffle)
max_sent_l = convert(args.trainfile, args.batchsize, args.seqlength,
args.outputfile + "-train.hdf5", num_sents_train,
max_sent_l, args.shuffle)
print("Max sent length (before dropping): {}".format(max_sent_l))
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vocabsize', help="Size of source vocabulary, constructed "
"by taking the top X most frequent words. "
" Rest are replaced with special UNK tokens.",
type=int, default=70000)
parser.add_argument('--vocabminfreq', help="Minimum frequency for vocab, if using frequency cutoff",
type=int, default=-1)
parser.add_argument('--trainfile', help="Path to training data.", required=True)
parser.add_argument('--valfile', help="Path validation data.", required=True)
parser.add_argument('--testfile', help="Path to test data.", required=True)
parser.add_argument('--batchsize', help="Size of each minibatch.", type=int, default=32)
parser.add_argument('--seqlength', help="Maximum source sequence length. Sequences longer "
"than this are dropped.", type=int, default=200)
parser.add_argument('--outputfile', help="Prefix of the output file names. ", type=str)
parser.add_argument('--vocabfile', help="If working with a preset vocab, "
"then including this will ignore srcvocabsize and use the"
"vocab provided here.",
type = str, default='')
parser.add_argument('--shuffle', help="If = 1, shuffle sentences before sorting (based on "
"source length).",
type = int, default = 1)
args = parser.parse_args(arguments)
get_data(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
"[email protected]"
] | |
e83b9d3c0be7a33e00b85393338fd4d1bb2d0e8d | cbc7ca332ff4cb8c98cb9eb37af654ee63297802 | /torch/_C/_distributed_c10d.pyi | 1cbf030e4afedebc674b36fe035faa072792c35e | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | yfzheng11/pytorch | 307ebedbf6c5f23ed49584cf9536e47cff0a3ab0 | 1aa14fcb14dd1ecc8382f747de6f2070d929ed02 | refs/heads/master | 2023-05-26T14:07:05.459920 | 2021-06-03T20:28:45 | 2021-06-03T20:30:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,379 | pyi | from datetime import timedelta
from enum import Enum
from typing import Optional, List, Any, Tuple, overload
from torch import Tensor
# This module is defined in torch/csrc/distributed/c10d/init.cpp
_DEFAULT_FIRST_BUCKET_BYTES: int
_DEFAULT_NO_TIMEOUT: timedelta
_DEFAULT_PG_TIMEOUT: timedelta
class BuiltinCommHookType(Enum):
ALLREDUCE = ...
FP16_COMPRESS = ...
def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
def _register_builtin_comm_hook(
reducer: Reducer, comm_hook_type: BuiltinCommHookType
): ...
class GradBucket:
def __init__(
self,
index: int,
tensor: Tensor,
offsets: List[int],
lengths: List[int],
sizes_list: List[Tuple[int]],
): ...
def get_index(self) -> int: ...
def get_tensor(self) -> Tensor: ...
def get_per_parameter_tensors(self) -> List[Tensor]: ...
def is_the_last_bucket_to_allreduce(self) -> bool: ...
def set_tensor(self, tensor: Tensor) -> None: ...
class Reducer:
def __init__(
self,
replicas: List[List[Tensor]],
bucket_indices: List[List[int]],
process_group: ProcessGroup,
expect_sparse_gradients: List[List[bool]],
bucket_bytes_cap: int,
find_unused_parameters: bool,
gradient_as_bucket_view: bool,
): ...
...
class Logger:
def __init__(self, reducer: Reducer): ...
def set_construction_data_and_log(
self,
module_name: str,
device_ids: List[int],
output_device: int,
broadcast_buffers: bool,
): ...
...
def _get_debug_mode(): ...
class _DistributedDebugLevel(Enum):
OFF = ...
INFO = ...
DETAIL = ...
class ReduceOp(Enum):
SUM = ...
PRODUCT = ...
MIN = ...
MAX = ...
BAND = ...
BOR = ...
BXOR = ...
UNUSED = ...
class BroadcastOptions:
rootRank: int
rootTensor: int
timeout: timedelta
class AllreduceOptions:
reduceOp: ReduceOp
timeout: timedelta
class AllreduceCoalescedOptions(AllreduceOptions): ...
class ReduceOptions:
reduceOp: ReduceOp
rootRank: int
rootTensor: int
timeout: timedelta
class AllGatherOptions:
timeout: timedelta
class GatherOptions:
rootRank: int
timeout: timedelta
class ScatterOptions:
rootRank: int
timeout: timedelta
class ReduceScatterOptions:
reduceOp: ReduceOp
timeout: timedelta
class BarrierOptions:
device_ids: List[int]
timeout: timedelta
class AllToAllOptions:
timeout: timedelta
class Store:
def set(self, key: str, value: str): ...
def get(self, key: str) -> bytes: ...
def add(self, key: str, value: int) -> int: ...
def compare_set(self, key: str, expected_value: str, desired_value: str) -> bytes: ...
def delete_key(self, key: str) -> bool: ...
def num_keys(self) -> int: ...
def set_timeout(self, timeout: timedelta): ...
@overload
def wait(self, keys: List[str]): ...
@overload
def wait(self, keys: List[str], timeout: timedelta): ...
class FileStore(Store):
def __init__(self, path: str, numWorkers: int): ...
class HashStore(Store):
def __init__(self): ...
class TCPStore(Store):
def __init__(
self,
host_name: str,
port: int,
world_size: int = ...,
is_master: bool = ...,
timeout: timedelta = ...,
wait_for_workers: bool = ...
): ...
class PrefixStore(Store):
def __init__(self, prefix: str, store: Store): ...
class Work:
def is_completed(self) -> bool: ...
def is_success(self) -> bool: ...
def exception(self) -> Any: ...
def wait(self, timeout: timedelta = _DEFAULT_NO_TIMEOUT) -> bool: ...
def source_rank(self) -> int: ...
def _source_rank(self) -> int: ...
def result(self) -> List[Tensor]: ...
def synchronize(self): ...
...
class ProcessGroup:
class Options: ...
def __init__(self): ...
def rank(self) -> int: ...
def size(self) -> int: ...
@overload
def broadcast(
self,
tensors: List[Tensor],
opts=BroadcastOptions(),
) -> Work: ...
@overload
def broadcast(
self,
tensor: Tensor,
root: int,
) -> Work: ...
@overload
def allreduce(
self,
tensors: List[Tensor],
opts: AllreduceOptions = AllreduceOptions(),
) -> Work: ...
@overload
def allreduce(
self,
tensors: List[Tensor],
op=ReduceOp.SUM,
) -> Work: ...
@overload
def allreduce(
self,
tensor: Tensor,
op=ReduceOp.SUM,
) -> Work: ...
def allreduce_coalesced(
self,
tensors: List[Tensor],
opts=AllreduceCoalescedOptions(),
) -> Work: ...
@overload
def reduce(
self,
tensors: List[Tensor],
opts=ReduceOptions(),
) -> Work: ...
@overload
def reduce(
self,
tensor: Tensor,
root: int,
op=ReduceOp.SUM,
) -> Work: ...
@overload
def allgather(
self,
output_tensors: List[List[Tensor]],
input_tensors: List[Tensor],
opts=AllGatherOptions(),
) -> Work: ...
@overload
def allgather(
self,
output_tensors: List[Tensor],
input_tensor: Tensor,
) -> Work: ...
def _allgather_base(
self,
output: Tensor,
input: Tensor,
opts = AllGatherOptions(),
) -> Work: ...
def allgather_coalesced(
self,
output_lists: List[List[Tensor]],
input_list: List[Tensor],
opts=AllGatherOptions(),
) -> Work: ...
@overload
def gather(
self,
output_tensors: List[List[Tensor]],
input_tensors: List[Tensor],
opts=GatherOptions(),
) -> Work: ...
@overload
def gather(
self,
output_tensors: List[Tensor],
input_tensor: Tensor,
root: int,
) -> Work: ...
@overload
def scatter(
self,
output_tensors: List[Tensor],
input_tensors: List[List[Tensor]],
opts=ScatterOptions(),
) -> Work: ...
@overload
def scatter(
self,
output_tensor: Tensor,
input_tensors: List[Tensor],
root: int,
) -> Work: ...
@overload
def reduce_scatter(
self,
output_tensors: List[Tensor],
input_tensors: List[List[Tensor]],
opts=ReduceScatterOptions(),
) -> Work: ...
@overload
def reduce_scatter(
self,
output_tensors: Tensor,
input_tensor: List[Tensor],
) -> Work: ...
@overload
def alltoall_base(
self,
output_tensor: Tensor,
input_tensor: Tensor,
output_split_sizes: List[int],
input_split_sizes: List[int],
opts=AllToAllOptions(),
) -> Work: ...
@overload
def alltoall_base(
self,
output: Tensor,
input: Tensor,
output_split_sizes: List[int],
input_split_sizes: List[int],
) -> Work: ...
@overload
def alltoall(
self,
output_tensor: List[Tensor],
input_tensor: List[Tensor],
opts=AllToAllOptions(),
) -> Work: ...
@overload
def alltoall(
self,
output: List[Tensor],
input: List[Tensor],
) -> Work: ...
def send(
self,
tensors: List[Tensor],
dstRank: int,
tag: int,
) -> Work: ...
def recv(
self,
tensors: List[Tensor],
srcRank: int,
tag: int,
) -> Work: ...
def recv_anysource(self, tensors: List[Tensor], tag: int) -> Work: ...
def barrier(self, opts=BarrierOptions()) -> Work: ...
class ProcessGroupRoundRobin(ProcessGroup): ...
def _round_robin_process_groups(
process_groups: List[ProcessGroup],
) -> ProcessGroupRoundRobin: ...
class ProcessGroupGloo(ProcessGroup):
class Device: ...
class Options: ...
def __init__(
self,
store: Store,
rank: int,
size: int,
timeout: timedelta,
): ...
@staticmethod
def create_device(hostname=str(), interface=str()) -> Device: ...
...
@staticmethod
def create_default_device() -> Device: ...
...
class _ProcessGroupWrapper(ProcessGroup):
def __init__(
self,
pg: ProcessGroup,
gloo_pg: ProcessGroupGloo
): ...
class ProcessGroupNCCL(ProcessGroup):
class Options: ...
def __init__(
self,
store: Store,
rank: int,
size: int,
timeout: timedelta,
): ...
@staticmethod
def _group_start() -> None: ...
@staticmethod
def _group_end() -> None: ...
...
class ProcessGroupMPI(ProcessGroup):
def __init__(
self,
rank: int,
size: int,
pgComm: int,
): ...
@staticmethod
def create(ranks: List[int]) -> ProcessGroupMPI: ...
def _compute_bucket_assignment_by_size(
tensors: List[Tensor],
bucket_size: int,
expect_sparse_gradient: List[bool],
tensor_indices: List[int],
) -> List[List[int]]: ...
def _broadcast_coalesced(
process_group: ProcessGroup,
tensors: List[Tensor],
buffer_size: int,
src: int,
): ...
def _test_python_store(store: Store): ...
def _verify_model_across_ranks(
process_group: ProcessGroup, replicas: List[List[Tensor]]
): ...
| [
"[email protected]"
] | |
50682e61b81b2e670fa3d045ccd01007692a04f2 | 9b3160cf14c2be19cd4bb573272de22e2cbfc58e | /catkin_ws/build/catkin_generated/generate_cached_setup.py | 69aad08a3b4171c8ddd89ecf4e94b5bd12ff42e3 | [] | no_license | tdnvl/ROS | 4395628252da32fa6e0aada1182adf7ec9bcb418 | 95f2ca2003279f719757be191fd19942ea8113ce | refs/heads/master | 2021-01-02T23:01:48.193996 | 2017-09-08T11:30:04 | 2017-09-08T11:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/thomas/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/thomas/ros/catkin_ws/devel/env.sh')
output_filename = '/home/thomas/ros/catkin_ws/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
08f55e2eb709522cf7653be85bcdd8dc564d7a75 | c2ec0817c6a9573a67f616860bb18bde23b5ef91 | /BossSpider/pipelines.py | 1e92c7b57cf012052c0be5f1bdf02035599b8716 | [] | no_license | shuguo-ma/BossSpider | a3437d1a523af75cbf986c2459fb45dd9e17a360 | 3b429c797bc39a6343f941a04072f0e111916982 | refs/heads/master | 2020-03-18T21:47:47.798886 | 2018-05-29T13:54:11 | 2018-05-29T13:54:11 | 135,303,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
'''
import sqlite3
class BossspiderPipeline(object):
    # When the spider opens, connect to the database with connect('database_name')
def open_spider(self,spider):
self.con = sqlite3.connect("zhipin.sqlite")
self.cu = self.con.cursor()
def process_item(self, item, spider):
        # SQL insert statement: write the scraped data into the database
insert_sql = "insert into zhipin (post,salary,company,area,exp,edu,industry) VALUES ('{}','{}','{}','{}','{}','{}','{}')"\
.format(item['post'],item['salary'],item['company'],item['area'],item['exp'],item['edu'],item['industry'])
        # Execute the insert statement
self.cu.execute(insert_sql)
        # Inserts/updates only take effect after commit
self.con.commit()
return item
    # When the spider finishes, close the database connection
def spider_close(self,spider):
self.con.close()
'''
import pymysql
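# A minimal sketch of what a pymysql-backed replacement pipeline could look like
# (connection parameters are placeholders; the parametrised query also avoids the
# string-formatting injection risk of the sqlite version above):
class BossspiderMySQLPipeline(object):
    def open_spider(self, spider):
        self.con = pymysql.connect(host='localhost', user='root', password='',
                                   database='zhipin', charset='utf8mb4')
        self.cu = self.con.cursor()

    def process_item(self, item, spider):
        sql = ("insert into zhipin (post,salary,company,area,exp,edu,industry) "
               "values (%s,%s,%s,%s,%s,%s,%s)")
        self.cu.execute(sql, (item['post'], item['salary'], item['company'],
                              item['area'], item['exp'], item['edu'], item['industry']))
        self.con.commit()
        return item

    def close_spider(self, spider):
        self.con.close()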
| [
"[email protected]"
] | |
930737c12616f230b3bc3b9803ca7fc4d51929cc | 390d2a378fd60d35f34d4e140887451ba74cc9e7 | /gamestonk_terminal/stocks/technical_analysis/finnhub_model.py | f344acf2f2601118b994c50f387f8c28144df069 | [
"MIT"
] | permissive | sechours/GamestonkTerminal | ebb7ae44c7ede1bbf40d7b3802cbf63cfa4be2cf | 0b554c6893aa4e805c27b606f7de7ae6c0eb454d | refs/heads/main | 2023-07-31T18:19:09.487365 | 2021-09-13T18:07:00 | 2021-09-13T18:07:00 | 406,403,995 | 1 | 0 | MIT | 2021-09-14T14:37:11 | 2021-09-14T14:37:10 | null | UTF-8 | Python | false | false | 1,074 | py | """Finnhub model"""
__docformat__ = "numpy"
import requests
import pandas as pd
from gamestonk_terminal import config_terminal as cfg
def get_pattern_recognition(ticker: str, resolution: str) -> pd.DataFrame:
"""Get pattern recognition data
Parameters
----------
ticker : str
Ticker to get pattern recognition data
resolution : str
Resolution of data to get pattern recognition from
Returns
-------
pd.DataFrame
Get datapoints corresponding to pattern signal data
"""
response = requests.get(
f"https://finnhub.io/api/v1/scan/pattern?symbol={ticker}&resolution={resolution}&token={cfg.API_FINNHUB_KEY}"
)
# pylint:disable=no-else-return
if response.status_code == 200:
d_data = response.json()
if "points" in d_data:
return pd.DataFrame(d_data["points"]).T
else:
print("Response is empty")
return pd.DataFrame()
else:
print(f"Error in requests with code: {response.status_code}")
return pd.DataFrame()
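# A minimal usage sketch (assumes a valid Finnhub API key is configured in
# config_terminal; the ticker/resolution values are only illustrative):
#
#     patterns = get_pattern_recognition("AAPL", "D")
#     if not patterns.empty:
#         print(patterns.head())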
| [
"[email protected]"
] | |
02275546e99d17fd7465ff2cbf3e4eacf57003e3 | a64757759a7170478ad3e9c71429c484491426be | /autoconv.py | 85e7b1b45b29a9ad766ab17c57193b68b5453c93 | [] | no_license | fy0/autoconv | 940928810bcda472bf401c14c2452ef64359fd9c | 1073934a0d03eba5e5192ffb583629308ff74d13 | refs/heads/master | 2021-07-01T01:30:25.249292 | 2017-09-18T08:11:57 | 2017-09-18T08:11:57 | 103,892,929 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,704 | py | """
{
"global": {
"encoder": "opusenc.exe",
"input_dir": "input",
"output_dir": "output",
"watch_ext": [".wav"],
"output_ext": ".opus"
},
"types": {
"music": {
"--title": "track title"
}
}
}
"""
import os
import json
import time
import subprocess
from shlex import quote
from pathlib import Path # py3.4+
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
config = {}
def convert(type_name, filepath):
if len(filepath.parts) == 1:
type_name = ''
if filepath.suffix not in config['global']['watch_ext']:
return
if type_name in config['types']:
typeinfo = config['types'][type_name]
params = []
for k, v in typeinfo.items():
params.append('%s %s' % (k, v))
out_path = Path(config['global']['output_dir']).joinpath(filepath)
out_ext = config['global']['output_ext']
encoder = subprocess.list2cmdline([config['global']['encoder']])
cmd = [
str(Path(config['global']['input_dir']).joinpath(filepath)),
str(out_path)[:-len(out_path.suffix)] + out_ext # .absolute()
]
os.makedirs(os.path.dirname(out_path), exist_ok=True)
        cmd_txt = encoder + ' ' + ' '.join(params) + ' ' + subprocess.list2cmdline(cmd)
print('Running: %s' % cmd_txt)
os.system(cmd_txt)
return True
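# Note: parameter values that contain spaces (e.g. a "--title" of "track title") would
# need shell quoting to survive os.system(); the shlex.quote imported above could be
# used for that, e.g.:
#     params.append('%s %s' % (k, quote(str(v))))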
class FileEventHandler(FileSystemEventHandler):
def on_moved(self, event):
if event.is_directory:
print("directory moved from {0} to {1}".format(event.src_path,event.dest_path))
else:
path = Path(event.dest_path).relative_to(config['global']['input_dir'])
if convert(path.parts[0], path):
#print("file moved from {0} to {1}".format(event.src_path,event.dest_path))
print('[Encoded] %s' % event.src_path)
def on_modified(self, event):
if not event.is_directory:
path = Path(event.src_path).relative_to(config['global']['input_dir'])
if convert(path.parts[0], path):
#print("file modified: %s" % event.src_path)
print('[Encoded] %s' % event.src_path)
def main():
global config
config = json.loads(open('config.json', encoding='utf-8').read())
observer = Observer()
event_handler = FileEventHandler()
observer.schedule(event_handler, config['global']['input_dir'], True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6c6e0e149c60270d3573a57dfa2fd3aa115c5361 | e1fe1ed4f2ba8ab0146ce7c08d65bc7947150fc8 | /credit11315/spiders/no_redis_detail_info_scrapy.py | 3fb233a4bbd3603929edc4969dbae24a6847b673 | [] | no_license | yidun55/credit11315 | 0d88ceef314efa444de58eb5da8939c1acff3abe | b048ec9db036a382287d5faacb9490ccbf50735c | refs/heads/master | 2021-01-20T01:03:30.617914 | 2015-07-31T09:58:24 | 2015-07-31T09:58:24 | 38,853,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,621 | py | #!usr/bin/env python
#coding: utf-8
"""
Crawl company information from the 11315 national enterprise
credit system (http://www.11315.com/).
"""
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy import log
from scrapy import signals
from scrapy import Selector
from scrapy.exceptions import DontCloseSpider
import sys
from credit11315.items import *
from credit11315.middlewares import UnknownResponseError, ForbbidenResponseError
from credit11315.tool.for_ominated_strip import for_ominated_data
from credit11315.tool.for_JCXX import extract_combine_JCXX
from credit11315.tool.for_all_blocks_info_extract import block_info_extract
from credit11315.tool.for_fundation_info_extract import fundation_info_extract
import HTMLParser
import redis
import urllib2
reload(sys)
sys.setdefaultencoding("utf-8")
class GetDetailInfo(Spider):
"""
    Read detail-page URLs and extract each company's information
    (this variant reads the URLs from a local file rather than redis)
"""
name = 'noredisdetail'
start_urls = ['http://www.11315.com']
def set_crawler(self,crawler):
super(GetDetailInfo, self).set_crawler(crawler)
self.crawler.signals.connect(self.spider_idle,\
signal=signals.spider_idle)
def spider_idle(self):
raise DontCloseSpider
def parse(self,response):
        urlPath = '/home/dyh/data/credit11315/detailUrl/uniq_all_detail_url'
        f = open(urlPath, "r")
        for url in f:
            yield Request(url.strip(), callback=self.my_parse,
                          dont_filter=True)
def my_parse(self, response):
"""
        Parse one company detail page
"""
sel = Selector(text=response.body)
print len(sel.xpath(u"//b[text()='单位名称']"))!= 0, "parse 条件"
log.msg("parse 条件=%s"%str(len(sel.xpath(u"//b[text()='单位名称']")) != 0), level=log.INFO)
if (len(sel.xpath(u"//b[text()='单位名称']")) != 0): #判别是否为要输入验证码
pass
else:
log.msg("code=%s, %s"%(str(response.status),response.body), level=log.INFO)
raise UnknownResponseError
#========================================================
"""
        Part 1: company credit profile
"""
item = DetailInformation()
item['basic_info'] = fundation_info_extract(response)
#========================================================
#========================================================
"""
        Part 2: government regulatory information
"""
item['regulatory_info'] = extract_combine_JCXX(response)
#========================================================
#========================================================
"""
        Part 3: industry evaluation information
"""
keywords_list = ['2-1.体系/产品/行业认证信息',
'2-2.行业协会(社会组织)评价信息',\
'2-3.水电气通讯等公共事业单位评价']
item['envaluated_info'] = block_info_extract(response,\
keywords_list)
#========================================================
"""
        Part 4: media evaluation information
"""
keywords_list = ['3-1.媒体评价信息']
item['media_env'] = block_info_extract(response, keywords_list)
#========================================================
"""
        Part 5: finance and credit information
"""
#url = 'http://www.11315.com/\
#getTradeLendingCount?companyId=%s'%response.url[7:15]
#header = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36",
# 'Referer':response.url}
#req = urllib2.Request(url=url, headers=header)
#xtml = urllib2.urlopen(req)
#Nums = xtml.read()
#print Nums, "this is Nums"
#Nums = eval(Nums).split(",")
#print Nums, "this is anothor Nums"
#total = str(sum([int(i) for i in Nums]))
        #Nums.insert(0, total) # insert at the head of the list
#if total == '0':
# t_url = ""
#else:
# t_url = sel.xpath(u"//script").re(ur"html\(\'<a href=\"([\w\W]*?)\"")[0]
#Nums.append(t_url)
#Nums_re = "|".join(Nums)
keywords_list = ['4-2.民间借贷评价信息']
item["credit_fin"] = block_info_extract(response, keywords_list)
#=======================================================
"""
        Part 6: company operations information
"""
#keywords_list = ['5-3.水电煤气电话费信息',
        #'5-4.纳税信息'] # would need to run JS or fake the request; not worth it for two rows of data
#item['operation_info'] = block_info_extract(response, keywords_list)
#========================================================
"""
        Part 7: market feedback information
"""
keywords_list = ['6-1.消费者评价信息',
'6-2.企业之间履约评价','6-3.员工评价信息',
'6-4.其他']
item['feedback_info'] = block_info_extract(response, keywords_list)
#========================================================
return item
#else:
# print "raise unknownresponseError in spider", response.request.meta
# #raise UnknownResponseError
# #raise ForbbidenResponseError("work or no nnnnnn")
# request = response.request
# retryreq = request.copy()
# retryreq.dont_filter = True
# log.msg("UnknowResponseError %s"%response.body, level=log.INFO)
# yield retryreq
| [
"[email protected]"
] | |
eaea7564dec27050ddfd3d3746903d8608f0a40b | c842ae21582fb91a19605744d3d9b026b5bf9cc5 | /cl3/settings.py | ae097bc7e618daea589f8c37329b6d28b6d8b32d | [] | no_license | Carlos-Daniel260/cl3 | 2e94bdf3254e38d646f8634092d2d3302886ee1a | bdaf12b3b88078e62cc6f59470892e01c653209f | refs/heads/master | 2022-04-05T16:02:20.765358 | 2020-02-22T18:13:03 | 2020-02-22T18:13:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,128 | py | """
Django settings for cl3 project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0&gvsoh!d=guow=z4yxgf4nfs)ug6joyhe9b*8=n$i+nap2=*0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['18.206.127.19', 'localhost', 'myhostup.ddns.net']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cl3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cl3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
733be284b833d1981ed58e617f84b58ca2f8dc00 | d7a6502e2bad57aefa1a509e13074f014821abfd | /00x-isolate_digits_from_number.py | d6486ccd0a9989a6235762b25840b6e3f5284dda | [] | no_license | anibalvy/katas | 5d63d5db6d7ec784f1e60de9c380be318fcb04f7 | a28c26e8e2c9c174594fa4b81e14d9733ba44dc3 | refs/heads/main | 2023-07-16T08:47:31.494670 | 2021-08-24T06:44:28 | 2021-08-24T06:44:28 | 399,362,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | >> 1234//1000
1
>> 1234%1000//100
2
>> 1234%100//10
3
>> 1234%100%10
4
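# A more general version of the same idea (sketch): peel off the digits of any
# non-negative integer with repeated divmod by 10.
def digits(n):
    out = []
    while True:
        n, d = divmod(n, 10)
        out.append(d)
        if n == 0:
            break
    return out[::-1]

print(digits(1234))  # [1, 2, 3, 4]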
| [
"[email protected]"
] | |
f36e4bac20c903f91a082d88b22b765caafeac35 | a708f1d36586d2b01c99f2cb44aa4612b10192f6 | /周赛/week183/5376非递增顺序.py | eb36dbf409919c8981dd21302f726a93af63edc0 | [] | no_license | LeopoldACC/Algorithm | 2477e8a371e9cdc5a47b582ca2a454539b96071e | fc1b0bec0e28d31e9a6ff722b3a66eacb0278148 | refs/heads/master | 2023-01-25T02:28:14.422447 | 2020-12-03T15:01:10 | 2020-12-03T15:01:10 | 197,297,197 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | class Solution:###最终版
def minSubsequence(self, nums):
nums = sorted(nums)
prefix_sum = nums[:]
for i in range(len(nums)-2,-1,-1):
prefix_sum[i]+=prefix_sum[i+1]
index = -1
for i in range(len(nums)-1,-1,-1):
if prefix_sum[i]>prefix_sum[0]//2:
index = i
break
return nums[index:][::-1]
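# Worked example for the method above: nums = [4, 3, 10, 9, 8]
#   sorted       -> [3, 4, 8, 9, 10]
#   suffix sums  -> [34, 31, 27, 19, 10]   (prefix_sum[i] = sum(nums[i:]))
#   scanning from the right, the first suffix sum exceeding total//2 = 17 is 19 (index 3),
#   so the answer is nums[3:] reversed = [10, 9].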
class Solution0:
def minSubsequence(self, nums):
nums = sorted(nums)
prefix_sum =nums[:]
for i in range(len(nums)):
prefix_sum[i]+=nums[i]
target = prefix_sum[-1]//2
index = self.bisec(prefix_sum,target)
return nums[index:][::-1]
def bisec(self,prefix,target):
start,end = 0,len(prefix)-1
while start+1<end:
mid = (start+end)//2
if prefix[mid]<=target:
start = mid
else:
end = mid
return end if prefix[end]>target else start
s = Solution()
s.minSubsequence([4,4,7,6,7]) | [
"[email protected]"
] | |
499a03357c8ae0101e94a0ea850bdfd693fd861f | 77fc5af96da1d461c86c7f9668b64b99ca04a1b6 | /codes/montecarlo.py | 32c0ce13aa246aa42786f17cd7c0371a3c56965c | [] | no_license | rene-d/edupython | 5b6bc8ddb5eb8ec896ee70fb961d4e689af1075a | 1261d0c7aae17bb2d4ff3370860768b73ba4172d | refs/heads/master | 2020-11-24T10:07:18.504472 | 2019-12-21T21:03:08 | 2019-12-21T21:03:08 | 228,099,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # Découpe d'un carré en 3 zones
# https://edupython.tuxfamily.org/sources/view.php?code=montecarlo
# The zones are the regions of the plane bounded by the curves of the
# square and square-root functions, inside the unit square,
# in an orthonormal coordinate system.
# The areas are obtained by the Monte Carlo method:
# a point is drawn at random in the unit square 10,000 times,
# which gives an estimate of the area of each region.
from random import random   # EduPython normally provides random() and sqrt() by default;
from math import sqrt       # they are imported here so the script also runs under plain Python
a, b, c = 0, 0, 0
for i in range (10000) :
x, y = random(), random()
if y > sqrt (x) : a = a + 1
elif y > x * x : b = b + 1
else : c = c + 1
print ("On est dans la zone A", a, "fois sur 10 000.")
print ("On est dans la zone B", b, "fois sur 10 000.")
print ("On est dans la zone C", c, "fois sur 10 000.")
print ("Donc les aires respectives des zones A, B et C",end="")
print ("sont estimées à", a / 10000, ",", b / 10000, "et", c / 10000, "unités d'aire.")
| [
"[email protected]"
] | |
6a6975ede1ec62e3b6946074fc686843e6cf3c4a | d1cf5e22ddf7f03ca032a405455802794273bd7b | /init.wsgi | 9e8ff9cd8f5edeb7e8328139524937a2c02b3c63 | [] | no_license | bibek-p/psabots | 4af99dac0b4d2f3d82396ccb85bd3c2bde03ac3d | a9b763ae132cdab73a91794dbd6568e2d3be3e05 | refs/heads/main | 2023-08-02T16:41:04.053578 | 2021-09-16T05:56:40 | 2021-09-16T05:56:40 | 405,976,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | wsgi | activate_this = '/home/bitspan/testing_bitspanindia_com/venv/bin/activate_this.py'
with open(activate_this) as file_:
exec(file_.read(), dict(__file__=activate_this))
import sys
sys.path.insert(0,"/home/bitspan/")
from testing_bitspanindia_com import app as application
| [
"[email protected]"
] | |
86ed1a431a9ff503d5f122745fcd477b8c57065b | a379f5c5578ebd50998f634345f77f519c6b31a6 | /share/gnuradio/examples/digital/ofdm/tunnel.py | aaf7ca5ee5e83217c9b78b60295d1565a29327a1 | [] | no_license | bjarkimb/EQ2443-2445-Project-Group2 | 867cf3533ee2325b9966232cead6b3531031b3dc | dcbabc70fd0c865ab6e6ac724c6be606f9c61a38 | refs/heads/master | 2020-04-12T09:54:11.257264 | 2018-12-19T10:58:43 | 2018-12-19T10:58:43 | 162,412,306 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,352 | py | #!/usr/bin/python2
#
# Copyright 2005,2006,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# /////////////////////////////////////////////////////////////////////////////
#
# This code sets up a virtual ethernet interface (typically gr0),
# and relays packets between the interface and the GNU Radio PHY+MAC
#
# What this means in plain language, is that if you've got a couple
# of USRPs on different machines, and if you run this code on those
# machines, you can talk between them using normal TCP/IP networking.
#
# /////////////////////////////////////////////////////////////////////////////
from gnuradio import gr, digital
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
# from current dir
from receive_path import receive_path
from transmit_path import transmit_path
from uhd_interface import uhd_transmitter
from uhd_interface import uhd_receiver
import os, sys
import random, time, struct
#print os.getpid()
#raw_input('Attach and press enter')
# /////////////////////////////////////////////////////////////////////////////
#
# Use the Universal TUN/TAP device driver to move packets to/from kernel
#
# See /usr/src/linux/Documentation/networking/tuntap.txt
#
# /////////////////////////////////////////////////////////////////////////////
# Linux specific...
# TUNSETIFF ifr flags from <linux/tun_if.h>
IFF_TUN = 0x0001 # tunnel IP packets
IFF_TAP = 0x0002 # tunnel ethernet frames
IFF_NO_PI = 0x1000 # don't pass extra packet info
IFF_ONE_QUEUE = 0x2000 # beats me ;)
def open_tun_interface(tun_device_filename):
from fcntl import ioctl
mode = IFF_TAP | IFF_NO_PI
TUNSETIFF = 0x400454ca
tun = os.open(tun_device_filename, os.O_RDWR)
ifs = ioctl(tun, TUNSETIFF, struct.pack("16sH", "gr%d", mode))
ifname = ifs[:16].strip("\x00")
return (tun, ifname)
# /////////////////////////////////////////////////////////////////////////////
# the flow graph
# /////////////////////////////////////////////////////////////////////////////
class my_top_block(gr.top_block):
def __init__(self, callback, options):
gr.top_block.__init__(self)
self.source = uhd_receiver(options.args,
options.bandwidth,
options.rx_freq,
options.lo_offset, options.rx_gain,
options.spec, options.antenna,
options.clock_source, options.verbose)
self.sink = uhd_transmitter(options.args,
options.bandwidth, options.tx_freq,
options.lo_offset, options.tx_gain,
options.spec, options.antenna,
options.clock_source, options.verbose)
self.txpath = transmit_path(options)
self.rxpath = receive_path(callback, options)
self.connect(self.txpath, self.sink)
self.connect(self.source, self.rxpath)
def carrier_sensed(self):
"""
Return True if the receive path thinks there's carrier
"""
return self.rxpath.carrier_sensed()
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
"""
        self.sink.set_freq(target_freq)
        self.source.set_freq(target_freq)
# /////////////////////////////////////////////////////////////////////////////
# Carrier Sense MAC
# /////////////////////////////////////////////////////////////////////////////
class cs_mac(object):
"""
Prototype carrier sense MAC
Reads packets from the TUN/TAP interface, and sends them to the PHY.
Receives packets from the PHY via phy_rx_callback, and sends them
into the TUN/TAP interface.
Of course, we're not restricted to getting packets via TUN/TAP, this
is just an example.
"""
def __init__(self, tun_fd, verbose=False):
self.tun_fd = tun_fd # file descriptor for TUN/TAP interface
self.verbose = verbose
self.tb = None # top block (access to PHY)
def set_flow_graph(self, tb):
self.tb = tb
def phy_rx_callback(self, ok, payload):
"""
Invoked by thread associated with PHY to pass received packet up.
Args:
ok: bool indicating whether payload CRC was OK
payload: contents of the packet (string)
"""
if self.verbose:
print "Rx: ok = %r len(payload) = %4d" % (ok, len(payload))
if ok:
os.write(self.tun_fd, payload)
def main_loop(self):
"""
Main loop for MAC.
Only returns if we get an error reading from TUN.
FIXME: may want to check for EINTR and EAGAIN and reissue read
"""
min_delay = 0.001 # seconds
while 1:
payload = os.read(self.tun_fd, 10*1024)
if not payload:
self.tb.txpath.send_pkt(eof=True)
break
if self.verbose:
print "Tx: len(payload) = %4d" % (len(payload),)
delay = min_delay
while self.tb.carrier_sensed():
sys.stderr.write('B')
time.sleep(delay)
if delay < 0.050:
delay = delay * 2 # exponential back-off
self.tb.txpath.send_pkt(payload)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
parser = OptionParser (option_class=eng_option, conflict_handler="resolve")
expert_grp = parser.add_option_group("Expert")
parser.add_option("-m", "--modulation", type="choice", choices=['bpsk', 'qpsk'],
default='bpsk',
help="Select modulation from: bpsk, qpsk [default=%%default]")
parser.add_option("-v","--verbose", action="store_true", default=False)
expert_grp.add_option("-c", "--carrier-threshold", type="eng_float", default=30,
help="set carrier detect threshold (dB) [default=%default]")
expert_grp.add_option("","--tun-device-filename", default="/dev/net/tun",
help="path to tun device file [default=%default]")
digital.ofdm_mod.add_options(parser, expert_grp)
digital.ofdm_demod.add_options(parser, expert_grp)
transmit_path.add_options(parser, expert_grp)
receive_path.add_options(parser, expert_grp)
uhd_receiver.add_options(parser)
uhd_transmitter.add_options(parser)
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help(sys.stderr)
sys.exit(1)
if options.rx_freq is None or options.tx_freq is None:
sys.stderr.write("You must specify -f FREQ or --freq FREQ\n")
parser.print_help(sys.stderr)
sys.exit(1)
# open the TUN/TAP interface
(tun_fd, tun_ifname) = open_tun_interface(options.tun_device_filename)
# Attempt to enable realtime scheduling
r = gr.enable_realtime_scheduling()
if r == gr.RT_OK:
realtime = True
else:
realtime = False
print "Note: failed to enable realtime scheduling"
# instantiate the MAC
mac = cs_mac(tun_fd, verbose=True)
# build the graph (PHY)
tb = my_top_block(mac.phy_rx_callback, options)
mac.set_flow_graph(tb) # give the MAC a handle for the PHY
print "modulation: %s" % (options.modulation,)
print "freq: %s" % (eng_notation.num_to_str(options.tx_freq))
tb.rxpath.set_carrier_threshold(options.carrier_threshold)
print "Carrier sense threshold:", options.carrier_threshold, "dB"
print
print "Allocated virtual ethernet interface: %s" % (tun_ifname,)
print "You must now use ifconfig to set its IP address. E.g.,"
print
print " $ sudo ifconfig %s 192.168.200.1" % (tun_ifname,)
print
print "Be sure to use a different address in the same subnet for each machine."
print
tb.start() # Start executing the flow graph (runs in separate threads)
mac.main_loop() # don't expect this to return...
tb.stop() # but if it does, tell flow graph to stop.
tb.wait() # wait for it to finish
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| [
"[email protected]"
] | |
6ed1bc1d026edbcc0983015edc3b0d6b6c053af7 | ea8e945af461ae6e5a2dcd9dce244391f14ec695 | /koyama/chapter10/knock98.py | e465cc971c6708ec9fbbc0331c437272abe9da90 | [] | no_license | tmu-nlp/100knock2020 | b5a98485e52b88003fa97966c8d6eef292c9f036 | 1133fa833ea32ad3e54833e420bcb1433f3ec2f3 | refs/heads/master | 2023-04-09T06:48:04.571566 | 2020-08-13T05:38:25 | 2020-08-13T05:38:25 | 258,825,143 | 1 | 2 | null | 2020-08-12T15:56:56 | 2020-04-25T16:43:13 | Python | UTF-8 | Python | false | false | 1,304 | py | # 98. ドメイン適応
# Using parallel data such as the Japanese-English Subtitle Corpus (JESC) or JParaCrawl,
# try to improve performance on the KFTT test data.
'''
CUDA_VISIBLE_DEVICES=6 nohup fairseq-train \
/work/aomi/100knock2020/chapter10/data/JESC/processed/bin \
--save-dir /work/aomi/100knock2020/chapter10/knock98/models/model_1111/pretraining \
--arch transformer \
--optimizer adam --adam-betas '(0.9, 0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--min-lr '1e-09' --warmup-init-lr '1e-07' \
--warmup-updates 4000 \
--dropout 0.3 \
--max-epoch 10 \
--clip-norm 1.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--max-tokens 1024 \
--seed 1111 > train.log &
'''
'''
CUDA_VISIBLE_DEVICES=6 nohup fairseq-train \
/work/aomi/100knock2020/chapter10/data/KFTT/processed/bin \
--save-dir /work/aomi/100knock2020/chapter10/knock98/models/model_1111/fine_tuning \
--arch transformer \
--optimizer adam --adam-betas '(0.9, 0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--min-lr '1e-09' --warmup-init-lr '1e-07' \
--warmup-updates 4000 \
--dropout 0.3 \
--max-epoch 40 \
--clip-norm 1.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--max-tokens 1024 \
--seed 1111 > train.log &
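
# --- Editor's sketch (not in the original file) ---
# After fine-tuning, the KFTT test set could be decoded with fairseq-generate;
# the checkpoint file name and output path below are assumptions:
#
# fairseq-generate /work/aomi/100knock2020/chapter10/data/KFTT/processed/bin \
#     --path /work/aomi/100knock2020/chapter10/knock98/models/model_1111/fine_tuning/checkpoint_best.pt \
#     --beam 5 --remove-bpe > generate.log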
''' | [
"[email protected]"
] | |
dd755e188ecdc91d717143c7321fa6eaf8bdb91a | 99dcb18a9e3ea367272f740b8cbf3c34285a0c08 | /.sample_configs/param_handlers/cancel_data_labeling_job_sample.py | b11a0a22ff20d8c981aefa487bf8a6a00db46715 | [
"Apache-2.0"
] | permissive | googleapis/python-aiplatform | 926a4873f35dbea15b2fd86c0e16b5e6556d803e | 76b95b92c1d3b87c72d754d8c02b1bca652b9a27 | refs/heads/main | 2023-08-19T23:49:02.180075 | 2023-08-19T13:25:59 | 2023-08-19T13:27:27 | 298,017,988 | 418 | 240 | Apache-2.0 | 2023-09-14T21:08:33 | 2020-09-23T15:43:39 | Python | UTF-8 | Python | false | false | 716 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_name(name: str) -> str:
# Sample function parameter name in cancel_data_labeling_job_sample
name = name
return name
| [
"[email protected]"
] | |
bdb70c823e7f2efc492d211d2b17fc90893b5be9 | 40c4ffab36810c3d43825f61a14c61a4ede8b6d2 | /venv/mysite/polls/models.py | d347611bf71a2f8a1fa47626da08b2f7df5d760d | [] | no_license | Andrei705/pythonProject | d2ffa0c1731e4bc42f98a51edd0df5ccf6620260 | ac9c97b24c982edb6615e16a598583cbc828490f | refs/heads/master | 2023-09-01T03:05:25.929920 | 2021-10-19T10:55:53 | 2021-10-19T10:55:53 | 388,744,103 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | from django.db import models
# Create your models here.
class Object_folder(models.Model):
name = models.CharField('Папка', max_length=100)
name_films = models.CharField('Название фильма', max_length=100)
name_cinema = models.CharField('Название кинотеатра', max_length=100)
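    # (Editor's note) The Russian field labels above mean: 'Папка' = "Folder",
    # 'Название фильма' = "Film title", 'Название кинотеатра' = "Cinema name";
    # the Meta verbose_name below, 'Заполнение данными', means "Filling in data".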
def __str__(self):
return self.name
class Meta:
verbose_name = 'Заполнение данными'
# class filing_folder(models.Model):
# folder = models.ForeignKey(Object_folder,on_delete=models.PROTECT) | [
"[email protected]"
] | |
80f832455983b37492aabb45c118bac2cd8e5ae4 | e49f2251e07a70c943b70bbae27c439631a31552 | /tfx/components/model_validator/component.py | d6bc178a4182f17e0e083067fa3c9cd3df96c6b5 | [
"Apache-2.0"
] | permissive | hephaex/tfx | eac03c1ab670368088ec2a49af28ff374dc95c4a | 76d8731cb54be3451e10d270d8bcb0589401135f | refs/heads/master | 2020-09-16T11:52:06.198631 | 2019-11-23T21:01:50 | 2019-11-23T21:45:46 | 223,760,941 | 1 | 0 | Apache-2.0 | 2019-11-24T14:53:08 | 2019-11-24T14:53:08 | null | UTF-8 | Python | false | false | 4,026 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX ModelValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.model_validator import driver
from tfx.components.model_validator import executor
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import ModelValidatorSpec
class ModelValidator(base_component.BaseComponent):
"""A TFX component to validate a newly trained model against a prior model.
The model validator component can be used to check model metrics threshold
and validate current model against a previously validated model. If there
isn't a prior validated model, model validator will just make sure the
threshold passed. Otherwise, ModelValidator compares a newly trained models
against a known good model, specifically the last model "blessed" by this
component. A model is "blessed" if the exported model's metrics are within
predefined thresholds around the prior model's metrics.
*Note:* This component includes a driver to resolve last blessed model.
## Possible causes why model validation fails
Model validation can fail for many reasons, but these are the most common:
- problems with training data. For example, negative examples are dropped or
features are missing.
- problems with the test or evaluation data. For example, skew exists between
the training and evaluation data.
- changes in data distribution. This indicates the user behavior may have
changed over time.
- problems with the trainer. For example, the trainer was stopped before
model is converged or the model is unstable.
## Example
```
# Performs quality validation of a candidate model (compared to a baseline).
model_validator = ModelValidator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'])
```
"""
SPEC_CLASS = ModelValidatorSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
DRIVER_CLASS = driver.Driver
def __init__(self,
examples: types.Channel,
model: types.Channel,
blessing: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct a ModelValidator component.
Args:
examples: A Channel of 'ExamplesPath' type, usually produced by
[ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen) component.
_required_
model: A Channel of 'ModelExportPath' type, usually produced by
[Trainer](https://www.tensorflow.org/tfx/guide/trainer) component.
_required_
blessing: Output channel of 'ModelBlessingPath' that contains the
validation result.
instance_name: Optional name assigned to this specific instance of
ModelValidator. Required only if multiple ModelValidator components are
declared in the same pipeline.
"""
blessing = blessing or types.Channel(
type=standard_artifacts.ModelBlessing,
artifacts=[standard_artifacts.ModelBlessing()])
spec = ModelValidatorSpec(examples=examples, model=model, blessing=blessing)
super(ModelValidator, self).__init__(spec=spec, instance_name=instance_name)
| [
"[email protected]"
] | |
be22c1e11ed28eafca08cf5bcfe0da6b20b66836 | 7c13de6b7831f99b8790452e03953e5ded0aca64 | /classy_vision/generic/distributed_util.py | ec5211f4123496d80e7c9396eef6df0d0e8b1338 | [
"MIT"
] | permissive | vreis/ClassyVision-2 | 3f99d3c06ec422e81e29b0f38f02a7ce56e480d6 | 80aa4d421d1203b4b92bb9b848ccc866816e4f6d | refs/heads/master | 2021-07-15T18:03:14.212417 | 2019-12-06T16:48:19 | 2019-12-06T16:50:46 | 226,377,934 | 0 | 0 | MIT | 2019-12-06T17:27:46 | 2019-12-06T17:27:45 | null | UTF-8 | Python | false | false | 5,284 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
# Default to GPU 0
_cuda_device_index: int = 0
# Setting _cuda_device_index to -1 internally implies that we should use CPU
_CPU_DEVICE_INDEX = -1
def convert_to_distributed_tensor(tensor):
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This helper function converts to the correct
device and returns the tensor + original device.
"""
orig_device = "cpu" if not tensor.is_cuda else "gpu"
if (
torch.distributed.is_available()
and torch.distributed.get_backend() == torch.distributed.Backend.NCCL
and not tensor.is_cuda
):
tensor = tensor.cuda()
return (tensor, orig_device)
def convert_to_normal_tensor(tensor, orig_device):
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This converts the tensor back to original device.
"""
if tensor.is_cuda and orig_device == "cpu":
tensor = tensor.cpu()
return tensor
def is_distributed_training_run():
return (
torch.distributed.is_available()
and torch.distributed.is_initialized()
and (torch.distributed.get_world_size() > 1)
)
def is_master():
"""
Returns True if this is rank 0 of a distributed training job OR if it is
a single trainer job. Otherwise False.
"""
return get_rank() == 0
def all_reduce_mean(tensor):
"""
Wrapper over torch.distributed.all_reduce for performing mean reduction
of tensor over all processes.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.all_reduce(tensor, torch.distributed.ReduceOp.SUM)
tensor = tensor / torch.distributed.get_world_size()
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor
def all_reduce_sum(tensor):
"""
Wrapper over torch.distributed.all_reduce for performing sum
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.all_reduce(tensor, torch.distributed.ReduceOp.SUM)
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor
def gather_tensors_from_all(tensor):
"""
Wrapper over torch.distributed.all_gather for performing
'gather' of 'tensor' over all processes in both distributed /
non-distributed scenarios.
"""
if tensor.ndim == 0:
# 0 dim tensors cannot be gathered. so unsqueeze
tensor = tensor.unsqueeze(0)
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
gathered_tensors = [
torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(gathered_tensors, tensor)
gathered_tensors = [
convert_to_normal_tensor(_tensor, orig_device)
for _tensor in gathered_tensors
]
else:
gathered_tensors = [tensor]
return gathered_tensors
def gather_from_all(tensor):
gathered_tensors = gather_tensors_from_all(tensor)
gathered_tensor = torch.cat(gathered_tensors, 0)
return gathered_tensor
def barrier():
"""
Wrapper over torch.distributed.barrier, returns without waiting
if the distributed process group is not initialized instead of throwing error.
"""
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
return
torch.distributed.barrier()
def get_world_size():
"""
Simple wrapper for correctly getting worldsize in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_world_size()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 1
)
def get_rank():
"""
Simple wrapper for correctly getting rank in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_rank()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 0
)
def set_cuda_device_index(idx: int):
global _cuda_device_index
_cuda_device_index = idx
torch.cuda.set_device(_cuda_device_index)
def set_cpu_device():
global _cuda_device_index
_cuda_device_index = _CPU_DEVICE_INDEX
def get_cuda_device_index() -> int:
return _cuda_device_index
def init_distributed_data_parallel_model(model):
global _cuda_device_index
if _cuda_device_index == _CPU_DEVICE_INDEX:
# CPU-only model, don't specify device
return torch.nn.parallel.DistributedDataParallel(model, broadcast_buffers=False)
else:
# GPU model
return torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[_cuda_device_index],
output_device=_cuda_device_index,
broadcast_buffers=False,
)
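

# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# In a real job the process group is initialized by the launcher; in a plain
# single-process run the helpers below simply fall back to no-op behaviour.
if __name__ == "__main__":
    example = torch.tensor([1.0, 2.0, 3.0])
    averaged = all_reduce_mean(example)  # unchanged when not distributed
    gathered = gather_from_all(example)  # just `example` when world_size == 1
    if is_master():
        print("world_size:", get_world_size(), "mean:", averaged, "gathered:", gathered.shape)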
| [
"[email protected]"
] | |
e0b5ee0cd516322f32a722536ad76fe2471c92fa | f682815bff031cf04e958ac39380d73b2e671fe5 | /ml_experiment.py | 01388edb34d76c41b99b015822717c06d053d5dd | [] | no_license | crocodoyle/deep-mri-qc | 8ec6665d48d8f92ea5baf034027dc04e4ac5e3d7 | 35058166ef1460f841728bd97a676d0758d13317 | refs/heads/master | 2020-06-28T18:39:13.509219 | 2018-12-06T22:54:21 | 2018-12-06T22:54:21 | 74,483,623 | 5 | 7 | null | 2018-01-12T01:33:55 | 2016-11-22T15:00:58 | Python | UTF-8 | Python | false | false | 573 | py | import os, pickle
def setup_experiment(workdir):
try:
experiment_number = pickle.load(open(workdir + 'experiment_number.pkl', 'rb'))
experiment_number += 1
except:
        print("Couldn't find the file to load experiment number")
experiment_number = 0
print('This is experiment number:', experiment_number)
results_dir = workdir + '/experiment-' + str(experiment_number) + '/'
os.makedirs(results_dir)
pickle.dump(experiment_number, open(workdir + 'experiment_number.pkl', 'wb'))
return results_dir, experiment_number | [
"[email protected]"
] | |
73a032a0612a77d65c7a07200994e83c69f92ce3 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/TheTaintedOne/solution.py | 746f30c4b1aac93a50dee4c7e781e4c5746595eb | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import sys
lines = sys.stdin.readlines()
ntests = int(lines[0])
vowels = set(["a", "e", "i", "o", "u"])
linenum = 1;
for c in xrange(0, ntests):
name, csize = lines[linenum].split()
csize = int(csize)
# print "[" + name + "]"
# print start_size, num_others
cons = [];
for cc in name:
if cc in vowels:
cons.append(0)
else:
cons.append(1)
# print cons
runs = [];
curr_run = 0;
for pos in xrange(len(name)):
if cons[pos]==1:
curr_run = curr_run + 1
else:
curr_run = 0
if curr_run>= csize:
runs.append((pos, curr_run))
# print runs
res = 0
list_pos = 0
for pos in xrange(len(name)):
if list_pos < len(runs):
if pos>runs[list_pos][0]-csize+1:
list_pos = list_pos+1
if list_pos < len(runs):
res = res + (len(name)-runs[list_pos][0])
# print pos, runs[list_pos]
print "Case #" + str(c+1) + ": ", str(res)
linenum = linenum + 1
| [
"[email protected]"
] | |
5661147fb32392e5da9c640eb4c2970ac23560ef | f5b95b7b520d6986eed909eebba7a13d839b2164 | /function/variable length arguments/demo2.py | 45287652be2149fa1b24093b8f42d125b7be27e1 | [] | no_license | NeenaBenny1995/Luminaronlineclassneenabeny | 0a5e2e80c70aca5d60aee39f3e4872818abf4f20 | 2753b7f293e072b6654bf4b5efab05a6fe98756c | refs/heads/master | 2023-04-30T08:30:10.908393 | 2021-05-20T08:25:54 | 2021-05-20T08:25:54 | 368,791,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | def print_employee(**kwargs):#accept argument as key value pair
print(kwargs)
print_employee(id=100,name="Arjun",salary=10000) | [
"[email protected]"
] | |
3a87d592cde203deedef3e8df7131bca17d15baf | f508d22d4ce8a350c14dfbc783991a1fe59e5547 | /Video/Django_StreamingHttpResponse_and_countdown_in_loop.py | a753a1b1f057470f2356a97858ffb9af31804835 | [] | no_license | zDragon117/SomeCrapCode | e7459e42325e29c051da6d5065ea77cc1db600ac | 812c6fade4e4e63eea5ddfd75e3b629d68c6b0e9 | refs/heads/master | 2021-10-25T00:08:12.040472 | 2021-04-15T16:08:51 | 2021-04-15T16:08:51 | 249,888,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | globalIsOver = False
import time
from threading import Timer

from django.http import JsonResponse, StreamingHttpResponse
from django.views.decorators import gzip
from rest_framework.decorators import api_view

# NOTE: AlarmBus is a project-specific module (it supplies camera workers via
# AlarmBus.get_worker) and must be imported from the host project; its import
# path is not part of this snippet.


def gen_camera_stream_resp(
camera_source,
fps=10,
scale_width=720,
scale_height=-1,
):
countdown(120)
while True and not over():
try:
frame = camera_source.get_stream_data(
scale_width=scale_width,
scale_height=scale_height,
).tobytes()
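            # Each yielded chunk is one part of a multipart/x-mixed-replace
            # (MJPEG) response: a "--frame" boundary, a JPEG Content-Type
            # header, then the encoded frame bytes.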
yield (
b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
)
time.sleep(float(1) / fps)
except Exception:
time.sleep(0.3)
break
def countdown(time):
t = Timer(time, set_over)
t.start()
def set_over():
global globalIsOver
globalIsOver = True
def over():
return globalIsOver
@gzip.gzip_page
@api_view(['GET'])
def live_stream(request, camera_id):
try:
global globalIsOver
globalIsOver = False
fps = int(request.GET.get('fps', 10))
scale_width = int(request.GET.get('scale_width', 720))
scale_height = int(request.GET.get('scale_height', -1))
camera_source = AlarmBus.get_worker(camera_id).get_video_source()
return StreamingHttpResponse(
gen_camera_stream_resp(
camera_source,
fps=fps,
scale_width=scale_width,
scale_height=scale_height,
),
content_type='multipart/x-mixed-replace;boundary=frame',
)
except Exception as exc:
return JsonResponse({
'Error': f'Bad Request: {exc}'
}, status=400) | [
"[email protected]"
] | |
6cfeff895436cedefcd17101ee2bfb6460f6e66d | da4dcd5dc90c8b3bbcc4b250da65017e090aafd5 | /9.27/scrapy/scrapyboss/scrapyboss/pipelines.py | 33d0caf8124ddacaacf491d9dcea3527ae61f6d2 | [] | no_license | yuebanwanwan/9.27 | 2a8100d987597056d1611590256114c67e77e24f | 1ee29ac21371524764c4be43c5dd90a3082e1ac6 | refs/heads/master | 2020-03-29T23:55:00.608808 | 2018-09-26T22:12:43 | 2018-09-26T22:12:43 | 150,495,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,333 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import pymysql
from scrapy.exceptions import DropItem
class MongoPipeline(object):
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DB')
)
def open_spider(self, spider):
print('Mongo_open_spider')
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def process_item(self, item, spider):
name = item.collection
self.db[name].insert(dict(item))
return item
def close_spider(self, spider):
print('Mongo_close_spider')
self.client.close()
class MysqlPipeline():
def __init__(self, host, database, user, password, port):
self.host = host
self.database = database
self.user = user
self.password = password
self.port = port
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
database=crawler.settings.get('MYSQL_DATABASE'),
user=crawler.settings.get('MYSQL_USER'),
password=crawler.settings.get('MYSQL_PASSWORD'),
port=crawler.settings.get('MYSQL_PORT'),
)
def open_spider(self, spider):
print('Mysql_open_spider')
self.db = pymysql.connect(self.host, self.user, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
print('Mysql_close_spider')
self.db.close()
def process_item(self, item, spider):
data = dict(item)
keys = ', '.join(data.keys())
values = ', '.join(['%s'] * len(data))
sql = 'insert into %s (%s) values (%s)' % (item.table, keys, values)
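        # For example (hypothetical item), a table "jobs" with fields
        # {"title": ..., "salary": ...} yields:
        #   insert into jobs (title, salary) values (%s, %s)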
self.cursor.execute(sql, tuple(data.values()))
self.db.commit()
return item
| [
"[email protected]"
] | |
7cb7899c5d50aeeb8857f7c68469cd872242a07d | 37183206f6b97a329367df0913f7bd1e9504278b | /services/equity_parser.py | dbe8f05e9ddc3c5e38ad56d0979784f2c80986ff | [] | no_license | imaayush/Equity | d9b31b45d7f57d22006c5cbcb0356884e15250c7 | 5a5851f1d436c874e383c78098f9342632f967a7 | refs/heads/master | 2021-09-02T00:02:11.163788 | 2017-12-29T09:13:55 | 2017-12-29T09:13:55 | 115,530,509 | 0 | 0 | null | 2017-12-29T08:41:49 | 2017-12-27T14:47:18 | Python | UTF-8 | Python | false | false | 3,013 | py | import os
from .constants import FIELDS_NAMES
class EquityParser():
"""Take .csv file path as input and return list of dic"""
def _is_readable_file(self, path):
return os.path.isfile(path) and os.access(path, os.R_OK)
def __init__(self):
self.column_names = FIELDS_NAMES
self.erros = {}
def _covert_line_into_dic(self, line, column_names_with_index):
"""Input is line and cloumn names with its index convert it into dic
Ex:
Input: 500002,ABB LTD.,A ,Q,1389.00,1399.90,1374.15,1391.05,1391.05,1378.05,321,95555,132897086.00,
Output: {"SC_CODE: 500002, "SC_NAME": "ABB LTD.",
"OPEN":1393.45, "HIGH":1433.40, "LOW": 1383.85,
"CLOSE": 1388.15}
"""
company_raw_details = line.split(',')
company_details = {}
        for key, value in column_names_with_index.items():
company_details[key] = company_raw_details[value].strip()
return company_details
def _find_company_columns_with_index(self, columns_in_file):
"""find position of cloumn name
Ex:
Input: SC_CODE,SC_NAME,SC_GROUP,SC_TYPE,OPEN,HIGH,LOW,CLOSE,LAST,PREVCLOSE,NO_TRADES,NO_OF_SHRS,NET_TURNOV,TDCLOINDI
Ouput:{"SC_CODE: 0, "SC_NAME": 1,
"OPEN":4, "HIGH":5, "LOW": 6,
"CLOSE": 7}
"""
if not self._check_required_columns(columns_in_file):
raise ValueError('missing, required field')
column_names_with_index = {}
for index_num in range(0, len(columns_in_file)):
if columns_in_file[index_num] in self.column_names:
column_names_with_index[columns_in_file[index_num]] = index_num
return column_names_with_index
def parse(self, path):
"""Take .csv file path as input and return list of dic"""
companies_details = []
if not self._is_readable_file(path):
            raise ValueError(
                'file "{}" does not exist, or is not readable'.format(path)
            )
with open(path, "rb") as finput:
self.lines = finput.read().split('\n')
column_names_with_index = self._find_company_columns_with_index(
self.lines[0].split(','))
companies_details = []
for line in self.lines[1:]:
if line:
companies_details.append(self._covert_line_into_dic(
line, column_names_with_index))
return companies_details
def _check_required_columns(self, columns_in_file):
clean_cloumn_names = []
for column_name in columns_in_file:
clean_cloumn_names.append(column_name.strip())
for column_name in self.column_names:
if not (column_name in clean_cloumn_names):
return False
return True
| [
"[email protected]"
] | |
00df633d7c8b33c3a85ec2a26c6b5ab806db56b6 | d2b4a7bcf22f494edd7bc8d91e256f2c95fc3e46 | /0x0A-python-inheritance/1-my_list.py | 586dffe1974e28f7ddbb7e400964310cf48a0d3c | [] | no_license | sidcarrollworks/holbertonschool-higher_level_programming | 72c56d90b24eab9b7fcb1861be17be2019db4041 | 26295d1e3c1e68ad22a7b3fe28fae2e938fadd9a | refs/heads/master | 2022-05-03T23:10:49.527658 | 2018-05-10T19:40:40 | 2018-05-10T19:40:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | #!/usr/bin/python3
'''
Inherit
'''
class MyList(list):
def print_sorted(self):
print(sorted(self))
| [
"[email protected]"
] | |
6eff69d26d7e3eaee11c0cb00bccf058d78dcb99 | 7640f7e8ea742cd8938e62d165929a044036cd31 | /BTransR_train.py | c4897af6963778cae662897ed0a2e671b06ded2a | [] | no_license | Jason101616/BTransX | 94fffe0598ca7f073e28319cbea3ca54a5cc9c39 | ab3e11b693705b3cd74c92404932b0542d988ad9 | refs/heads/master | 2021-06-20T19:49:28.063663 | 2017-08-15T03:51:03 | 2017-08-15T03:51:03 | 100,281,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,347 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2017-03-29 14:06
# @Author : Zhang Du
# @File : BTransR_train.py
import numpy as np
from constants import *
import json
import random
class BTransR:
def __init__(self, step = 0.001):
def load_aligned_triples():
with open(path_aligned_triples, mode='r', encoding='utf8') as file:
# aligned triples: [ZH_h, ZH_t, ZH_r, ZH_h_index, ZH_t_index, ZH_r_index,
# EN_h, EN_t, EN_r, EN_h_index, EN_t_index, EN_r_index]
self.aligned_triples = json.load(file)
def find_unique_zh_en_triples():
def delete_duplicate_triples(list_triples):
if len(list_triples) <= 1:
return list_triples
list_triples.sort(key=lambda x: (x[3], x[4], x[5]))
index = 0
for i in range(1, len(list_triples)):
if list_triples[index] != list_triples[i]:
index = index + 1
list_triples[index] = list_triples[i]
return list_triples[0: index + 1]
# extract Chinese and English triples in aligned triples and delete duplication
for i in self.aligned_triples:
temp_zh = i[0: 6] # ZH part
temp_en = i[6: 12] # EN part
self.Chinese_triples.append(temp_zh)
self.English_triples.append(temp_en)
self.Chinese_triples = delete_duplicate_triples(self.Chinese_triples)
self.English_triples = delete_duplicate_triples(self.English_triples)
def subscript_zh_en_triples():
# {num: [h, t, r, h_num, t_num, r_num], …, }
for i in range(len(self.Chinese_triples)):
self.zh_subscript_triples[i] = self.Chinese_triples[i]
for i in range(len(self.English_triples)):
self.en_subscript_triples[i] = self.English_triples[i]
def zh_en_dict_aligned_triples():
            # self.zh_dict_aligned_triples = {Chinese triple: [English triples, ...], ...}
            # self.en_dict_aligned_triples = {English triple: [Chinese triples, ...], ...}
for i in self.aligned_triples:
temp_zh = tuple(i[0: 6]) # cannot hash a list
temp_en = i[6: 12]
if self.zh_dict_aligned_triples.get(temp_zh) == None:
self.zh_dict_aligned_triples[temp_zh] = [temp_en]
else:
self.zh_dict_aligned_triples[temp_zh].append(temp_en)
for i in self.aligned_triples:
temp_zh = i[0: 6]
temp_en = tuple(i[6: 12]) # cannot hash a list
if self.en_dict_aligned_triples.get(temp_en) == None:
self.en_dict_aligned_triples[temp_en] = [temp_zh]
else:
self.en_dict_aligned_triples[temp_en].append(temp_zh)
def num2embedding():
for i in range(len(path_vec)):
with open(path_vec[i], mode='r', encoding='utf8') as file:
while True:
line = file.readline()
if line:
vectors = line.split('\t')
vectors = vectors[0: -1]
for j in range(len(vectors)):
vectors[j] = float(vectors[j])
# convert to numpy and transpose to column vector
vectors = np.array(vectors).transpose()
self.num_vector_list[i].append(vectors)
else:
break
        # BTransR basic attributes
self.dimension = 50
self.step = step
self.margin = 1
self.aligned_triples = []
load_aligned_triples()
self.BTransR_length = len(self.aligned_triples)
self.nbatches = 1
self.BTransR_batch = int(self.BTransR_length / self.nbatches)
self.BTransR_train_times = 3000
self.loss = 0
# assistant attributes
# find corrupted triples
self.Chinese_triples = []
self.English_triples = []
self.zh_subscript_triples = {}
self.en_subscript_triples = {}
find_unique_zh_en_triples()
self.length_zh_triples = len(self.Chinese_triples)
self.length_en_triples = len(self.English_triples)
subscript_zh_en_triples()
self.zh_dict_aligned_triples = {}
self.en_dict_aligned_triples = {}
zh_en_dict_aligned_triples()
# transition between num and embeddings
self.num_vector_entities_en = []
self.num_vector_relations_en = []
self.num_vector_entities_zh = []
self.num_vector_relations_zh = []
self.num_vector_list = [self.num_vector_entities_en, self.num_vector_relations_en,
self.num_vector_entities_zh, self.num_vector_relations_zh]
num2embedding()
self.matrix = []
for i in range(len(self.num_vector_relations_en)):
self.matrix.append(np.eye(self.dimension))
self.loss_list = []
self.min_loss = 9999
def norm(self, t3_h, t3_t, t3_r, corrupt_is_zh):
'''
||M*h||2 <= 1 ||M*t||2 <= 1
'''
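        # (Editor's note) norm_specific below keeps shrinking the relation-specific
        # projection matrix with gradient-style updates while ||M * entity||_2 > 1,
        # enforcing the soft norm constraint stated in the docstring above.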
def norm_specific(entity_embedding):
lambda_step = 1
while True:
x = 0
mul = self.matrix[t3_r].dot(entity_embedding)
for i in mul:
# x += i ** 0.5
x += i * i
x = x ** 0.5
if x > 1:
for ii in range(self.dimension):
tmp = self.matrix[t3_r][ii, 0: self.dimension].dot(entity_embedding)
for jj in range(self.dimension):
self.matrix[t3_r][ii][jj] -= self.step * lambda_step * tmp * entity_embedding[jj]
# entity_embedding[jj] -= self.step * lambda_step * tmp * self.matrix_head[ii][[jj]]
else:
break
if corrupt_is_zh:
pass
else:
norm_specific(self.num_vector_entities_en[t3_h])
norm_specific(self.num_vector_entities_en[t3_t])
def BTransR(self):
for epoch in range(self.BTransR_train_times):
self.loss = 0
for batch in range(self.nbatches):
for k in range(self.BTransR_batch):
i = random.randint(0, self.BTransR_length - 1) # randomize an aligned triples
random_num = random.randint(1, 1000)
if random_num <= 500:
corrupt_is_zh = True
corrupt_tri = self.corrupt_triples_former(self.aligned_triples[i])
# corrupted aligned triples, substitute a random triples
self.train_trans(self.aligned_triples[i][3], self.aligned_triples[i][4], self.aligned_triples[i][5],
self.aligned_triples[i][9], self.aligned_triples[i][10], self.aligned_triples[i][11],
corrupt_tri[0], corrupt_tri[1], corrupt_tri[2],
self.aligned_triples[i][9], self.aligned_triples[i][10], self.aligned_triples[i][11])
else:
corrupt_is_zh = False
corrupt_tri = self.corrupt_triples_latter(self.aligned_triples[i])
self.train_trans(self.aligned_triples[i][3], self.aligned_triples[i][4], self.aligned_triples[i][5],
self.aligned_triples[i][9], self.aligned_triples[i][10], self.aligned_triples[i][11],
self.aligned_triples[i][3], self.aligned_triples[i][4], self.aligned_triples[i][5],
corrupt_tri[0], corrupt_tri[1], corrupt_tri[2])
self.norm(corrupt_tri[0], corrupt_tri[1], corrupt_tri[2], corrupt_is_zh)
print("Step:", self.step, "epoch:", epoch, "loss:", self.loss)
self.loss_list.append(self.loss)
if min(self.min_loss, self.loss) == self.loss:
self.min_loss = self.loss
self.out_BTransR()
print("Current min_loss is", self.min_loss, "In epoch", epoch)
def out_BTransR(self):
for i in range(len(self.num_vector_relations_en)):
self.matrix[i].tofile(path_BTransR_Matrix[i])
def train_trans(self, t1_h, t1_t, t1_r, t2_h, t2_t, t2_r, t3_h, t3_t, t3_r, t4_h, t4_t, t4_r):
sum1 = self.calc_sum(t1_h, t1_t, t1_r, t2_h, t2_t, t2_r)
sum2 = self.calc_sum(t3_h, t3_t, t3_r, t4_h, t4_t, t4_r)
if sum1 + self.margin > sum2:
self.loss += self.margin + sum1 - sum2
self.gradient(t1_h, t1_t, t1_r, t2_h, t2_t, t2_r, -1, 1)
self.gradient(t3_h, t3_t, t3_r, t4_h, t4_t, t4_r, 1, 1)
def calc_sum(self, t1_h, t1_t, t1_r, t2_h, t2_t, t2_r):
t1_h_embedding = self.num_vector_entities_zh[t1_h]
t1_t_embedding = self.num_vector_entities_zh[t1_t]
t1_r_embedding = self.num_vector_relations_zh[t1_r]
t2_h_embedding = self.num_vector_entities_en[t2_h]
t2_t_embedding = self.num_vector_entities_en[t2_t]
t2_r_embedding = self.num_vector_relations_en[t2_r]
head = self.matrix[t2_r].dot(t2_h_embedding) - t1_h_embedding
head_fabs = np.fabs(head)
head_sum = np.sum(head_fabs)
tail = self.matrix[t2_r].dot(t2_t_embedding) - t1_t_embedding
tail_fabs = np.fabs(tail)
tail_sum = np.sum(tail_fabs)
relation = t2_r_embedding - t1_r_embedding
relation_fabs = np.fabs(relation)
relation_sum = np.sum(relation_fabs)
return head_sum + tail_sum + relation_sum
def gradient(self, t1_h, t1_t, t1_r, t2_h, t2_t, t2_r, belta, same):
t1_h_embedding = self.num_vector_entities_zh[t1_h]
t1_t_embedding = self.num_vector_entities_zh[t1_t]
t1_r_embedding = self.num_vector_relations_zh[t1_r]
t2_h_embedding = self.num_vector_entities_en[t2_h]
t2_t_embedding = self.num_vector_entities_en[t2_t]
t2_r_embedding = self.num_vector_relations_en[t2_r]
for ii in range(self.dimension):
x_head = t2_h_embedding[ii] - self.matrix[t2_r][ii, 0: self.dimension].dot(t1_h_embedding)
if x_head > 0:
x_head = belta * self.step
else:
x_head = -belta * self.step
for jj in range(self.dimension):
self.matrix[t2_r][ii][jj] -= x_head * (t1_h_embedding[jj] - t2_h_embedding[jj])
x_tail = t2_t_embedding[ii] - self.matrix[t2_r][ii, 0: self.dimension].dot(t1_t_embedding)
if x_tail > 0:
x_tail = belta * self.step
else:
x_tail = -belta * self.step
for jj in range(self.dimension):
self.matrix[t2_r][ii][jj] -= x_tail * (t1_t_embedding[jj]- t2_t_embedding[jj])
x_relation = t2_r_embedding[ii] - t1_r_embedding[ii]
if x_relation > 0:
x_relation = belta * self.step
else:
x_relation = -belta * self.step
for jj in range(self.dimension):
self.matrix[t2_r][ii][jj] -= x_relation * (t1_r_embedding[jj]- t2_r_embedding[jj])
# randomize a Chinese triple, guarantee this pair of triples is not aligned triples
# otherwise randomize again.
def corrupt_triples_former(self, aligned_triples):
aligned_en_triples = aligned_triples[6: 12]
while True:
rand_zh_subscript = random.randint(0, self.length_zh_triples - 1) # randomize the subscript of a Chinese triple
rand_Chinese_triple = self.zh_subscript_triples[rand_zh_subscript]
aligned_zh_triples = self.en_dict_aligned_triples[tuple(aligned_en_triples)]
if rand_Chinese_triple not in aligned_zh_triples:
return rand_Chinese_triple[3: 6]
# randomize an English triple, guarantee this pair of triples is not aligned triples
# otherwise randomize again.
def corrupt_triples_latter(self, aligned_triples):
aligned_zh_triples = aligned_triples[0: 6]
while True:
# randomize the subscript of a English triple
rand_en_subscript = random.randint(0, self.length_en_triples - 1)
# use subscript to find the original English triple
rand_English_triple = self.en_subscript_triples[rand_en_subscript]
# find English triples according to the original Chinese triple
aligned_en_triples = self.zh_dict_aligned_triples[tuple(aligned_zh_triples)]
# ensure the newly randomized triple is not in the original En and Zh triples set
if rand_English_triple not in aligned_en_triples:
return rand_English_triple[3: 6] # return the subscription of corrupt English triple
if __name__ == '__main__':
timenow("Program begin.")
test = BTransR()
test.BTransR()
timenow("Program end.")
| [
"[email protected]"
] | |
7ba038517977014232bb369d1a1be06c6546ca03 | fcc47f227245e523e5603c7ac2b86ed301e91874 | /chapter02/work04.py | eccc4f7cd6998df9d0ba6c7685efc6f4151b0cde | [] | no_license | programmingkids/python-level3 | f420566c104f2ffe411afbad0a67ae72acfd3584 | 52a0abdf3410060edfcf69f0395b85f754def06b | refs/heads/master | 2022-12-15T02:16:01.102846 | 2020-09-17T08:18:00 | 2020-09-17T08:18:00 | 289,796,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | colors = ["red","blue","green","yellow","pink"]
| [
"[email protected]"
] | |
0a458cffdfd12e0e313827f589640df8bbc8290b | b2b532b74a89510d71e108cf7d20507384d115d4 | /print_index.py | 5ddb8aa6ad516d27e645ef43e5c3f7a38368fbe2 | [] | no_license | jyurick/cmput397Assignment1 | 6b624cc2fb187be4cbae0e52cd3e5a4fbf5801cf | 363985076fd251d7a623a6826f933f325c8c7f2c | refs/heads/master | 2020-04-06T05:47:16.724276 | 2017-02-28T06:09:15 | 2017-02-28T06:09:15 | 82,961,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | import sqlite3
import sys
#takes in system argument as the sqlite3 db_Name and connects to it
db_name = sys.argv[1]
conn = sqlite3.connect(db_name)
#selects data required and orders it by term and docid
stmt = """
SELECT TERM, DOCID, POSITIONS
FROM LISTINGS
ORDER BY TERM, DOCID ASC;
"""
curs = conn.execute(stmt)
count = 0
docPositions = dict()
for row in curs:
#in the very first row, print the term and then set old_term to that term
if count == 0:
old_term = row[0]
print(old_term + "\t", end = '')
term = row[0]
docid = row[1]
positions = row[2]
#once a new term is found, print a newline, the new term and the
#first doc/positions for the term
if term != old_term:
print("\n"+term + "\t" + docid + ":"+ str(positions) + ";", end = '')
#if the term is the same as the old_term, print the doc/positions along the same line
else:
print(docid + ":"+ str(positions) + ";", end = '')
old_term = term
count += 1
conn.close() | [
"[email protected]"
] | |
3b731d40bb96886cefc781104bd86c32f03bdaea | 67c2b8719515a00714ae07e6be1257d5986b9528 | /pyQT/笔记/列表.py | 697b5217758465f5b48de7193aa605a2a8f69167 | [] | no_license | zhangxf2020/jeee | b980cfda0e2b79b1e736ff5b4fea38fdfea9a89d | 922a9ec42591397bf1a7f94cd84f63d459f2dbae | refs/heads/master | 2021-02-19T10:04:53.804492 | 2020-03-16T01:25:40 | 2020-03-16T01:25:40 | 245,301,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | a = '32.123'
print('.' in a) | [
"[email protected]"
] | |
d95e8925cc8de16faad7f38369d751625be57b1b | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/networkx-1.11-py27_0/lib/python2.7/site-packages/networkx/algorithms/centrality/betweenness.py | b8c2b125f647b1c691d87810ce7eb454c7f0ab78 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 11,680 | py | # coding=utf8
"""
Betweenness centrality measures.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
from heapq import heappush, heappop
from itertools import count
import networkx as nx
import random
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['betweenness_centrality',
'edge_betweenness_centrality',
'edge_betweenness']
def betweenness_centrality(G, k=None, normalized=True, weight=None,
endpoints=False,
seed=None):
r"""Compute the shortest-path betweenness centrality for nodes.
Betweenness centrality of a node `v` is the sum of the
fraction of all-pairs shortest paths that pass through `v`
.. math::
c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}
where `V` is the set of nodes, `\sigma(s, t)` is the number of
shortest `(s, t)`-paths, and `\sigma(s, t|v)` is the number of those
paths passing through some node `v` other than `s, t`.
If `s = t`, `\sigma(s, t) = 1`, and if `v \in {s, t}`,
`\sigma(s, t|v) = 0` [2]_.
Parameters
----------
G : graph
A NetworkX graph
k : int, optional (default=None)
If k is not None use k node samples to estimate betweenness.
The value of k <= n where n is the number of nodes in the graph.
Higher values give better approximation.
normalized : bool, optional
If True the betweenness values are normalized by `2/((n-1)(n-2))`
for graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
endpoints : bool, optional
If True include the endpoints in the shortest path counts.
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
edge_betweenness_centrality
load_centrality
Notes
-----
The algorithm is from Ulrik Brandes [1]_.
See [4]_ for the original first published version and [2]_ for details on
algorithms for variations and related metrics.
For approximate betweenness calculations set k=#samples to use
k nodes ("pivots") to estimate the betweenness values. For an estimate
of the number of pivots needed see [3]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
References
----------
.. [1] Ulrik Brandes:
A Faster Algorithm for Betweenness Centrality.
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes:
On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
.. [3] Ulrik Brandes and Christian Pich:
Centrality Estimation in Large Networks.
International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007.
http://www.inf.uni-konstanz.de/algo/publications/bp-celn-06.pdf
.. [4] Linton C. Freeman:
A set of measures of centrality based on betweenness.
Sociometry 40: 35–41, 1977
http://moreno.ss.uci.edu/23.pdf
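
    Examples
    --------
    (Example added for illustration; values rounded.) For the path graph
    0-1-2-3-4, ``betweenness_centrality(G)`` gives
    ``{0: 0.0, 1: 0.5, 2: 0.667, 3: 0.5, 4: 0.0}``: node 2 lies on 4 of the
    shortest paths between pairs of other nodes, and 4 * 2/((5-1)(5-2)) = 0.667.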
"""
betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
if k is None:
nodes = G
else:
random.seed(seed)
nodes = random.sample(G.nodes(), k)
for s in nodes:
# single source shortest paths
if weight is None: # use BFS
S, P, sigma = _single_source_shortest_path_basic(G, s)
else: # use Dijkstra's algorithm
S, P, sigma = _single_source_dijkstra_path_basic(G, s, weight)
# accumulation
if endpoints:
betweenness = _accumulate_endpoints(betweenness, S, P, sigma, s)
else:
betweenness = _accumulate_basic(betweenness, S, P, sigma, s)
# rescaling
betweenness = _rescale(betweenness, len(G),
normalized=normalized,
directed=G.is_directed(),
k=k)
return betweenness
def edge_betweenness_centrality(G, k=None, normalized=True, weight=None,
seed=None):
r"""Compute betweenness centrality for edges.
Betweenness centrality of an edge `e` is the sum of the
fraction of all-pairs shortest paths that pass through `e`
.. math::
c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}
where `V` is the set of nodes,`\sigma(s, t)` is the number of
shortest `(s, t)`-paths, and `\sigma(s, t|e)` is the number of
those paths passing through edge `e` [2]_.
Parameters
----------
G : graph
A NetworkX graph
k : int, optional (default=None)
If k is not None use k node samples to estimate betweenness.
The value of k <= n where n is the number of nodes in the graph.
Higher values give better approximation.
normalized : bool, optional
If True the betweenness values are normalized by `2/(n(n-1))`
for graphs, and `1/(n(n-1))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
edges : dictionary
Dictionary of edges with betweenness centrality as the value.
See Also
--------
betweenness_centrality
edge_load
Notes
-----
The algorithm is from Ulrik Brandes [1]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
References
----------
.. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes,
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
"""
betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
# b[e]=0 for e in G.edges()
betweenness.update(dict.fromkeys(G.edges(), 0.0))
if k is None:
nodes = G
else:
random.seed(seed)
nodes = random.sample(G.nodes(), k)
for s in nodes:
# single source shortest paths
if weight is None: # use BFS
S, P, sigma = _single_source_shortest_path_basic(G, s)
else: # use Dijkstra's algorithm
S, P, sigma = _single_source_dijkstra_path_basic(G, s, weight)
# accumulation
betweenness = _accumulate_edges(betweenness, S, P, sigma, s)
# rescaling
for n in G: # remove nodes to only return edges
del betweenness[n]
betweenness = _rescale_e(betweenness, len(G),
normalized=normalized,
directed=G.is_directed())
return betweenness
# obsolete name
def edge_betweenness(G, k=None, normalized=True, weight=None, seed=None):
return edge_betweenness_centrality(G, k, normalized, weight, seed)
# helpers for betweenness centrality
def _single_source_shortest_path_basic(G, s):
S = []
P = {}
for v in G:
P[v] = []
sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
D = {}
sigma[s] = 1.0
D[s] = 0
Q = [s]
while Q: # use BFS to find shortest paths
v = Q.pop(0)
S.append(v)
Dv = D[v]
sigmav = sigma[v]
for w in G[v]:
if w not in D:
Q.append(w)
D[w] = Dv + 1
if D[w] == Dv + 1: # this is a shortest path, count paths
sigma[w] += sigmav
P[w].append(v) # predecessors
return S, P, sigma
def _single_source_dijkstra_path_basic(G, s, weight='weight'):
# modified from Eppstein
S = []
P = {}
for v in G:
P[v] = []
sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
D = {}
sigma[s] = 1.0
push = heappush
pop = heappop
seen = {s: 0}
c = count()
Q = [] # use Q as heap with (distance,node id) tuples
push(Q, (0, next(c), s, s))
while Q:
(dist, _, pred, v) = pop(Q)
if v in D:
continue # already searched this node.
sigma[v] += sigma[pred] # count paths
S.append(v)
D[v] = dist
for w, edgedata in G[v].items():
vw_dist = dist + edgedata.get(weight, 1)
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
push(Q, (vw_dist, next(c), v, w))
sigma[w] = 0.0
P[w] = [v]
elif vw_dist == seen[w]: # handle equal paths
sigma[w] += sigma[v]
P[w].append(v)
return S, P, sigma
def _accumulate_basic(betweenness, S, P, sigma, s):
delta = dict.fromkeys(S, 0)
while S:
w = S.pop()
coeff = (1.0 + delta[w]) / sigma[w]
for v in P[w]:
delta[v] += sigma[v] * coeff
if w != s:
betweenness[w] += delta[w]
return betweenness
def _accumulate_endpoints(betweenness, S, P, sigma, s):
betweenness[s] += len(S) - 1
delta = dict.fromkeys(S, 0)
while S:
w = S.pop()
coeff = (1.0 + delta[w]) / sigma[w]
for v in P[w]:
delta[v] += sigma[v] * coeff
if w != s:
betweenness[w] += delta[w] + 1
return betweenness
def _accumulate_edges(betweenness, S, P, sigma, s):
delta = dict.fromkeys(S, 0)
while S:
w = S.pop()
coeff = (1.0 + delta[w]) / sigma[w]
for v in P[w]:
c = sigma[v] * coeff
if (v, w) not in betweenness:
betweenness[(w, v)] += c
else:
betweenness[(v, w)] += c
delta[v] += c
if w != s:
betweenness[w] += delta[w]
return betweenness
def _rescale(betweenness, n, normalized, directed=False, k=None):
if normalized is True:
if n <= 2:
scale = None # no normalization b=0 for all nodes
else:
scale = 1.0 / ((n - 1) * (n - 2))
else: # rescale by 2 for undirected graphs
if not directed:
scale = 1.0 / 2.0
else:
scale = None
if scale is not None:
if k is not None:
scale = scale * n / k
for v in betweenness:
betweenness[v] *= scale
return betweenness
def _rescale_e(betweenness, n, normalized, directed=False, k=None):
if normalized is True:
if n <= 1:
scale = None # no normalization b=0 for all nodes
else:
scale = 1.0 / (n * (n - 1))
else: # rescale by 2 for undirected graphs
if not directed:
scale = 1.0 / 2.0
else:
scale = None
if scale is not None:
if k is not None:
scale = scale * n / k
for v in betweenness:
betweenness[v] *= scale
return betweenness
| [
"[email protected]"
] | |
72739765e377bc2c3ededd2471ef4acd179e7817 | f0f80303b43aa2b248eb8bd8b97c0fa42e8dd7c2 | /src/data_visualization/display_pairwise.py | 22c49dc6d722b1fdd7ff44550dde17a85c52b273 | [
"MIT"
] | permissive | CrivelliLab/Deep-Protein-Scoring | 7377bb02bbd72ccddf4faaa25a5d90310097f767 | 059791a54133c2d03eb14cf98fba1c6ef27c8f49 | refs/heads/master | 2020-05-04T23:55:23.606379 | 2019-03-23T19:17:28 | 2019-03-23T19:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | '''
display_pairwise.py
Updated: 3/20/18
'''
import os
import h5py as hp
import numpy as np
import matplotlib.pyplot as plt
# Data parameters
data_folder = '../../data/T0882/'
################################################################################
if __name__ == '__main__':
# Set paths relative to this file
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Load pairwise data
f = hp.File(data_folder+'pairwise_data.hdf5', "r")
data_set = f['dataset']
x = np.array(data_set[list(data_set.keys())[0]])
# Display histogram
for i in range(x.shape[2]):
plt.imshow(x[:,:,i], cmap='Blues')
plt.show()
| [
"[email protected]"
] | |
168173786349698c8ae6e3a87590ff088b046c10 | 13119e54911404e9be9fe6a9b7732f23a16f5e1f | /lists/tests/test_models.py | d70ca968d7fd6ac126e80610e5b95ae2db78d6ef | [
"MIT"
] | permissive | PatrickLeonard/superlists | d290f24c96f5faa3e06f7934ca301ac787f2b4c0 | c99cdb5ed32009a878016da9e3bb2659a267f851 | refs/heads/master | 2021-01-11T02:29:49.721734 | 2016-11-14T02:01:54 | 2016-11-14T02:01:54 | 70,960,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,197 | py | from django.contrib.auth import get_user_model
User = get_user_model()
from django.core.exceptions import ValidationError
from django.test import TestCase
from lists.models import Item, List
class ItemModelTest(TestCase):
def test_default_test(self):
item = Item()
self.assertEqual(item.text, '')
def test_item_is_related_to_list(self):
list_ = List.objects.create()
item = Item()
item.list = list_
item.save()
self.assertIn(item, list_.item_set.all())
def test_list_ordering(self):
list1 = List.objects.create()
item1 = Item.objects.create(list=list1, text='i1')
item2 = Item.objects.create(list=list1, text='item 2')
item3 = Item.objects.create(list=list1, text='3')
self.assertEqual(
list(Item.objects.all()),
[item1, item2, item3]
)
def test_string_representation(self):
item = Item(text='some text')
self.assertEqual(str(item), 'some text')
def test_cannot_save_empty_list_items(self):
list_ = List.objects.create()
item = Item(list=list_, text='')
with self.assertRaises(ValidationError):
item.save()
item.full_clean()
def test_duplicate_items_are_invalid(self):
list_ = List.objects.create()
Item.objects.create(list=list_, text='bla')
with self.assertRaises(ValidationError):
item = Item(list=list_, text='bla')
item.full_clean()
def test_CAN_save_same_item_to_different_lists(self):
list1 = List.objects.create()
list2 = List.objects.create()
Item.objects.create(list=list1,text='bla')
item = Item(list=list2, text='bla')
item.full_clean() # should not raise
class ListModelTest(TestCase):
def test_get_absolute_url(self):
list_ = List.objects.create()
self.assertEqual(list_.get_absolute_url(), '/lists/%d/' % (list_.id,))
def test_create_new_creates_list_and_first_item(self):
List.create_new(first_item_text='new item text')
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'new item text')
new_list = List.objects.first()
self.assertEqual(new_item.list, new_list)
def test_create_new_optionally_saves_owner(self):
user = User.objects.create()
List.create_new(first_item_text='new item text', owner=user)
new_list = List.objects.first()
self.assertEqual(new_list.owner, user)
def test_lists_can_have_owners(self):
List(owner=User()) # should not raise
def test_list_owner_is_optional(self):
List().full_clean() # should not raise
def test_list_name_is_first_item_text(self):
list_ = List.objects.create()
Item.objects.create(list=list_, text='first item')
Item.objects.create(list=list_, text='second item')
self.assertEqual(list_.name, 'first item')
def test_create_return_new_list_object(self):
returned = List.create_new(first_item_text='new item text')
new_list = List.objects.first()
self.assertEqual(returned, new_list)
| [
"[email protected]"
] | |
83497eb9655ea0c266abbffacf149e61e78c537c | f521f594f6c9189433b4fcab5155d1194fcbeac2 | /parse/log_decorator.py | 83d1568cc5671d5fc685d6ac4f49e3d36e860ef2 | [] | no_license | easyinplay/tor_spider_scrapy | cd1a8241ff1e744a10d812998ea49ce38a6c310e | 736285d1bd0a82b4e1db2857a232a1623993482a | refs/heads/master | 2023-02-02T11:03:45.312559 | 2020-12-22T08:57:43 | 2020-12-22T08:57:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | import os
import logging
import functools
from datetime import datetime
def _logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
today = datetime.now()
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # 上一级目录
# fh = logging.FileHandler(path + "{}log{}es-{}-{}-{}.log".format(os.sep, os.sep, today.year, today.month, today.day), encoding='utf-8')
fmt = "[%(asctime)s][%(filename)s:%(lineno)d][%(levelname)s] - %(message)s"
formatter = logging.Formatter(fmt)
sh.setFormatter(formatter)
# fh.setFormatter(formatter)
logger.addHandler(sh)
# logger.addHandler(fh)
return logger
def exception_logger(logger):
"""
A decorator that wraps the passed in function and logs
exceptions should one occur
@param logger: The logging object
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
# log the exception
err = "There was an exception in "
err += func.__name__
logger.exception(err)
# re-raise the exception
raise
return wrapper
return decorator
| [
"[email protected]"
] | |
14bfb45539a15719c26658a0696082529fc4bc0f | 65465c6a8ffde330dea87c347b5a6f8345f0bdca | /p3/urls.py | 2cdfbf5928658f3f5bc0f17c9e063acab2b297b6 | [] | no_license | Saranyaram890/p3 | 27a0fa81ea16772e73b8a61fb306893c63977ba5 | 48fad442244da2d97f2444806540bc0a133e3e45 | refs/heads/master | 2022-11-22T14:51:58.800929 | 2020-07-23T02:04:37 | 2020-07-23T02:04:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | """p3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from p3 import views
urlpatterns = [
path('admin/', admin.site.urls),
path('index/',views.index,name="index"),
path('',views.home,name="home"),
path('second/',views.second,name="second"),
path('third/',views.third,name='third'),
path('fourth/',views.fourth,name="fourth"),
path('fifth/',views.fifth,name="fifth"),
path("url_data/<name>",views.urls_data,name="urls_data"),
path("ab/<ab>",views.ab,name="ab"),
path('vowels/<str>', views.vowels, name="vowels"),
]
| [
"[email protected]"
] | |
c8d58bad12f2d00dbaaa0a198b391fe827e89ccc | 79a60fa1daeaa9dbe0cb551423fe28c2f2cf7da3 | /websocket/bottle/mqttwsweb.py | a0e1c343be062aba3efa12502005552a8a3d9e11 | [
"MIT"
] | permissive | swkim01/mqtt | a505494815cd5f487cbc1e434fd0546c1bc08eac | 030d9106bf791b54538aac8789df872abaa96e17 | refs/heads/master | 2021-01-10T22:07:04.788958 | 2019-07-19T01:41:07 | 2019-07-19T01:41:07 | 42,844,241 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #-*- coding: utf-8 -*-
from bottle import route, get, request, template, response, static_file
from bottle import run
#import json
host="<HOST IP>"
port=8008
wsport=9001
@route('/mqttws31.js')
def mqttws31():
return static_file("mqttws31.js", root=".")
@get('/mqttwschart')
def dht22chart():
return template("mqttwschart", host=host, port=wsport)
@get('/')
def index():
return template("mqttwsindex", host=host, port=wsport)
if __name__ == '__main__':
run(host=host, port=port)
| [
"[email protected]"
] | |
e0008060475c57100422a4370823f602a7d1dbaf | de4f05070b7194b00facd823ce369e2733034ea1 | /EC2/GetEC2Instance.py | 065371954d97b7998fa4965e5e5df48296e69405 | [] | no_license | JeongWonjae/AWS_CLI_Script | 2679e6072c210665f0c7c65705f73c67a6c591b6 | 91c5c122799a0b930f9eb3652ae1570ef1abbcec | refs/heads/master | 2023-06-01T22:27:32.824199 | 2021-06-24T05:46:59 | 2021-06-24T05:46:59 | 345,669,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,707 | py | import boto3
import json
session=boto3.session.Session(profile_name='DemoUser', region_name='us-east-1')
client=session.client('ec2')
instances=[]
# for minimize call API set 'MaxResults=1000'
response=client.describe_instances(MaxResults=1000)
# response = {'Reservations': [{'Groups': [], 'Instances': [{'AmiLaunchIndex': 0, 'ImageId': 'ami-0533f2ba8a1995cf9', 'InstanceId': 'i-09bbe296b89xxxxx', 'InstanceType': 't2.micro', 'KeyName': 'EC2DemoKey', 'LaunchTime': datetime.datetime(2021, 3, 23, 14, 28, 8, tzinfo=tzutc()), 'Monitoring': {'State': 'disabled'}, 'Placement': {'AvailabilityZone': 'us-east-1a', 'GroupName': '', 'Tenancy': 'default'}, 'PrivateDnsName': 'ip-172-31-1-111.ec2.internal', 'PrivateIpAddress': '172.31.1.111', 'ProductCodes': [], 'PublicDnsName': '', 'State': {'Code': 16, 'Name': 'running'}, 'StateTransitionReason': '', 'SubnetId': 'subnet-0245c04d58e1998d3', 'VpcId': 'vpc-4f65d132', 'Architecture': 'x86_64', 'BlockDeviceMappings': [{'DeviceName': '/dev/xvda', 'Ebs': {'AttachTime': datetime.datetime(2021, 3, 23, 14, 28, 9, tzinfo=tzutc()), 'DeleteOnTermination': True, 'Status': 'attached', 'VolumeId': 'vol-0d4dcaf9ea658bf8e'}}], 'ClientToken': '', 'EbsOptimized': False, 'EnaSupport': True, 'Hypervisor': 'xen', 'NetworkInterfaces': [{'Attachment': {'AttachTime': datetime.datetime(2021, 3, 23, 14, 28, 8, tzinfo=tzutc()), 'AttachmentId': 'eni-attach-04ef9e8fe41f0b730', 'DeleteOnTermination': True, 'DeviceIndex': 0, 'Status': 'attached', 'NetworkCardIndex': 0}, 'Description': 'Primary network interface', 'Groups': [{'GroupName': 'launch-wizard-1', 'GroupId': 'sg-028e98f8644954d4f'}], 'Ipv6Addresses': [], 'MacAddress': '12:f7:a1:50:f6:3f', 'NetworkInterfaceId': 'eni-0989b13d333faecaa', 'OwnerId': '416168070872', 'PrivateDnsName': 'ip-172-31-1-111.ec2.internal', 'PrivateIpAddress': '172.31.1.111', 'PrivateIpAddresses': [{'Primary': True, 'PrivateDnsName': 'ip-172-31-1-111.ec2.internal', 'PrivateIpAddress': '172.31.1.111'}], 'SourceDestCheck': True, 'Status': 'in-use', 'SubnetId': 'subnet-0245c04d58e1998d3', 'VpcId': 'vpc-4f65d132', 'InterfaceType': 'interface'}], 'RootDeviceName': '/dev/xvda', 'RootDeviceType': 'ebs', 'SecurityGroups': [{'GroupName': 'launch-wizard-1', 'GroupId': 'sg-028e98f8644954d4f'}], 'SourceDestCheck': True, 'VirtualizationType': 'hvm', 'CpuOptions': {'CoreCount': 1, 'ThreadsPerCore': 1}, 'CapacityReservationSpecification': {'CapacityReservationPreference': 'open'}, 'HibernationOptions': {'Configured': False}, 'MetadataOptions': {'State': 'applied', 'HttpTokens': 'optional', 'HttpPutResponseHopLimit': 1, 'HttpEndpoint': 'enabled'}, 'EnclaveOptions': {'Enabled': False}}], 'OwnerId': '416168070872', 'ReservationId': 'r-057f164d12660c27d'}], 'ResponseMetadata': {'RequestId': 'f136cfd9-fe0b-4b87-b0de-b9d755045070', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': 'f136cfd9-fe0b-4b87-b0de-b9d755045070', 'cache-control': 'no-cache, no-store', 'strict-transport-security': 'max-age=31536000; includeSubDomains', 'content-type': 'text/xml;charset=UTF-8', 'content-length': '6267', 'vary': 'accept-encoding', 'date': 'Tue, 23 Mar 2021 14:31:47 GMT', 'server': 'AmazonEC2'}, 'RetryAttempts': 0}}
# collect instance information from the first response (up to 1000 instances)
for rev in response['Reservations']:
if rev.get('Instances'):
instances.extend(rev['Instances'])
# if there are more than 1000 EC2 instances, keep paging with NextToken
while response.get('NextToken'):
response=client.describe_instances(MaxResults=1000, NextToken=response['NextToken'])
for rev in response['Reservations']:
if rev.get('Instances'):
instances.extend(rev['Instances'])
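# Added for illustration (not part of the original script): boto3 can also handle this
# paging for you with a paginator. A minimal sketch, assuming the same `client` object;
# the function name is made up and it is never called here.
def describe_all_instances_with_paginator(ec2_client):
    # equivalent to the manual NextToken loop above
    found = []
    paginator = ec2_client.get_paginator('describe_instances')
    for page in paginator.paginate(PaginationConfig={'PageSize': 1000}):
        for reservation in page['Reservations']:
            found.extend(reservation.get('Instances', []))
    return found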
with open('./ec2-instances.json', 'w+') as f:
json.dump(instances, f, indent=4, default=str) | [
"[email protected]"
] | |
64822061c6bd698ceeda74ad46900f76caebdc88 | bfc5b2f0743c6e9adb877fa891d874529760b007 | /deb-update.py | 18034002a5b26f694c39deac88fe1fe16ca05cfc | [] | no_license | MrNathanSmith/arbitrary-python | 930603b760bf46250a71e5729517e90f8bff3f5e | 80e5dd256bddae750a1ab941dedbef8319870ed1 | refs/heads/master | 2021-01-21T23:24:02.032325 | 2017-06-23T18:10:11 | 2017-06-23T18:10:11 | 95,242,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | #!/usr/bin/python
import os
import sys, traceback
import time
print ""
print "A Simple Update/Upgrade Python Script For Debian"
print ""
#Options
print "~~~~~~~~~~~~~~~~~~~~~~~"
print "Update/Upgrade Debian"
print "~~~~~~~~~~~~~~~~~~~~~~~"
print "(1) System Update"
print "(2) System Upgrade"
print "(3) Distribution Upgrade"
print "(4) Exit"
print ""
choice = raw_input ("Please Select an Option: ")
if choice == "1":
print "System Update Selected, Please Wait..."
time.sleep(5)
cmd1 = os.system ("sudo apt-get update")
print ""
print "System Update Complete"
print ""
elif choice == "2":
print "System Upgrade Selected, Please Wait..."
time.sleep(5)
cmd1 = os.system ("sudo apt-get upgrade -y")
print ""
print "System Upgrade Complete"
print ""
elif choice == "3":
print "Distribution Upgrade Selected, Please Wait..."
time.sleep(5)
cmd1 = os.system ("sudo apt-get dist-upgrade -y")
print ""
print "Distribution Upgrade Complete"
print ""
elif choice == "4":
print ""
print "Goodbye, Have a nice day :)"
print ""
sys.exit(0)
| [
"[email protected]"
] | |
050414edb4e192e13b8368dffaf21774c78ca1cb | 163554ecfd9c3b059171e42166cf2b4c550e0ea7 | /dataclassificationapp/wsgi.py | 406ce971ba6d58416e1052f9bf20c706db98d671 | [] | no_license | danielRos01/DataCassifierBackend | 9ca2e18aa08a75538373f14570d2589c3bf26038 | 6fba2f5c5ca0f4c2e248e65223f33d17606d0e63 | refs/heads/main | 2023-01-19T02:38:12.629536 | 2020-11-22T20:22:32 | 2020-11-22T20:22:32 | 315,088,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
WSGI config for dataclassificationapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dataclassificationapp.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
80bfdaf5259b84ac700aab294d7db8d5372259c3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02546/s914300427.py | 3dad5d294d902104d5bf9e91b64a4bd3c9b6633e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | S = input()
if(S[-1] == "s"):
S += "es"
else:
S += "s"
print(S) | [
"[email protected]"
] | |
b34e8eb425d1099c4b4358e74814477818dfc003 | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/vulkan-deps/vulkan-validation-layers/src/scripts/common_ci.py | 88e5f1af7cda75fbf03793a27e6bf5f32ce53c52 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 10,787 | py | #!/usr/bin/python3 -i
#
# Copyright (c) 2015-2017, 2019-2023 The Khronos Group Inc.
# Copyright (c) 2015-2017, 2019-2023 Valve Corporation
# Copyright (c) 2015-2017, 2019-2023 LunarG, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import platform
import shutil
import argparse
if sys.version_info[0] != 3:
print("This script requires Python 3. Run script with [-h] option for more details.")
    sys.exit(0)
# Use Ninja for all platforms for performance/simplicity
os.environ['CMAKE_GENERATOR'] = "Ninja"
# Utility for creating a directory if it does not exist. Behaves similarly to 'mkdir -p'
def make_dirs(path, clean=False):
if clean and os.path.isdir(path):
shutil.rmtree(path)
os.makedirs(path, exist_ok=True)
# helper to define paths relative to the repo root
def RepoRelative(path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', path))
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.split(os.path.abspath(__file__))[0], '..'))
# TODO: Pass this in as arg, may be useful for running locally
EXTERNAL_DIR_NAME = "external"
BUILD_DIR_NAME = "build"
VVL_BUILD_DIR = RepoRelative(BUILD_DIR_NAME)
TEST_INSTALL_DIR = RepoRelative("build/install")
def externalDir(config): return os.path.join(RepoRelative(EXTERNAL_DIR_NAME), config)
# Runs a command in a directory, raising an exception if the command fails.
# Directory is project root by default, or a relative path from project root
def RunShellCmd(command, start_dir = PROJECT_ROOT, env=None, verbose=False):
if start_dir != PROJECT_ROOT:
start_dir = RepoRelative(start_dir)
cmd_list = command.split(" ")
if verbose or ('VVL_CI_VERBOSE' in os.environ and os.environ['VVL_CI_VERBOSE'] != '0'):
print(f'CICMD({cmd_list}, env={env})')
subprocess.check_call(cmd_list, cwd=start_dir, env=env)
#
# Check if the system is Windows
def IsWindows(): return 'windows' == platform.system().lower()
#
# Set MACOSX_DEPLOYMENT_TARGET
def SetupDarwin(osx):
if platform.system() != "Darwin":
return
# By default it will use the latest MacOS SDK available on the system.
if osx == 'latest':
return
# Currently the Vulkan SDK targets 10.15 as the minimum for MacOS support.
# If we need to we can raise the minimim like we did for C++17 support.
os.environ['MACOSX_DEPLOYMENT_TARGET'] = "10.15"
print(f"Targeting {os.environ['MACOSX_DEPLOYMENT_TARGET']} MacOS Deployment Target", flush=True)
#
# Run VVL scripts
def CheckVVL(config):
ext_dir = externalDir(config)
vulkan_registry = ext_dir + "/Vulkan-Headers/registry"
spirv_unified = ext_dir + "/SPIRV-Headers/include/spirv/unified1/"
# Verify consistency of generated source code
print("Check Generated Source Code Consistency")
gen_check_cmd = f'python scripts/generate_source.py --verify {vulkan_registry} {spirv_unified}'
RunShellCmd(gen_check_cmd)
print('Run vk_validation_stats.py')
valid_usage_json = vulkan_registry + "/validusage.json"
text_file = RepoRelative(f'{VVL_BUILD_DIR}/layers/vuid_coverage_database.txt')
gen_check_cmd = f'python scripts/vk_validation_stats.py {valid_usage_json} -text {text_file}'
RunShellCmd(gen_check_cmd)
#
# Prepare the Validation Layers for testing
def BuildVVL(config, cmake_args, build_tests):
print("Log CMake version")
cmake_ver_cmd = 'cmake --version'
RunShellCmd(cmake_ver_cmd)
print("Run CMake for Validation Layers")
cmake_cmd = f'cmake -S . -B {VVL_BUILD_DIR} -DUPDATE_DEPS=ON -DCMAKE_BUILD_TYPE={config}'
# By default BUILD_WERROR is OFF, CI should always enable it.
cmake_cmd += ' -DBUILD_WERROR=ON'
cmake_cmd += f' -DBUILD_TESTS={build_tests}'
if cmake_args:
cmake_cmd += f' {cmake_args}'
RunShellCmd(cmake_cmd)
print("Build Validation Layers and Tests")
build_cmd = f'cmake --build {VVL_BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Validation Layers")
install_cmd = f'cmake --install {VVL_BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Prepare Loader for executing Layer Validation Tests
def BuildLoader():
LOADER_DIR = RepoRelative(os.path.join("%s/Vulkan-Loader" % EXTERNAL_DIR_NAME))
# Clone Loader repo
if not os.path.exists(LOADER_DIR):
print("Clone Loader Source Code")
clone_loader_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Loader.git'
RunShellCmd(clone_loader_cmd, EXTERNAL_DIR_NAME)
print("Run CMake for Loader")
LOADER_BUILD_DIR = RepoRelative("%s/Vulkan-Loader/%s" % (EXTERNAL_DIR_NAME, BUILD_DIR_NAME))
print("Run CMake for Loader")
cmake_cmd = f'cmake -S {LOADER_DIR} -B {LOADER_BUILD_DIR}'
cmake_cmd += ' -D UPDATE_DEPS=ON -D BUILD_TESTS=OFF -D CMAKE_BUILD_TYPE=Release'
# This enables better stack traces from tools like leak sanitizer by using the loader feature which prevents unloading of libraries at shutdown.
cmake_cmd += ' -D LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING=ON'
if not IsWindows():
cmake_cmd += ' -D LOADER_ENABLE_ADDRESS_SANITIZER=ON'
RunShellCmd(cmake_cmd)
print("Build Loader")
build_cmd = f'cmake --build {LOADER_BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Loader")
install_cmd = f'cmake --install {LOADER_BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Prepare Mock ICD for use with Layer Validation Tests
def BuildMockICD():
VT_DIR = RepoRelative("%s/Vulkan-Tools" % EXTERNAL_DIR_NAME)
if not os.path.exists(VT_DIR):
print("Clone Vulkan-Tools Repository")
clone_tools_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Tools.git'
RunShellCmd(clone_tools_cmd, EXTERNAL_DIR_NAME)
ICD_BUILD_DIR = RepoRelative("%s/Vulkan-Tools/%s" % (EXTERNAL_DIR_NAME,BUILD_DIR_NAME))
print("Run CMake for ICD")
cmake_cmd = f'cmake -S {VT_DIR} -B {ICD_BUILD_DIR} -D CMAKE_BUILD_TYPE=Release '
cmake_cmd += '-DBUILD_CUBE=NO -DBUILD_VULKANINFO=NO -D INSTALL_ICD=ON -D UPDATE_DEPS=ON'
RunShellCmd(cmake_cmd)
print("Build Mock ICD")
build_cmd = f'cmake --build {ICD_BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Mock ICD")
install_cmd = f'cmake --install {ICD_BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Prepare Profile Layer for use with Layer Validation Tests
def BuildProfileLayer():
RunShellCmd('pip3 install jsonschema', EXTERNAL_DIR_NAME)
VP_DIR = RepoRelative("%s/Vulkan-Profiles" % EXTERNAL_DIR_NAME)
if not os.path.exists(VP_DIR):
print("Clone Vulkan-Profiles Repository")
clone_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Profiles.git'
RunShellCmd(clone_cmd, EXTERNAL_DIR_NAME)
BUILD_DIR = RepoRelative("%s/Vulkan-Profiles/%s" % (EXTERNAL_DIR_NAME, BUILD_DIR_NAME))
print("Run CMake for Profile Layer")
cmake_cmd = f'cmake -S {VP_DIR} -B {BUILD_DIR}'
cmake_cmd += ' -D CMAKE_BUILD_TYPE=Release'
cmake_cmd += ' -D UPDATE_DEPS=ON'
cmake_cmd += ' -D PROFILES_BUILD_TESTS=OFF'
RunShellCmd(cmake_cmd)
print("Build Profile Layer")
build_cmd = f'cmake --build {BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Profile Layer")
install_cmd = f'cmake --install {BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Run the Layer Validation Tests
def RunVVLTests():
print("Run Vulkan-ValidationLayer Tests using Mock ICD")
if IsWindows():
print("Not implemented yet")
exit(-1)
lvt_cmd = os.path.join(PROJECT_ROOT, BUILD_DIR_NAME, 'tests', 'vk_layer_validation_tests')
lvt_env = dict(os.environ)
# Because we installed everything to TEST_INSTALL_DIR all the libraries/json files are in pre-determined locations
# defined by GNUInstallDirs. This makes adding the LD_LIBRARY_PATH and VK_LAYER_PATH trivial/robust.
lvt_env['LD_LIBRARY_PATH'] = os.path.join(TEST_INSTALL_DIR, 'lib')
lvt_env['VK_LAYER_PATH'] = os.path.join(TEST_INSTALL_DIR, 'share/vulkan/explicit_layer.d')
lvt_env['VK_DRIVER_FILES'] = os.path.join(TEST_INSTALL_DIR, 'share/vulkan/icd.d/VkICD_mock_icd.json')
lvt_env['VK_INSTANCE_LAYERS'] = 'VK_LAYER_KHRONOS_validation' + os.pathsep + 'VK_LAYER_KHRONOS_profiles'
lvt_env['VK_KHRONOS_PROFILES_SIMULATE_CAPABILITIES'] = 'SIMULATE_API_VERSION_BIT,SIMULATE_FEATURES_BIT,SIMULATE_PROPERTIES_BIT,SIMULATE_EXTENSIONS_BIT,SIMULATE_FORMATS_BIT,SIMULATE_QUEUE_FAMILY_PROPERTIES_BIT'
# By default use the max_profile.json
if "VK_KHRONOS_PROFILES_PROFILE_FILE" not in os.environ:
lvt_env['VK_KHRONOS_PROFILES_PROFILE_FILE'] = RepoRelative('tests/device_profiles/max_profile.json')
# By default set portability to false
if "VK_KHRONOS_PROFILES_EMULATE_PORTABILITY" not in os.environ:
lvt_env['VK_KHRONOS_PROFILES_EMULATE_PORTABILITY'] = 'false'
lvt_env['VK_KHRONOS_PROFILES_DEBUG_REPORTS'] = 'DEBUG_REPORT_ERROR_BIT'
RunShellCmd(lvt_cmd, env=lvt_env)
print("Re-Running multithreaded tests with VK_LAYER_FINE_GRAINED_LOCKING disabled")
lvt_env['VK_LAYER_FINE_GRAINED_LOCKING'] = '0'
RunShellCmd(lvt_cmd + ' --gtest_filter=*Thread*', env=lvt_env)
def GetArgParser():
configs = ['release', 'debug']
default_config = configs[0]
osx_choices = ['min', 'latest']
osx_default = osx_choices[1]
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--config', dest='configuration',
metavar='CONFIG', action='store',
choices=configs, default=default_config,
help='Build target configuration. Can be one of: {0}'.format(
', '.join(configs)))
parser.add_argument(
'--cmake', dest='cmake',
metavar='CMAKE', type=str,
default='', help='Additional args to pass to cmake')
parser.add_argument(
'--build', dest='build',
action='store_true', help='Build the layers')
parser.add_argument(
'--test', dest='test',
action='store_true', help='Tests the layers')
parser.add_argument(
'--osx', dest='osx', action='store',
choices=osx_choices, default=osx_default,
help='Sets MACOSX_DEPLOYMENT_TARGET on Apple platforms.')
return parser
| [
"[email protected]"
] | |
ef91d187951a98f0a2cc58320241353d9beb1284 | dd24ca271419b49be54a0f632eeaf684da3f4a71 | /python3/exercise/csdn.py | 2d1aca3e0e27ffb2b9403eb1adb22208f1a8af2b | [] | no_license | Soulor0725/PycharmProjects | fd766d7760900f7aa2c47f130974017f13c184db | 664bb3c54b7ece844263b5a3e3adc4731ea772ff | refs/heads/master | 2021-09-14T22:22:49.920606 | 2018-05-21T07:12:52 | 2018-05-21T07:12:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,789 | py | '''''
program: CSDN blog crawler
function: crawl the date, title, view count and comment count of every post on my CSDN blog homepage
version: python 3.5.1
time: 2016/05/29
author: yr
'''
import urllib.request,re,time,random,gzip
# helper: save one page's post info to a text file
def saveFile(data,i):
path = str(i+1)+".txt"
file = open(path,'wb')
page = '当前页:'+str(i+1)+'\n'
file.write(page.encode('gbk'))
    # write each post's info to the file (encoded as gbk)
for d in data:
d = str(d)+'\n'
file.write(d.encode('gbk'))
file.close()
# decompress gzip-compressed response data
def ungzip(data):
try:
#print("正在解压缩...")
data = gzip.decompress(data)
#print("解压完毕...")
except:
print("未经压缩,无需解压...")
return data
# CSDN crawler class
class CSDNSpider:
def __init__(self,pageIdx=1,url="http://blog.csdn.net/fly_yr/article/list/1"):
        # current list page (defaults to 1)
self.pageIdx = pageIdx
self.url = url[0:url.rfind('/') + 1] + str(pageIdx)
self.headers = {
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch",
"Accept-Language": "zh-CN,zh;q=0.8",
"Host": "blog.csdn.net"
}
    # get the total number of blog-list pages
def getPages(self):
req = urllib.request.Request(url=self.url, headers=self.headers)
res = urllib.request.urlopen(req)
        # the content fetched from my CSDN blog homepage is gzip-compressed, so decompress it first
data = res.read()
data = ungzip(data)
data = data.decode('utf-8')
pages = r'<div.*?pagelist">.*?<span>.*?共(.*?)页</span>'
#link = r'<div.*?pagelist">.*?<a.*?href="(.*?)".*?</a>'
        # parse the total number of pages of my posts
pattern = re.compile(pages, re.DOTALL)
pagesNum = re.findall(pattern, data)
return pagesNum
    # set which blog-list page to crawl
def setPage(self,idx):
self.url = self.url[0:self.url.rfind('/')+1]+str(idx)
    # read post info (date, title, link, views, comments) from the current page
def readData(self):
ret=[]
        str = r'<dl.*?list_c clearfix">.*?date_t"><span>(.*?)</span><em>(.*?)</em>.*?date_b">(.*?)</div>.*?'+r'<a.*?set_old">(.*?)</a>.*?<h3.*?list_c_t"><a href="(.*?)">(.*?)</a></h3>.*?'+r'<div.*?fa fa-eye"></i><span>(.*?)</span>.*?fa-comment-o"></i><span>(.*?)</span></div>'
req = urllib.request.Request(url=self.url, headers=self.headers)
res = urllib.request.urlopen(req)
        # the fetched content is gzip-compressed, so decompress it first
data = res.read()
data = ungzip(data)
data = data.decode('utf-8')
pattern = re.compile(str,re.DOTALL)
items = re.findall(pattern,data)
for item in items:
ret.append(item[0]+'年'+item[1]+'月'+item[2]+'日'+'\t'+item[3]+'\n标题:'+item[5]
+'\n链接:http://blog.csdn.net'+item[4]
+'\n'+'阅读:'+item[6]+'\t评论:'+item[7]+'\n')
return ret
# create the crawler object
cs = CSDNSpider()
# fetch the total page count
pagesNum = int(cs.getPages()[0])
print("博文总页数: ",pagesNum)
for idx in range(pagesNum):
cs.setPage(idx)
print("当前页:",idx+1)
#读取当前页的所有博文,结果为list类型
papers = cs.readData()
saveFile(papers,idx) | [
""
] | |
cb32c7cea02c9d8b4ab326ff7d7b6217ae6c00d9 | 1d67056d57c656f6d64ab315b60c36c62317ad50 | /nameGender.py | 538bf13eedfa42510b9133812260f40bee940b4f | [] | no_license | Drob-AI/Book2Ontology | dd7d6462bf14f6c721368eb2afca90ed3284f1f5 | b3daa6a491acafe0b4388f4de9b566d7ec760792 | refs/heads/master | 2021-01-17T17:25:34.073352 | 2016-07-08T00:51:21 | 2016-07-08T00:51:21 | 62,723,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,844 | py | from nltk.collocations import TrigramAssocMeasures
from nltk.collocations import TrigramCollocationFinder
from random import sample
def read_raw_data(file_name):
f = open(file_name, 'r')
raw_data = []
for line in f:
raw_data.append(line)
f.close()
return raw_data
def char_type(character):
if character in '\n y':
return character
if character in "aeiou":
return 'V'
if character in "bdgvz":
return 'SC'
if character in "ptkfs":
return 'NC'
return 'C'
def char_with_type(character):
return (character, char_type(character))
def generate_corpus(raw_names):
corpus = [];
for name in raw_names:
corpus.append(char_with_type('\n'))
for character in name:
corpus.append(char_with_type(character))
# corpus.extend(name)
corpus.append(char_with_type('\n'))
corpus.append(char_with_type('\n'))
return corpus
def get_word_end_probabilites(names):
names_count = len(names)
letter_counts = {}
for name in names:
letter = name[-1:]
if not letter_counts.has_key(letter):
letter_counts[letter] = 0
letter_counts[letter] = letter_counts[letter] + 1
for key in letter_counts.keys():
letter_counts[key] = (letter_counts[key] + 0.0) / names_count
return letter_counts
# def vowel_count_probabilities(names):
# total_letter_count = 1.0
# vowel_count = 1.0
# for name in names:
# total_letter_count = total_letter_count + len(name)
# for character in name:
# if character in 'aoueiy':
# vowel_count = vowel_count + 1
# return vowel_count / total_letter_count;
def calculate_name_len_probabilites(names):
names_count = len(names)
name_len_counts = {}
for name in names:
name_len = len(name)
if not name_len_counts.has_key(name_len):
name_len_counts[name_len] = 0
name_len_counts[name_len] = name_len_counts[name_len] + 1
for key in name_len_counts.keys():
name_len_counts[key] = (name_len_counts[key] + 0.0) / names_count
return name_len_counts
class NameGenderData:
def __init__(self, raw_data, unisex_names):
self.data = raw_data
self.name_count = len(raw_data)
sample_size = len(raw_data) / 10
self.testing_set = set(sample(raw_data, sample_size))
self.training_set = (set(raw_data) - self.testing_set) - unisex_names
self.name_len_probabilities = calculate_name_len_probabilites(self.training_set);
self.last_letter_probabilities = get_word_end_probabilites(self.training_set);
self.base_name_len_probability = 1.0 / len(self.training_set);
corpus = generate_corpus(self.training_set)
invalid_trigram_count = self.name_count - 1
self.trigram_count = len(corpus) - 2 - invalid_trigram_count
self.base_frequency = 1.0 / self.trigram_count
colloc_finder = TrigramCollocationFinder.from_words(corpus)
colloc_finder.apply_freq_filter(3)
colloc_finder.apply_ngram_filter(lambda w1, w2, w3: w1 == '\n' and w2 == '\n')
self.colloc_finder = colloc_finder
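# Note added for clarity: the scoring below is a naive-Bayes-style product,
#   P(name | gender) ~ prod(trigram raw frequencies) * P(name length) * P(gender),
# where unseen trigrams / name lengths fall back to the 1/N "base" probabilities
# computed in NameGenderData above.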
def getNameScore(name, data):
name = name + '\n'
trigram_measures = TrigramAssocMeasures()
name_len = len(name) - 2
score = 1
for i in range(0, name_len):
trigram_score = data.colloc_finder.score_ngram(trigram_measures.raw_freq, char_with_type(name[i]), char_with_type(name[i + 1]), char_with_type(name[i + 2]))
if trigram_score is None:
score = score * data.base_frequency
else:
score = score * trigram_score
name_len_score = 0
if data.name_len_probabilities.has_key(len(name)):
name_len_score = data.name_len_probabilities[len(name)]
else:
name_len_score = data.base_name_len_probability
# last_letter_score = data.base_name_len_probability
# if data.last_letter_probabilities.has_key(name[-1:]):
# last_letter_score = data.last_letter_probabilities[name[-1:]]
return score * name_len_score * data.name_probability
def getNameGenderRatio(name):
maleScore = getNameScore(name, male_name_data)
femaleScore = getNameScore(name, female_name_data)
# print 'Scores for ' + name
# print 'male ' + str(maleScore)
# print 'female ' + str(femaleScore)
# print 'total ' + str(maleScore / (maleScore + femaleScore))
# print '-------------------'
return maleScore / (maleScore + femaleScore)
def train():
global male_name_data, female_name_data
male_names = set(read_raw_data('male.txt'))
female_names = set(read_raw_data('female.txt'))
unisex_names = male_names.intersection(female_names);
male_name_data = NameGenderData(male_names, unisex_names)
female_name_data = NameGenderData(female_names, unisex_names)
total_name_count = len(male_name_data.training_set) + len(female_name_data.training_set)
male_name_data.name_probability = (len(male_name_data.training_set) + 0.0) / total_name_count
female_name_data.name_probability = (len(female_name_data.training_set) + 0.0) / total_name_count
def test():
total_len = len(male_name_data.testing_set) + len(female_name_data.testing_set)
guessed_len = 0.0;
for name in male_name_data.testing_set:
if getNameGenderRatio(name) >= 0.5:
guessed_len = guessed_len + 1
for name in female_name_data.testing_set:
if getNameGenderRatio(name) < 0.5:
guessed_len = guessed_len + 1
return guessed_len / total_len
def run():
train()
return test()
sum = 0
test_runs = 100
for i in range(test_runs):
sum += run()
print i
print sum / test_runs | [
"[email protected]"
] | |
68ea6c85f64c98acc2cb88a841724df7e5d89167 | 925cb9f77ca120fa33f5b68bb0e733942c33fe0c | /Dojo_survey_with_validation/server.py | 4d011a44228c28efa4ee729c1b6a81967212a02e | [] | no_license | dhurataK/Python_Flask | 58c309813ad455e247b1e653ec8134f23e0667f4 | dd55a6800addf8eb6a87f65b4ef8f7b41d511d1b | refs/heads/master | 2020-12-31T06:22:40.073635 | 2016-09-23T15:28:43 | 2016-09-23T15:28:43 | 68,420,179 | 0 | 1 | null | 2016-09-18T22:47:04 | 2016-09-16T23:05:18 | HTML | UTF-8 | Python | false | false | 1,082 | py | from flask import Flask, render_template, request, redirect, session, flash
app = Flask(__name__)
app.secret_key = "verySecret!"
@app.route('/')
def home():
return render_template('index.html')
@app.route('/result', methods=['POST'])
def validate():
name = request.form['name']
location = request.form['location']
language = request.form['language']
comment = request.form['comment']
if len(name) == 0 and len(comment) == 0:
flash("Name shouldn't be blank!")
flash("Comment field shouldn't be blank!")
return redirect('/')
elif len(name) > 0 and len(comment) == 0:
flash("Comment field shouldn't be blank!")
return redirect('/')
elif len(name) == 0 and len(comment) > 0:
flash("Name shouldn't be blank!")
return redirect('/')
elif len(comment) > 120:
flash("Comments shouldn't exceed 120 characters!")
return redirect('/')
else:
return render_template('result.html', name= name, location= location, language = language, comment = comment)
app.run(debug=True)
| [
"[email protected]"
] | |
a24404703336545fc6ae88cede1fb645987258ff | 8c409c79ef8a9aeacb2ba8af99fcb8456529257f | /python/png2svg/png2svg.py | cc74b3131ad3546c06b263f5f417e292f9a29522 | [] | no_license | damo-wang/book | 97cec663770c6565d92f9657542b2fe38f851a0a | 3254d1d2afc4dac239b65c80a9ec425a628fd1e1 | refs/heads/master | 2022-10-16T17:36:34.127248 | 2022-10-14T15:04:17 | 2022-10-14T15:05:12 | 107,131,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | import aspose.words as aw
doc=aw.Document()
builder=aw.DocumentBuilder(doc)
shape=builder.insert_image("Input.png")
shape.image_data.save("Output.svg")
| [
"[email protected]"
] | |
c15f0bec4691b59ccddfa52d5cf261f07b0ea133 | 1789386464cb22e1d00fec011b440970cf4a923c | /ch2/ch2p1.py | 2a3e816d27344def35a743d2263c36b76696b3a7 | [] | no_license | milnorms/pearson_revel | 06f14d382ed106e24705c91e87b0244bfe81f2f4 | ac72692394962d3952888a90791e7468b73d9bf3 | refs/heads/master | 2020-12-01T16:56:45.509114 | 2019-12-29T05:05:25 | 2019-12-29T05:05:25 | 230,703,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | '''
(Financial application: calculate tips)
Write a program that reads the subtotal and the gratuity rate and computes the gratuity and total. For example, if the user enters 10 for the subtotal and 15% for the gratuity rate, the program displays 1.5 as the gratuity and 11.5 as the total. Here is another sample run:
Enter the subtotal: 15.69
Enter the gratuity rate: 15
The gratuity is 2.35 and the total is 18.04
'''
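# Worked example (added for illustration, matching the sample run above):
#   gratuity = 15.69 * (15 * 0.01) = 2.3535  -> rounded to 2.35
#   total    = 15.69 + 2.3535      = 18.0435 -> rounded to 18.04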
userSubtotal = float(input("Enter the subtotal: "))
userGratuity = float(input("Enter the gratuity rate: "))
gratuity = userGratuity * 0.01
total = round(((userSubtotal * gratuity) + userSubtotal), 2)
grat = round((gratuity * userSubtotal), 2)
print("The gratuity is " + str(grat) + " and the total is " + str(total))
| [
"[email protected]"
] | |
227893b265ced510ed159a0e46ff9adff34c2178 | ba1d012f951b0d96c43805d79195bfa1d9c7892e | /backend/base/migrations/0003_auto_20210423_1734.py | de050484f5dbb22671769c496d0c2ccfca8cc833 | [] | no_license | ramoncelestino/react-django-blog | 89c22bf00b35ae35ca0a1e27e0d74e79a4b3dea3 | 7820bfb50bb9bdeaa0dce1d95a3b460c3074fae6 | refs/heads/main | 2023-04-28T01:04:24.984416 | 2021-05-01T01:26:13 | 2021-05-01T01:26:13 | 361,168,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # Generated by Django 3.2 on 2021-04-23 20:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0002_auto_20210423_1730'),
]
operations = [
migrations.RenameField(
model_name='city',
old_name='city',
new_name='name',
),
migrations.AddField(
model_name='address',
name='street',
field=models.CharField(default=False, max_length=40, null=True),
),
]
| [
"[email protected]"
] | |
5961d295b23abd4a5c1995b3f10bf6ccb333c741 | 44600adf1731a449ff2dd5c84ce92c7f8b567fa4 | /colour_down/adaptation/fairchild1990.py | 4ce1a10481213f117c2508f1c43f594b728df699 | [] | no_license | ajun73/Work_Code | b6a3581c5be4ccde93bd4632d8aaaa9ecc782b43 | 017d12361f7f9419d4b45b23ed81f9856278e849 | refs/heads/master | 2020-04-11T23:16:43.994397 | 2019-12-28T07:48:44 | 2019-12-28T07:48:44 | 162,161,852 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,432 | py | # -*- coding: utf-8 -*-
"""
Fairchild (1990) Chromatic Adaptation Model
===========================================
Defines *Fairchild (1990)* chromatic adaptation model objects:
- :func:`colour.adaptation.chromatic_adaptation_Fairchild1990`
See Also
--------
`Fairchild (1990) Chromatic Adaptation Model Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/adaptation/fairchild1990.ipynb>`_
References
----------
- :cite:`Fairchild1991a` : Fairchild, M. D. (1991). Formulation and testing
of an incomplete-chromatic-adaptation model. Color Research & Application,
16(4), 243-250. doi:10.1002/col.5080160406
- :cite:`Fairchild2013s` : Fairchild, M. D. (2013). FAIRCHILD'S 1990 MODEL.
In Color Appearance Models (3rd ed., pp. 4418-4495). Wiley. ISBN:B00DAYO8E2
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.adaptation import VON_KRIES_CAT
from colour.utilities import dot_vector, row_as_diagonal, tsplit, tstack
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'FAIRCHILD1990_XYZ_TO_RGB_MATRIX', 'FAIRCHILD1990_RGB_TO_XYZ_MATRIX',
'chromatic_adaptation_Fairchild1990', 'XYZ_to_RGB_Fairchild1990',
'RGB_to_XYZ_Fairchild1990', 'degrees_of_adaptation'
]
FAIRCHILD1990_XYZ_TO_RGB_MATRIX = VON_KRIES_CAT
"""
*Fairchild (1990)* colour appearance model *CIE XYZ* tristimulus values to cone
responses matrix.
FAIRCHILD1990_XYZ_TO_RGB_MATRIX : array_like, (3, 3)
"""
FAIRCHILD1990_RGB_TO_XYZ_MATRIX = np.linalg.inv(VON_KRIES_CAT)
"""
*Fairchild (1990)* colour appearance model cone responses to *CIE XYZ*
tristimulus values matrix.
FAIRCHILD1990_RGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
def chromatic_adaptation_Fairchild1990(XYZ_1,
XYZ_n,
XYZ_r,
Y_n,
discount_illuminant=False):
"""
Adapts given stimulus *CIE XYZ_1* tristimulus values from test viewing
conditions to reference viewing conditions using *Fairchild (1990)*
chromatic adaptation model.
Parameters
----------
XYZ_1 : array_like
*CIE XYZ_1* tristimulus values of test sample / stimulus in domain
[0, 100].
XYZ_n : array_like
Test viewing condition *CIE XYZ_n* tristimulus values of whitepoint.
XYZ_r : array_like
Reference viewing condition *CIE XYZ_r* tristimulus values of
whitepoint.
Y_n : numeric or array_like
Luminance :math:`Y_n` of test adapting stimulus in :math:`cd/m^2`.
discount_illuminant : bool, optional
Truth value indicating if the illuminant should be discounted.
Returns
-------
ndarray
Adapted *CIE XYZ_2* tristimulus values of stimulus.
Warning
-------
The input domain and output range of that definition are non standard!
Notes
-----
- Input *CIE XYZ_1*, *CIE XYZ_n* and *CIE XYZ_r* tristimulus values are
in domain [0, 100].
- Output *CIE XYZ_2* tristimulus values are in range [0, 100].
References
----------
- :cite:`Fairchild1991a`
- :cite:`Fairchild2013s`
Examples
--------
>>> XYZ_1 = np.array([19.53, 23.07, 24.97])
>>> XYZ_n = np.array([111.15, 100.00, 35.20])
>>> XYZ_r = np.array([94.81, 100.00, 107.30])
>>> Y_n = 200
>>> chromatic_adaptation_Fairchild1990(XYZ_1, XYZ_n, XYZ_r, Y_n)
... # doctest: +ELLIPSIS
array([ 23.3252634..., 23.3245581..., 76.1159375...])
"""
XYZ_1 = np.asarray(XYZ_1)
XYZ_n = np.asarray(XYZ_n)
XYZ_r = np.asarray(XYZ_r)
Y_n = np.asarray(Y_n)
LMS_1 = dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ_1)
LMS_n = dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ_n)
LMS_r = dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ_r)
p_LMS = degrees_of_adaptation(
LMS_1, Y_n, discount_illuminant=discount_illuminant)
a_LMS_1 = p_LMS / LMS_n
a_LMS_2 = p_LMS / LMS_r
A_1 = row_as_diagonal(a_LMS_1)
A_2 = row_as_diagonal(a_LMS_2)
LMSp_1 = dot_vector(A_1, LMS_1)
c = 0.219 - 0.0784 * np.log10(Y_n)
C = row_as_diagonal(tstack((c, c, c)))
LMS_a = dot_vector(C, LMSp_1)
LMSp_2 = dot_vector(np.linalg.inv(C), LMS_a)
LMS_c = dot_vector(np.linalg.inv(A_2), LMSp_2)
XYZ_c = dot_vector(FAIRCHILD1990_RGB_TO_XYZ_MATRIX, LMS_c)
return XYZ_c
def XYZ_to_RGB_Fairchild1990(XYZ):
"""
Converts from *CIE XYZ* tristimulus values to cone responses.
Parameters
----------
XYZ : array_like
*CIE XYZ* tristimulus values.
Returns
-------
ndarray
Cone responses.
Examples
--------
>>> XYZ = np.array([19.53, 23.07, 24.97])
>>> XYZ_to_RGB_Fairchild1990(XYZ) # doctest: +ELLIPSIS
array([ 22.1231935..., 23.6054224..., 22.9279534...])
"""
return dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ)
def RGB_to_XYZ_Fairchild1990(RGB):
"""
Converts from cone responses to *CIE XYZ* tristimulus values.
Parameters
----------
RGB : array_like
Cone responses.
Returns
-------
ndarray
*CIE XYZ* tristimulus values.
Examples
--------
>>> RGB = np.array([22.12319350, 23.60542240, 22.92795340])
>>> RGB_to_XYZ_Fairchild1990(RGB) # doctest: +ELLIPSIS
array([ 19.53, 23.07, 24.97])
"""
return dot_vector(FAIRCHILD1990_RGB_TO_XYZ_MATRIX, RGB)
def degrees_of_adaptation(LMS, Y_n, v=1 / 3, discount_illuminant=False):
"""
Computes the degrees of adaptation :math:`p_L`, :math:`p_M` and
:math:`p_S`.
Parameters
----------
LMS : array_like
Cone responses.
Y_n : numeric or array_like
Luminance :math:`Y_n` of test adapting stimulus in :math:`cd/m^2`.
v : numeric or array_like, optional
Exponent :math:`v`.
discount_illuminant : bool, optional
Truth value indicating if the illuminant should be discounted.
Returns
-------
ndarray
Degrees of adaptation :math:`p_L`, :math:`p_M` and :math:`p_S`.
Examples
--------
>>> LMS = np.array([20.00052060, 19.99978300, 19.99883160])
>>> Y_n = 31.83
>>> degrees_of_adaptation(LMS, Y_n) # doctest: +ELLIPSIS
array([ 0.9799324..., 0.9960035..., 1.0233041...])
>>> degrees_of_adaptation(LMS, Y_n, 1 / 3, True)
array([ 1., 1., 1.])
"""
LMS = np.asarray(LMS)
if discount_illuminant:
return np.ones(LMS.shape)
Y_n = np.asarray(Y_n)
v = np.asarray(v)
L, M, S = tsplit(LMS)
LMS_E = dot_vector(VON_KRIES_CAT, np.ones(LMS.shape)) # E illuminant.
L_E, M_E, S_E = tsplit(LMS_E)
Ye_n = Y_n ** v
def m_E(x, y):
"""
Computes the :math:`m_E` term.
"""
return (3 * (x / y)) / (L / L_E + M / M_E + S / S_E)
def P_c(x):
"""
Computes the :math:`P_L`, :math:`P_M` or :math:`P_S` terms.
"""
return (1 + Ye_n + x) / (1 + Ye_n + 1 / x)
p_L = P_c(m_E(L, L_E))
p_M = P_c(m_E(M, M_E))
p_S = P_c(m_E(S, S_E))
p_LMS = tstack((p_L, p_M, p_S))
return p_LMS
| [
"[email protected]"
] | |
68e3dbcc684161b2f8d32f752aaad8f778937993 | 9f6b9a40444df2b09960b5b531232ee6975e74dd | /level_1.py | 13d4ca664e2d9291979dc351d30188b94817ea48 | [] | no_license | nildiert/hodor | f60e94f4a64b8b0217c760104f501a6a586d4129 | 3c8b2df854ed2af6c5345250bc8f557b52761aee | refs/heads/master | 2020-06-02T06:19:44.740188 | 2019-06-10T02:59:23 | 2019-06-10T02:59:23 | 191,067,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | from lxml import html
import requests
import time
def req_values(url):
page = requests.get(url)
tree = html.fromstring(page.content)
me = tree.xpath(next_val)
return ([page, tree, me])
try:
url = 'http://158.69.76.135/level1.php'
data = {'id':'730','holdthedoor':'submit'}
next_val = '//td[contains(text(), "730")]/following-sibling::node()/text()'
page, tree, me = req_values(url)
data.update({"key":page.cookies["HoldTheDoor"]})
while ("".join(me) != '\n4095 '):
page, tree, me = req_values(url)
data.update({"key":page.cookies["HoldTheDoor"]})
status = requests.post(url, data, cookies=page.cookies)
print("{} {}".format(status ,me))
except Exception as e:
print(e)
| [
"[email protected]"
] | |
685535ed109dab696f4e5360794e0b67396276b8 | 91905ec87a4724d8e8d3c084574b616cc3ae03d4 | /mysite/urls.py | 9352a7c1d4216b202affaf2e23dedfb5b1c249c6 | [
"MIT"
] | permissive | evvrivas/mis_proyectos | c64a58ff2ad506063947f9cf1ac426ab6a7383a4 | ed4c9c1bc1b1ae8eeca968a10f77b8e1c1515e92 | refs/heads/master | 2021-05-06T15:42:54.998970 | 2020-06-11T05:45:01 | 2020-06-11T05:45:01 | 113,632,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,617 | py | #from django.conf.urls import patterns, include, url
#from django.contrib import admin
#from mysite.views import Index
##########################
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url,include
from django.contrib import admin
from django.conf import settings
import mysite.settings
from django.contrib.auth.views import login, logout
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from mysite.views import *
from django.conf.urls import url
from django.views.generic import TemplateView
urlpatterns = [
# Examples:
# url(r'^$', 'artetronica.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', admin.site.urls),
#url(r'^admin/',include(admin.site.urls)),
#url(r'^$', Index.as_view(), name='index'),
url(r'^accounts/login/$', login,{'template_name': 'login.html'}),
url(r'^accounts/logout/$', logout),
url(r'^accounts/profile/$', pagina_principal),
#url(r'^static/(?P<path>.*)$','django.views.static.serve',{'document_root': settings.STATIC_ROOT}),
#url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
url(r'^$', pagina_principal),
url(r'^catalogo/(\d+)$', catalogo),
url(r'^informacion/$', informacion),
url(r'^informacion_vendedor/([^/]+)/$', informacion_vendedor),
url(r'^informacion_comprador/([^/]+)/$', informacion_comprador),
url(r'^crear_usuario/$',crear_usuario),
url(r'^editar_usuario/$',editar_usuario),
url(r'^crear_tienda/$',crear_tienda),
url(r'^editar_tienda/(\d+)/$',editar_tienda),
url(r'^crear_producto/([^/]+)/([^/]+)/$',crear_producto),
url(r'^editar_producto/([^/]+)/([^/]+)/(\d+)/$',editar_producto),
url(r'^ver_categorias/([^/]+)/$', ver_categorias),
url(r'^ver_categorias_tienda/([^/]+)/([^/]+)/([^/]+)/$', ver_mis_categorias),
url(r'^busqueda/([^/]+)/$', busqueda),
url(r'^busqueda_tienda/([^/]+)/([^/]+)/$', busqueda_tienda),
url(r'^busqueda_desde_app/([^/]+)/$', busqueda_desde_app),
url(r'^editar_pedido/([^/]+)/([^/]+)/(\d+)/$',editar_pedido),
url(r'^hacer_pedido/([^/]+)/([^/]+)/$',hacer_pedido),
url(r'^cambiar_estado_pedido/([^/]+)/([^/]+)/(\d+)/$',cambiar_estado_pedido),
url(r'^listado_pedido/([^/]+)/([^/]+)/([A-Z]+)/$', listado_pedido),
url(r'^carrusel/(\d+)/([^/]+)/([^/]+)/$', carrusel),
url(r'^carrusel_pedidos/(\d+)/([^/]+)/([^/]+)/$', carrusel_pedidos),
url(r'^cambiar_estado_producto/([^/]+)/([^/]+)/(\d+)/([^/]+)/$',cambiar_estado_producto),
url(r'^cambiar_estado_tienda/([^/]+)/(\d+)/([^/]+)/$',cambiar_estado_tienda),
url(r'^descargar/([^/]+)/([^/]+)/(\d+)/$',descargar),
url(r'^centro_comercial/([^/]+)/([^/]+)/$',centro_comercial),
url(r'^crear_categorias/$',crear_categorias),
url(r'^ver_las_preferidas/$',ver_las_preferidas),
url(r'^mis_cuentas/$',mis_cuentas),
url(r'^agregar_producto_al_carrito/(\d+)/([^/]+)/$',agregar_producto_al_carrito),
url(r'^ver_el_carrito/([^/]+)/$',ver_el_carrito),
url(r'^ver_el_carrito_personal_y_de_tienda/([^/]+)/(\d+)/$',ver_el_carrito_personal_y_de_tienda),
url(r'^ver_el_carrito_de_tienda/(\d+)/$',ver_el_carrito_de_tienda),
url(r'^ver_el_carrito_personal/([^/]+)/$',ver_el_carrito_personal),
url(r'^eliminar_producto_del_carrito/(\d+)/$',eliminar_producto_del_carrito),
url(r'^editar_producto_del_carrito/(\d+)/$',editar_producto_del_carrito),
url(r'^editar_estado_producto_del_carrito/(\d+)/([^/]+)/$',editar_estado_producto_del_carrito),
url(r'^realizar_compra_individual/(\d+)/$',realizar_compra_individual),
url(r'^realizar_compra/$',realizar_compra),
url(r'^enviar_mensaje/(\d+)/$', enviar_mensaje),
url(r'^ver_mis_mensajes/([^/]+)/([^/]+)/$',ver_mis_mensajes),
url(r'^responder_mensaje/(\d+)/$',responder_mensaje),
url(r'^cambiar_tipo_de_vista/(\d+)/$',cambiar_tipo_de_vista),
url(r'^agregar_a_preferidas/(\d+)/$',agregar_a_preferidas),
url(r'^configurar_vista_pagina_principal/$',configurar_vista_pagina_principal),
url(r'^evaluar/(\d+)/([^/]+)/$',evaluar),
url(r'^administrar_mis_categorias/(\d+)/$',administrar_mis_categorias),
url(r'^traspasar_tienda/(\d+)/$',traspasar_tienda),
url(r'^editar_categoria_de_mi_tienda/(\d+)/(\d+)/$',editar_categoria_de_mi_tienda),
url(r'^borrar_categoria_de_mi_tienda/(\d+)/(\d+)/$',borrar_categoria_de_mi_tienda),
url(r'^comunicacion_tienda/(\d+)/([^/]+)/$',comunicacion_tienda),
url(r'^seleccion_compra/(\d+)/(\d+)/$',seleccion_compra),
url(r'^notificar_a_todos_que/$',notificar_a_todos_que),
url(r'^guardar_token/$',guardar_token, name='guardar_token'),
url(r'^serviceworker(.*.js)$', TemplateView.as_view(template_name='serviceworker.js', content_type='application/x-javascript')),
url(r'^realizar_lista_de_compras/(\d+)/$',realizar_lista_de_compras),
url(r'^agregar_lista_de_compra_al_carrito/(\d+)/$',agregar_lista_de_compra_al_carrito),
url(r'^crear_super_producto/(\d+)/$',crear_super_producto),
url(r'^go_tipo_uber/$',go_tipo_uber),
url(r'^go_delivery/$',go_delivery),
url(r'^([^/]+)/$', mis_tiendas),
url(r'^([^/]+)/([^/]+)/$', mi_tienda),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
64402309d95926900ca65d4eebb29551360c1b03 | 1def774fa7899e53de2f63578518a634050c1e82 | /section6/security.py | 62396f43b2da68138c6f88accf1c0ffbf869ea3b | [] | no_license | albayona/flask_content | f570e937c92c2813e5db8919cfa1707918178034 | 7030da7a8457ec818112bacee011d21343cab841 | refs/heads/master | 2023-01-29T00:54:59.864671 | 2020-12-17T04:27:34 | 2020-12-17T04:27:34 | 314,171,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,399 | py | from flask import request, jsonify
from flask_jwt_extended import create_access_token, get_jwt_claims, create_refresh_token, jwt_required, get_raw_jwt, \
jwt_refresh_token_required, get_jwt_identity
from blacklist import BLACKLIST
from models.user import UserModel
from functools import update_wrapper
from flask_restful import abort, Resource, reqparse
from werkzeug.security import safe_str_cmp
def authenticate(username, password):
user = UserModel.find_by_username(username)
if user and safe_str_cmp(user.password, password):
return user
def identity(payload):
user_id = payload['identity']
return UserModel.find_by_id(user_id)
def role_required(role):
def decorator(fn):
def wrapped_function(*args, **kwargs):
# For authorization er return status code 403
if not safe_str_cmp(get_jwt_claims(), role):
return {"msg": "You do not meet the roles required for this operation"}, 403
return fn(*args, **kwargs)
return update_wrapper(wrapped_function, fn)
return decorator
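# Illustrative usage of role_required (added as an example; this resource is not
# registered anywhere and the 'admin' role name is an assumption, not project config).
# @jwt_required runs first, so the JWT claims are available when the role check executes.
class ExampleAdminOnly(Resource):
    @jwt_required
    @role_required('admin')
    def get(self):
        # only reached when the JWT is valid and its claims match 'admin'
        return {"message": "admins only"}, 200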
class Login(Resource):
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, required=True, help="This field cannot be blank")
parser.add_argument('password', type=str, required=True, help="This field cannot be blank")
parser.add_argument('type', type=str, required=True, help="This field cannot be blank")
def post(self):
data = Login.parser.parse_args()
user = UserModel.find_by_username(data['username'])
if user and safe_str_cmp(user.password, data['password']):
access_token = create_access_token(identity=user, fresh=True)
refresh_token = create_refresh_token(user)
return {
'access_token': access_token,
'refresh_token': refresh_token
}, 200
return {"message": "Invalid Credentials!"}, 401
class Logout(Resource):
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
BLACKLIST.add(jti)
return {"message": "Successfully logged out"}, 200
class TokenRefresh(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
new_token = create_access_token(identity=current_user, fresh=False)
return {'access_token': new_token}, 200
| [
"[email protected]"
] | |
907b8d2e4fbac6eb91146f0df7ba54c27035b164 | 3da93763bbc39692ef6f468a91c42b335674af44 | /python/tautau.py | 4b3b070d64b461333e438d27ae701bc767b0a60c | [] | no_license | mbirostris/cpt | 59f5fd0a45bf2c044b55156835dbb1c5c460ee84 | 5bae5d82647e271e686f892b0b762425563f1e50 | refs/heads/master | 2020-06-06T03:07:26.096708 | 2015-06-23T08:04:12 | 2015-06-23T08:04:12 | 27,839,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,947 | py | import sys
import ROOT#gROOT, TCanvas, TF1, TFile
from root_numpy import root2array, root2rec, tree2rec
import os
import shutil
import math
from ROOT import * #gROOT, TCanvas, TF1, TFile
import pylab as pl
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
from matplotlib import gridspec
from matplotlib.backends.backend_pdf import PdfPages
import HistMacro as mac
import bins
from array import array
from subprocess import call
gROOT.Reset()
gROOT.SetBatch()
###########################################
# control plots
###########################################
try:
shutil.rmtree("./plots/")
except:
print "OK..."
try:
os.mkdir("./plots/")
except:
print "Jedziemyyyy...."
def page(category, HiggsMass):
fig = plt.figure(figsize=(16, 16))
plt.axis([0, 10, 0, 10])
plt.text(5, 7, "Kategoria:\n" + category + "\nHiggsMass: " + HiggsMass, fontsize=50,color='b', ha='center', va='top')
return fig;
#category = ['0jet_low', '0jet_high', 'inclusive', 'boost_high', 'vbf']#'inclusive','boost_high', 'vbf']; # variable = ["MtLeg1MVA"]; bin_width = [1]; bin_min=[0]; bin_max=[150]
#category = ['inclusive', 'btag', 'btag_low', 'btag_high', 'nobtag']
category = [ 'nobtag_medium']
#variable = ["diTauNSVfitMass","diTauVisMass","visibleTauMass" ] #, "decayMode", "MEtMVA", "MEtMVAPhi", "MtLeg1MVA", "ptL1", "ptL2", "etaL1", "etaL2", "phiL1", "phiL2", "pt1", "pt2", "eta1", "eta2", "phi1", "phi2", "Deta", "Dphi", "Mjj", "diTauRecoPt", "numPV"];
variable = ["diTauNSVfitMass"];
for i in ['0jet_low']:
call("hadd ./root/" +i+ "/QCD.root ./root/"+i+"/QCD_*.root", shell=True)
for i in ['btag','btag_low','btag_high','inclusive', 'nobtag', 'nobtag_low', 'nobtag_medium']:
call("hadd ./root/" +i+ "/Embedded.root ./root/"+i+"/Data_Embedded.root ./root/"+i+"/TTbarEmb.root", shell=True)
#call(["hadd", "./root/inclusive/QCD.root", "./root/inclusive/QCD_Data.root", "./root/inclusive/QCD_DY->ll, j->t.root", "./root/inclusive/QCD_DY->ll, l->t.root", "./root/inclusive/QCD_DY->tautau.root", "./root/inclusive/QCD_DY->tt, jj.root", "./root/inclusive/QCD_LL.root", "./root/inclusive/QCD_Others.root", "./root/inclusive/QCD_TTbar.root", "./root/inclusive/QCD_WJets.root"])
HiggsMass = [];
for i in range(115,130,5):
HiggsMass.append(str(i));
print "Proccesed Higgs Masses: ", HiggsMass;
pp = PdfPages('./plots/analysis.pdf')
for mass in HiggsMass:
for j in xrange(0, len(category)):
fih = page( category[j], mass)
pp.savefig(fih)
for i in xrange(0, len(variable)):
if (category[j] == 'MtLeg1MVA' and variable[i] != 'MtLeg1MVA'):
continue
else:
fig = mac.plot(variable[i], category[j], bins.get(category[j], variable[i]), mass)
pp.savefig(fig)
plt.close()
pp.close()
| [
"[email protected]"
] | |
e55b0c7aae5064490a387ff915342f70c51b626a | 336705b43988965b7601cdb47b0588e31c88b880 | /TG_AutoConfigurator/plugins/inline.py | 6f87931dcaba06a65cd9f69b2c5707eab79ddc4c | [
"MIT"
] | permissive | qwertyadrian/TG_AutoConfigurator | 13b251fe5477cb6918286d08816e3d0d0e827d70 | d3aee59076ae4b1e77f2c0d487d56b6d74021f5a | refs/heads/master | 2022-06-22T15:03:33.685971 | 2022-06-08T15:10:36 | 2022-06-08T15:10:36 | 192,208,383 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | from pyrogram import InlineQuery, InlineQueryResultArticle, InputTextMessageContent
from ..TG_AutoConfigurator import AutoConfigurator
from ..utils import tools
@AutoConfigurator.on_inline_query()
def inline(bot: AutoConfigurator, query: InlineQuery):
if tools.admin_check(bot, query):
string = query.query.lower()
results = []
bot.reload_config()
sources_list = bot.config.sections()[3:] if bot.config.has_section("proxy") else bot.config.sections()[2:]
for source in sources_list:
if not string or source.startswith(string):
text, reply_markup = tools.generate_setting_info(bot, source)
results.append(
InlineQueryResultArticle(
title=source,
input_message_content=InputTextMessageContent(text, disable_web_page_preview=True),
reply_markup=reply_markup,
)
)
query.answer(results=results, cache_time=0)
| [
"[email protected]"
] | |
69f9dd47463c7924f4b97ceba4c0412bd8f0fad0 | 4addd4aa5f38387be3ee1fc9cefff69485d74e1a | /GOPE/Gopesa/principal/migrations/0001_initial.py | 8fbff4085107be2c0cfcfefd2c190f1af34c8f6c | [] | no_license | A01701833/GOPESA | a40f2a328dd9f7e108ab5a7e85f1d99c6a047a0e | 33dda28a8fe66ef3f76d22c16ace9e37c7ff1937 | refs/heads/master | 2020-04-08T09:29:24.321695 | 2018-11-26T20:14:09 | 2018-11-26T20:14:09 | 159,226,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # Generated by Django 2.1.1 on 2018-11-16 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Propiedades',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
('terreno', models.CharField(max_length=20)),
('cuartos', models.CharField(max_length=20)),
('banos', models.CharField(max_length=20)),
],
),
]
| [
"[email protected]"
] | |
c58367f57e3a973de97e3542dc625d0ddef9babe | 2f23f2ddce5b44950c0ecc8fac11d7eb7cd6c6ca | /src/bptl/camunda/migrations/0011_auto_20200226_1441.py | 21914fbf0fd13fb6906b4a40f4d6683c3cc5aad5 | [] | no_license | Amsterdam/bptl-lite | c989c22f0efb2ae866e1b81b0f2effdeb18917c9 | 7dd8a13cef455e34a7432e310978f7871a81de23 | refs/heads/master | 2023-07-09T21:20:13.028609 | 2021-08-10T08:09:00 | 2021-08-10T08:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # Generated by Django 2.2.10 on 2020-02-26 13:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("camunda", "0010_copy_tasks_to_parent"),
]
operations = [
migrations.RemoveField(
model_name="externaltask",
name="execution_error",
),
migrations.RemoveField(
model_name="externaltask",
name="id",
),
migrations.RemoveField(
model_name="externaltask",
name="result_variables",
),
migrations.RemoveField(
model_name="externaltask",
name="status",
),
migrations.RemoveField(
model_name="externaltask",
name="topic_name",
),
migrations.RemoveField(
model_name="externaltask",
name="variables",
),
]
| [
"[email protected]"
] | |
4061b69cd1a0426dd429f731702ee97b4ae88e23 | 90bc2d33954d4db8608e77d0430a3f8c0a920d64 | /midaasTask/asgi.py | b37d065f6e1f66323b5d92de7d437d3e0dbdbf95 | [] | no_license | anil-bothe/midaasTask | 873512a7bb31ba7c7a711e2c3a4085cc6e2505fe | 33420cb97682dbba8246b388cf3ac990e314df64 | refs/heads/master | 2023-07-09T18:49:46.301471 | 2021-08-02T05:59:56 | 2021-08-02T05:59:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for midaasTask project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'midaasTask.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
3a3796bf98f90c15d5b4c72280decdc7e412449c | 7e5c96cf9091f3912b51215f98e4e75a1fd5fe56 | /lucky/lucky.py | 1d572cd3c5a6d7528c63213dd644cac608dfe1a7 | [
"BSD-3-Clause"
] | permissive | aenon/shiba_cogs | ef3cdff2d274cb8029b9fc7204a119c5c92e4c80 | cb65b75f648698f63913b8b38ed7bafa33df6f4c | refs/heads/master | 2021-07-13T06:09:02.672212 | 2017-10-18T07:04:34 | 2017-10-18T07:04:34 | 105,968,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import discord
from discord.ext import commands
class lucky:
"""Cog for Husky Lucky"""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def lucky(self):
"""Cog for Husky Lucky
instagram https://www.instagram.com/thehuskylucky/
get random post: description and url"""
await self.bot.say("This is work in progress!")
def setup(bot):
bot.add_cog(lucky(bot))
| [
"[email protected]"
] | |
9e02a82736c3444d9a8a3e3f92dc8fc5eb8cd8bc | c3f4e09bba013cc1ac99830077b89edad23b21f7 | /hello.py | dd4d12ce9e04dc511fd3be04347eaad83ca7458d | [] | no_license | mkemper/herokupythonwebapp | 70477d990c8640ac467f6e406b04a87b61f6ebe7 | 55e76253ac2ae346e8958a00792b50c2820ac7a9 | refs/heads/master | 2020-05-28T14:08:07.801003 | 2014-05-02T19:54:06 | 2014-05-02T19:54:06 | 18,992,699 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | import os
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/')
@app.route('/hello')
def hello():
return render_template("index.html",title = 'TestApp',user = 'Gustave Gans')
| [
"[email protected]"
] | |
a366043bb5e6902c35d10a0d2a4c4f20f7c00e59 | 09790593dd44a1ad31b387bbe4adea927697d3ef | /gitrepo/python/potega.py | bc854f3e91c77fa6df3de3052f801055ae4470c6 | [] | no_license | kuba332211/gitrepo | 0baa6179725fee3f200c619f0b436e89906a76a1 | b80b1931337b886bd6cff343b0feae4ce3055dbb | refs/heads/master | 2020-03-28T01:02:56.559496 | 2019-10-11T06:36:16 | 2019-10-11T06:36:16 | 147,471,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# potega.py
# compute the power: base raised to a non-negative integer exponent
def potega_it(a, n):
wynik = 1
for i in range(n):
wynik = wynik * a
#print(wynik)
return wynik
def main(args):
#a =int(input("Podaj podstawę: "))
#n =int(input("wykładnik: "))
#print("Potęga {} do {} wynosi {}".format(a,n, potega_it(a, n)))
assert(potega_it(1,1) == 1)
assert(potega_it(2,1) == 2)
assert(potega_it(2,2) == 4)
assert(potega_it(0,4) == 0)
assert(potega_it(1,0) == 1)
assert(potega_it(4,0) == 1)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
c862a250c2c47d807e95c99304745e82a2d09ea8 | fa60cc12d59d73376e5812c11613c238c2e17447 | /performance/stat.py | f5f255373f674e68f841a6e5920a0e80f710348e | [] | no_license | ftao/playzmq | 1a466c364d0f4c935d678656cd6eb8aceb571703 | dde945f27850159c0cfc4d1ecda53e9f32331612 | refs/heads/master | 2020-03-30T11:30:52.107291 | 2012-02-06T15:10:34 | 2012-02-06T15:10:34 | 3,289,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | #!/usr/bin/env python
'''
Do stat calculation on data
'''
import time
import json
import sys
import itertools
import zmq
def stat(data, field):
total = 0
count = 0
for item in data:
total += item[field]
count += 1
return total, count, total * 1.0 /count
def get_data_stream(socket):
while True:
msg = socket.recv()
if msg == '':
break
yield json.loads(msg)
def main():
input_addr = sys.argv[1]
ctx = zmq.Context()
input_socket = ctx.socket(zmq.SUB)
input_socket.connect(input_addr)
input_socket.setsockopt(zmq.SUBSCRIBE, '')
ret = stat(get_data_stream(input_socket), 'hlen')
print time.time(), ret
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
49387a3cdc6c6e23837c5436d99d317dbd2554eb | 40b262d813d07a113914d6009af8737898f2e096 | /Platos test/apps/schedules/migrations/0001_initial.py | 8cf8f0fa053f62e70fdc4f248d417b5c4d27999c | [] | no_license | Nish8192/Python | cb6de3b96e790464a0a4ad10eda86ce4f79688b4 | 5c03beff6f3669d5cfb6b31c5749827db8b6a627 | refs/heads/master | 2020-12-23T16:56:18.301723 | 2017-05-27T02:09:02 | 2017-05-27T02:09:02 | 92,563,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-28 22:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('login_register', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Day',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('h9to10', models.BooleanField(verbose_name=False)),
('h10to11', models.BooleanField(verbose_name=False)),
('h11to12', models.BooleanField(verbose_name=False)),
('h12to13', models.BooleanField(verbose_name=False)),
('h13to14', models.BooleanField(verbose_name=False)),
('h14to15', models.BooleanField(verbose_name=False)),
('h15to16', models.BooleanField(verbose_name=False)),
('h16to17', models.BooleanField(verbose_name=False)),
('h17to18', models.BooleanField(verbose_name=False)),
('h18to19', models.BooleanField(verbose_name=False)),
('h19to20', models.BooleanField(verbose_name=False)),
('h20to21', models.BooleanField(verbose_name=False)),
('h21to22', models.BooleanField(verbose_name=False)),
('h22to23', models.BooleanField(verbose_name=False)),
('h23to0', models.BooleanField(verbose_name=False)),
('h0to1', models.BooleanField(verbose_name=False)),
('h1to2', models.BooleanField(verbose_name=False)),
('h2to3', models.BooleanField(verbose_name=False)),
('h3to4', models.BooleanField(verbose_name=False)),
('h4to5', models.BooleanField(verbose_name=False)),
('h5to6', models.BooleanField(verbose_name=False)),
('h6to7', models.BooleanField(verbose_name=False)),
('h7to8', models.BooleanField(verbose_name=False)),
('h8to9', models.BooleanField(verbose_name=False)),
],
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fri', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fri_schedule', to='schedules.Day')),
('mon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mon_schedule', to='schedules.Day')),
('sat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sat_schedule', to='schedules.Day')),
('sun', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sun_schedule', to='schedules.Day')),
('thu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='thu_schedule', to='schedules.Day')),
('tue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tue_schedule', to='schedules.Day')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='schedule_user', to='login_register.User')),
('wed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='wed_schedule', to='schedules.Day')),
],
),
]
| [
"[email protected]"
] | |
020f0da0cd7aa01961d16151a1bafcc96e78372c | dc696115f95a173c358f749b3ab867681f4b4cbb | /code/centroid_plot_csv_new_1.py | 86c267194d2e750849c67b552f803d94a68bbf6d | [] | no_license | smohammed/GalexScanCalibration | 2c81a660f6a19b5c5125f9fcce1b5a0857152d7c | 59953cd2312b2f639891e434a54c5b9e8aabd155 | refs/heads/master | 2020-04-27T18:58:27.816939 | 2018-08-30T12:31:35 | 2018-08-30T12:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,166 | py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import c3
from astropy.io import fits as pyfits
import numpy as np
from astropy import wcs as pywcs
import sys
import aplpy
import os
from sklearn.neighbors import KernelDensity
import csv
import math
import asp_cal
import glob
from astropy.coordinates import SkyCoord
from astropy.coordinates import ICRS, Galactic, FK4, FK5
from astropy.coordinates import Angle, Latitude, Longitude
from astropy.io.fits import update
import re
from scipy.interpolate import splev, splrep
def _find_centroid(filename):
try:
hdulist = pyfits.open(filename)
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
hdulist = None
except:
print "Unexpected error:", sys.exc_info()[0]
raise
if hdulist is not None:
w = pywcs.WCS(hdulist[0].header, hdulist)
data = hdulist[0].data
data = data.byteswap(True).newbyteorder()
cy, cx = c3.find_centroid(data)
else:
return None
return cx,cy
def find_centroid(filename):
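  # Find the centroid of the FITS image data and convert it from pixel to world coordinates
  # via the image WCS; returns [0, 0] if the file cannot be opened.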
try:
hdulist = pyfits.open(filename)
except IOError as e:
print "I/O error({0}): {1}: {2}".format(e.errno, e.strerror, filename)
hdulist = None
except:
print "Unexpected error:", sys.exc_info()[0]
raise
if hdulist is not None:
hdulist = pyfits.open(filename)
w = pywcs.WCS(hdulist[0].header, hdulist)
data = hdulist[0].data
data = data.byteswap(True).newbyteorder()
cy, cx = c3.find_centroid(data)
centroid = w.wcs_pix2world(w.sip_foc2pix([[cx, cy]],1),1)[0]
if centroid[0]>1:
centroid[0] = centroid[0]-360.
else:
centroid = [0,0]
return centroid
def get_centers(initial, final):
centers = []
for i in range(initial, final+1):
filename = '../fits/co/right/co_map%d_%d_zoom_nocal.fits'%(i,i+1)
centroid = find_centroid(filename)
centers.append(centroid)
centers = np.array(centers)
return centers
def get_centers_half(initial, final):
centers = []
for i in range(initial, final+1):
for j in range(10):
filename = '../fits/test/co_map%d_%d_10.fits'%(i,j)
centroid = find_centroid(filename)
centers.append(centroid)
centers = np.array(centers)
return centers
def corr_plot(centroid, filename, title):
print filename
#centroid = find_centroid(filename)
#centroid = np.load('../data/offsets300-899_half_r.npy')[7]
print centroid
fig = aplpy.FITSFigure(filename)
fig.add_label(centroid[0], centroid[1], 'X', color='red')
fig.show_grayscale(invert=True)
fig.tick_labels.set_xformat('d.ddd')
fig.tick_labels.set_yformat('d.ddd')
fig.recenter(0., 0., radius=0.01)
fig.add_grid()
fig.set_title(title)
basename = os.path.basename(filename)
preffix, ext = os.path.splitext(basename)
fig.save('../plots/corr_10/%s.png'%preffix)
def load_data(filename):
data = []
with open(filename, 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if math.fabs(float(row[0]))>0.008 or math.fabs(float(row[1]))>0.008:
pass
else:
data.append(row)
csvfile.close()
return data
def moving_stat(data, out_mask, half_win=10):
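  # Compute moving-window mean/std and median/MAD statistics over `data`, skipping points
  # flagged in `out_mask`, and return each point's z-score and modified z-score
  # (0.6745 * (x - median) / MAD) for use in outlier detection.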
moving_mask = np.zeros(data.shape)
moving_mask[0:2*half_win+1] = 1
mean = np.zeros(data.shape)
median = np.zeros(data.shape)
abs_dev = np.zeros(data.shape)
std = np.zeros(data.shape)
z = np.zeros(data.shape)
mz = np.zeros(data.shape)
for i in range(half_win):
if out_mask[i] == 1:
std[i] = 1
abs_dev[i] = 1
else:
tmp_out_mask = -out_mask[:half_win+i+1]+1
#print i, data[:half_win+i+1][tmp_out_mask>0].shape
mean[i] = np.mean(data[:half_win+i+1][tmp_out_mask>0], axis=0)
std[i] = np.std(data[:half_win+i+1][tmp_out_mask>0], axis=0)
median[i] = np.median(data[:half_win+i+1][tmp_out_mask>0], axis=0)
abs_dev[i] = np.median(np.absolute(data[:half_win+i+1][tmp_out_mask>0]-median[i]), axis=0)
if out_mask[-i-1] == 1:
std[-i-1] = 1
abs_dev[-i-1] =1
else:
tmp_out_mask = -out_mask[-half_win-i-1:]+1
median[-i-1] = np.median(data[-half_win-i-1:][tmp_out_mask>0], axis=0)
abs_dev[-i-1] = np.median(np.absolute(data[-half_win-i-1:][tmp_out_mask>0]-median[-i-1]), axis=0)
mean[-i-1] = np.mean(data[-half_win-i-1:][tmp_out_mask>0], axis=0)
std[-i-1] = np.std(data[-half_win-i-1:][tmp_out_mask>0], axis=0)
#print -i-1, data[-half_win-i-1:][tmp_out_mask>0].shape
for i in range(data.shape[0]-2*half_win):
if out_mask[half_win+i] == 1:
std[half_win+i] = 1
abs_dev[half_win+i] =1
moving_mask_tmp = np.roll(moving_mask, i)
tmp_out_mask = -out_mask[moving_mask_tmp>0]+1
mean[half_win+i] = np.mean(data[moving_mask_tmp>0][tmp_out_mask>0], axis=0)
std[half_win+i] = np.std(data[moving_mask_tmp>0][tmp_out_mask>0], axis=0)
median[half_win+i] = np.median(data[moving_mask_tmp>0][tmp_out_mask>0], axis=0)
abs_dev[half_win+i] = np.median(np.absolute(data[moving_mask_tmp>0][tmp_out_mask>0]-median[half_win+i]), axis=0)
#print half_win+i, data[moving_mask_tmp>0][tmp_out_mask>0].shape
z = np.absolute((data - mean)/std)
mz = np.absolute(0.6745*(data-median)/abs_dev)
return z, mz
def generate_zero_offsets(name):
print name
hdulist = pyfits.open('../AIS_GAL_SCAN/asprta/%s-asprta.fits'%name)
initial = 1
final = hdulist[1].data['T'].shape[0]-1
centers = []
for i in range(initial, final+1):
centers.append(np.load('../data/%s/cata/centroids_photon%d.npy'%(name, i)))
centroids = np.concatenate(centers, axis=0)
print centroids.shape
np.save('../data/%s/cata/offsets%d_10_new_photon.npy'%(name, initial), centroids)
asp_cal.interpolate_offsets(name)
output = "../plots/%s/cata/output.csv"%(name)
dir = os.path.dirname(output)
if not os.path.exists(dir):
os.makedirs(dir)
def generate_first_offsets(name):
print name
hdulist = pyfits.open('../AIS_GAL_SCAN/asprta/%s-asprta.fits'%name)
initial = 1
final = hdulist[1].data['T'].shape[0]-1
centers = []
center_time = []
for i in range(initial, final+1):
c = np.load('../data/%s/cata/centroids_rot%d.npy'%(name, i))
#if c.shape == (1,3):
# c = c[:,:2]
centers.append(c)
center_time.append(np.load('../data/%s/cata/time_rot%d.npy'%(name, i)))
print c.shape
centroids = np.concatenate(centers, axis=0)
time = np.concatenate(center_time, axis=0)
print centroids.shape
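  # Reject outlier offsets: compute the modified z-score over a +/-100-sample moving window,
  # flag points with mz > 3.5, and replace them by linear interpolation (RA first, then Dec).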
out_mask = np.zeros(centroids.shape[0])
z, mz = moving_stat(centroids[:,0], out_mask, half_win=100)
outliers = np.zeros(centroids.shape[0])
outliers[mz>3.5] = 1
outliers[out_mask>0] = 1
outliers = outliers>0
index = np.arange(centroids.shape[0])
centroids[outliers, 0] = np.interp(index[outliers], index[~outliers], centroids[~outliers,0])
z, mz = moving_stat(centroids[:,1], out_mask, half_win=100)
outliers = np.zeros(centroids.shape[0])
outliers[mz>3.5] = 1
outliers[out_mask>0] = 1
outliers = outliers>0
index = np.arange(centroids.shape[0])
centroids[outliers, 1] = np.interp(index[outliers], index[~outliers], centroids[~outliers,1])
output = "../plots/%s/cata/output.csv"%(name)
dir = os.path.dirname(output)
if not os.path.exists(dir):
os.makedirs(dir)
plt.plot(centroids[:,0], '.b')
plt.savefig('../plots/%s/cata/offsets_10_new_half.pdf'%name, dpi=190)
plt.clf()
np.save('../data/%s/cata/time%d_10_new_half.npy'%(name, initial), time)
np.save('../data/%s/cata/offsets%d_10_new_half.npy'%(name, initial), centroids)
co_data = hdulist[1].data
T = co_data['T']
ra = co_data['ra']
dec = co_data['dec']
roll = co_data['roll']
ra_new = np.interp(time, T, ra) - centroids[:,0]
dec_new = np.interp(time, T, dec) - centroids[:,1]
roll_new = np.interp(time, T, roll) - centroids[:,2]
other = np.zeros((time.shape[0], 8))
array = np.concatenate([np.array([time, ra_new, dec_new, roll_new]).T, other], axis=1)
data = np.core.records.fromarrays(array.transpose(), dtype=[('T', float), ('RA', float), ('DEC', float), ('ROLL', float),\
('STATUS_FLAG', int), ('ROLL_RAD', float), ('X', float), ('Y', float), ('Z', float), ('XDOT', float), ('YDOT', float), ('ZDOT', float)])
new_file = '../AIS_GAL_SCAN/asprta/%s-cal-asprta.fits'%(name)
os.system('cp ../AIS_GAL_SCAN/asprta/%s-asprta.fits ../AIS_GAL_SCAN/asprta/%s-cal-asprta.fits'%(name, name))
update(new_file, data, 1)
hdu = pyfits.open(new_file)
print hdu[1].data['RA'].shape
print hdu[1].data['DEC'].shape
hdu.close()
#asp_cal.interpolate_offsets(name, 1., centroids)
tmp_files = glob.glob("../data/%s/cata/centroids_rot*"%name)
for tmp_file in tmp_files:
os.remove(tmp_file)
tmp_files = glob.glob("../data/%s/cata/time_rot*"%name)
for tmp_file in tmp_files:
os.remove(tmp_file)
'''
print centroids.shape
os.system('cp ../AIS_GAL_SCAN/asprta/%s-asprta.fits ../AIS_GAL_SCAN/asprta/%s-asprta-cal.fits'%(name, name))
hdu = pyfits.open('../AIS_GAL_SCAN/asprta/%s-asprta-cal.fits'%(name), 'update')
hdu[1].data['RA'][1:] -= centroids[:,0]
hdu[1].data['DEC'][1:] -= centroids[:,1]
print hdu[1].data['RA'][1:]
print hdu[1].data['DEC'][1:]
hdu.flush()
hdu.close()
'''
def generate_sec_offsets(name):
print name
hdulist = pyfits.open('../AIS_GAL_SCAN/asprta/%s-asprta.fits'%name)
initial = 1
final = hdulist[1].data['T'].shape[0]-1
centers = []
for i in range(initial, final+1):
centers.append(np.load('../data/%s/cata/centroids_sec%d.npy'%(name, i)))
centroids = np.concatenate(centers, axis=0)
print centroids.shape
plt.plot(centroids[:,0], '.b')
plt.savefig('../plots/%s/cata/offsets_10_new_sec.pdf'%name, dpi=190)
plt.clf()
np.save('../data/%s/cata/offsets%d_10_new_sec.npy'%(name, initial), centroids)
asp_cal.secondary_cal(name)
tmp_files = glob.glob("../data/%s/cata/centroids_sec*"%name)
print tmp_files
for tmp_file in tmp_files:
os.remove(tmp_file)
def generate_sec_offsets_new(name):
print name
hdulist = pyfits.open('../AIS_GAL_SCAN/asprta/%s-asprta.fits'%name)
initial = 0
final = hdulist[1].data['T'].shape[0]-1
centers = []
center_time = []
for i in range(initial, final+1):
#center = np.load('../data/%s/cata/centroids_rot%d.npy'%(name, i))
#if center.shape!=(2,3):
# print i, center.shape
centers.append(np.load('../data/%s/cata/centroids_rot%d.npy'%(name, i)))
center_time.append(np.load('../data/%s/cata/time_rot%d.npy'%(name, i)))
centroids = np.concatenate(centers, axis=0)
time = np.concatenate(center_time, axis=0)
print centroids.shape
print time.shape
output = '../plots/%s/cata/offsets_10_new_sec.pdf'%name
dir = os.path.dirname(output)
if not os.path.exists(dir):
os.makedirs(dir)
plt.plot(centroids[:,0], '.b')
plt.savefig('../plots/%s/cata/offsets_10_new_sec.pdf'%name, dpi=190)
plt.clf()
np.save('../data/%s/cata/offsets%d_10_new_sec.npy'%(name, initial), centroids)
np.save('../data/%s/cata/time%d_10_new_sec.npy'%(name, initial), time)
co_data = hdulist[1].data
T = co_data['T']
ra = co_data['ra']
dec = co_data['dec']
roll = co_data['roll']
ra_new = np.interp(time, T, ra) - centroids[:,0]
dec_new = np.interp(time, T, dec) - centroids[:,1]
roll_new = np.interp(time, T, roll) - centroids[:,2]
other = np.zeros((time.shape[0], 8))
array = np.concatenate([np.array([time, ra_new, dec_new, roll_new]).T, other], axis=1)
data = np.core.records.fromarrays(array.transpose(), dtype=[('T', float), ('RA', float), ('DEC', float), ('ROLL', float),\
('STATUS_FLAG', int), ('ROLL_RAD', float), ('X', float), ('Y', float), ('Z', float), ('XDOT', float), ('YDOT', float), ('ZDOT', float)])
new_file = '../AIS_GAL_SCAN/asprta/%s-sec-asprta.fits'%(name)
os.system('cp ../AIS_GAL_SCAN/asprta/%s-asprta.fits ../AIS_GAL_SCAN/asprta/%s-sec-asprta.fits'%(name, name))
update(new_file, data, 1)
hdu = pyfits.open(new_file)
print hdu[1].data['RA'].shape
print hdu[1].data['DEC'].shape
hdu.close()
tmp_files = glob.glob("../data/%s/cata/centroids_rot*"%name)
for tmp_file in tmp_files:
os.remove(tmp_file)
tmp_files = glob.glob("../data/%s/cata/time_rot*"%name)
for tmp_file in tmp_files:
os.remove(tmp_file)
def generate_new_offsets_new(name, asprta, suffix, tmp_dir, num_p):
print name
try:
centroids = np.load(tmp_dir+'/offsets_%s.npy'%(suffix))
time = np.load(tmp_dir+'/time_%s.npy'%(suffix))
except IOError:
try:
centroids = np.load(tmp_dir+'/offsets0_%s.npy'%(suffix))
time = np.load(tmp_dir+'/time0_%s.npy'%(suffix))
except IOError:
print 'no file'
return 0
print centroids.shape
print time.shape
output = '../plots/1/%s-%s.pdf'%(name, suffix)
dir = os.path.dirname(output)
if not os.path.exists(dir):
os.makedirs(dir)
plt.plot(centroids[:,0], '.k')
plt.savefig('../plots/1/%s-%s_ra.pdf'%(name, suffix), dpi=190)
plt.clf()
plt.plot(centroids[:,1], '.k')
plt.savefig('../plots/1/%s-%s_dec.pdf'%(name, suffix), dpi=190)
plt.clf()
#np.save(tmp_dir+'/offsets_%s.npy'%(suffix), centroids)
#np.save(tmp_dir+'/time_%s.npy'%(suffix), time)
hdulist = pyfits.open(asprta)
co_data = hdulist[1].data
T = co_data['T']
ra = co_data['ra']
dec = co_data['dec']
roll = co_data['roll']
ra_new = np.interp(time, T, ra) - centroids[:,0]
dec_new = np.interp(time, T, dec) - centroids[:,1]
roll_new = np.interp(time, T, roll) - centroids[:,2]
other = np.zeros((time.shape[0], 8))
array = np.concatenate([np.array([time, ra_new, dec_new, roll_new]).T, other], axis=1)
data = np.core.records.fromarrays(array.transpose(), dtype=[('T', float), ('RA', float), ('DEC', float), ('ROLL', float),\
('STATUS_FLAG', int), ('ROLL_RAD', float), ('X', float), ('Y', float), ('Z', float), ('XDOT', float), ('YDOT', float), ('ZDOT', float)])
new_file = re.split('-asprta.fits', asprta)[0]+'-'+suffix+'-asprta.fits'
os.system('cp {0} {1}'.format(asprta, new_file))
update(new_file, data, 1)
hdu = pyfits.open(new_file)
print hdu[1].data['RA'].shape
print hdu[1].data['DEC'].shape
hdu.close()
if __name__ == '__main__':
if True:
name = sys.argv[1]
suffix = sys.argv[2]
generate_new_offsets_new(name, suffix)
| [
"[email protected]"
] | |
3317db01ff8d0d5eff65cd314197024a8f717d5c | cc196a0111bbdcc04af7e579bc87e808cc0c7b02 | /trident/__init__.py | 880dacc18d173da5184a33655707722ed4ae11ab | [
"MIT"
] | permissive | Jackliaoall-AI-Framework/trident | 73819a95121d9d4dbf81d28ae32aea43c0541840 | cd26c1108c05c3ab4c262f9b416a126b2ad2f858 | refs/heads/master | 2023-01-15T15:15:10.544946 | 2020-11-23T04:15:33 | 2020-11-23T04:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | """trident api"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from importlib import reload
from sys import stderr
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
__version__ = '0.6.1'
stderr.write('trident {0}\n'.format(__version__))
from trident.backend import *
import threading
import random
| [
"[email protected]"
] | |
2da7f4194e18775060afca1bfc1dcd85d1009570 | 3411ad233c411c06765f4b07f8670c12025178b6 | /201-300/231-240/237-deleteNodeInLinkedList/deleteNodeInLinkedList.py | 9b19cef457feb64cb67a51b92b91f733e6ae73ed | [
"MIT"
] | permissive | xuychen/Leetcode | 7d9d31fed898ce58440f5ae6665d2ccaf1a4b256 | c8bf33af30569177c5276ffcd72a8d93ba4c402a | refs/heads/master | 2021-11-19T20:39:43.741589 | 2021-10-24T16:26:52 | 2021-10-24T16:26:52 | 140,212,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next | [
"[email protected]"
] | |
912e2103ce003cb1d9fcd5a02369716b032b4a25 | 63872807248066ffd0435dcd12ea37d60dc4a9e2 | /pyCDFTOOLS/draw_clock.py | c02c4946e1043f9c0b18b75c93bb072decd01a2e | [
"MIT"
] | permissive | malzubairy/NEMO-related | 8214d80216a874b710c487e14f586596941fa587 | 81ea5a5e94aa40b90e95093384bf769ec269afc4 | refs/heads/master | 2020-04-27T02:59:48.745286 | 2019-03-04T09:28:04 | 2019-03-04T09:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,159 | py | #!/usr/bin/env python3
# JM: 27 Nov 2018
# draw a clock
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
# units in radians
def draw_clock(yyyymmdd, clock_color = "xkcd:grey", progress_color = "Spectral", fontsize = 14, ax = None):
"""
fairly dumb way of drawing a clock, takes input as yyyymmdd and draws a clock
through drawing some filled circles with contourf in polar plot
the plot will not automatically scale but the "fontsize" number can be modified
accordingly to make it scale, so test this with some sample images first
input:
yyyymmdd string of yyyy/mm/dd, how you grab from data is up to you
clock_color default is "xkcd:grey", modify accordingly as RGB, hex, python words etc.
progress_color default is orange progress lime background from "Spectral", change it
                    by inputting a colormap if you like
fontsize default 14, modify this depending on clock size
ax subplot axes to plot it, suggestion is in the parent axes do say
a = plt.axes([0.95, .6, .2, .2], polar = True)
draw_clock("19510630", ax = a, fontsize = 10)
"""
if ax is None:
ax = plt.axes(projection = 'polar')
# set up the clock as a circle
ax.set_theta_offset(np.pi / 2.0) # start counting at 12 o'clock
ax.set_theta_direction("clockwise") # go clockwise
ax.set_xticks([]) # kill all the ticks
ax.set_rticks([])
ax.set_rlim(0, 1) # set the clockface
ax.set_facecolor(clock_color)
# set up an array to plot the invariant parts
outer_line = 0.70
inner_line = 0.45
theta_vec = np.linspace(0, 2 * np.pi, 71)
r_vec = np.linspace(inner_line, outer_line, 31)
theta, r = np.meshgrid(theta_vec, r_vec)
# set up some settings (hand tuned for now...)
months = {}
months["label"] = ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"]
months["theta"] = np.arange(0, 12, 1) * np.pi / 6.0 + np.pi / 12.0
months["days"] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # assume no leap years
# work out the date in radians
year = int(yyyymmdd[0:4])
month = int(yyyymmdd[4:6])
day = int(yyyymmdd[6::])
if (year > 9999) or (year < 0):
print("year grabbed is %.4d and is out of bounds?" % year)
return
if (month > 12) or (month < 0):
print("month grabbed is %.2d and is out of bounds?" % month)
return
if (day > months["days"][month - 1]) or (day < 0):
print("month grabbed is %.2d" % month)
print("but date grabbed is %.2d so is out of bounds?" % day)
return
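  # each month spans pi/6 radians on the clock face; add the elapsed fraction of the current month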
date_in_rad = (month - 1) * np.pi / 6.0 + (day / months["days"][month - 1]) * np.pi / 6.0
ax.plot(theta_vec, inner_line * np.ones(theta_vec.shape), 'k')
ax.plot(theta_vec, outer_line * np.ones(theta_vec.shape), 'k')
ax.plot(theta_vec, 1.0 * np.ones(theta_vec.shape), 'k', linewidth = 2)
ax.text(3 * np.pi / 2.0, 0.0, "%.4d" % year,
ha = 'center', va = 'center', fontsize = fontsize)
for month in range(12):
ax.plot([month * np.pi / 6.0, month * np.pi / 6.0], [outer_line, 1.0], 'k-')
ax.text(months["theta"][month], 0.85, months["label"][month],
ha = 'center', va = 'center', fontsize = fontsize)
  filled_region = np.where(theta < date_in_rad + 0.01, -1, 1) # add a little increment to push the contour over
ax.contourf(theta, r, filled_region, levels = np.linspace(-2, 2, 3), cmap = progress_color)
def hex_to_rgb(color_hex):
color_rgb = tuple(int(color_hex[i:i+2], 16) / 255 for i in (0, 2 ,4))
return color_rgb
def hex_duple_colormap(color_hex1, color_hex2, sample = False):
colors = [hex_to_rgb(color_hex1), hex_to_rgb(color_hex2)]
cmap = LinearSegmentedColormap.from_list("murp", colors, N = 2)
if sample:
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2*np.pi, 0.1)
X, Y = np.meshgrid(x, y)
Z = np.cos(X) * np.sin(Y) * 10
ax = plt.axes()
im = ax.imshow(Z, interpolation='nearest', origin='lower', cmap = cmap)
    plt.colorbar(im, ax = ax)
plt.show()
return cmap
| [
"[email protected]"
] | |
78c173508b8ba231c1887527ff227333e548ecf8 | f010d4693aa8c3a8ba10220cec010a1138fc9311 | /venv/bin/faker | 85b96713c2cc50149d465d9527ac94eb10891e19 | [] | no_license | difasdfs/project_crisbardashboard | a4cf028e65a76688bd347e12b888cad910db5ae5 | 5c47bff0d491c29abb1cddaad152e66d5624336d | refs/heads/main | 2023-02-03T09:58:09.859779 | 2020-12-26T03:53:56 | 2020-12-26T03:53:56 | 315,183,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | #!/home/itcrisbar/project_crisbardashboard/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from faker.cli import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
b0c14efa30aa6296d714f88a56de72b29a3cb346 | 821d830910c354cb89767a77e00c77deb592ca0c | /bayesnet/math/__init__.py | a7e1743bd4c1a5cdfed8a8de29633ed4e5c1f037 | [
"MIT"
] | permissive | zxsted/BayesianNetwork | c61aa77a511e96852dec38f268f0dc31b6752cac | efe75b5416a262741fa60ad09380684886e91eff | refs/heads/master | 2021-05-09T05:38:43.513255 | 2017-10-25T06:58:26 | 2017-10-25T06:58:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from bayesnet.math.add import add
from bayesnet.math.divide import divide, rdivide
from bayesnet.math.exp import exp
from bayesnet.math.log import log
from bayesnet.math.matmul import matmul, rmatmul
from bayesnet.math.mean import mean
from bayesnet.math.multiply import multiply
from bayesnet.math.negative import negative
from bayesnet.math.power import power, rpower
from bayesnet.math.product import prod
from bayesnet.math.sqrt import sqrt
from bayesnet.math.square import square
from bayesnet.math.subtract import subtract, rsubtract
from bayesnet.math.sum import sum
from bayesnet.tensor.tensor import Tensor
Tensor.__add__ = add
Tensor.__radd__ = add
Tensor.__truediv__ = divide
Tensor.__rtruediv__ = rdivide
Tensor.mean = mean
Tensor.__matmul__ = matmul
Tensor.__rmatmul__ = rmatmul
Tensor.__mul__ = multiply
Tensor.__rmul__ = multiply
Tensor.__neg__ = negative
Tensor.__pow__ = power
Tensor.__rpow__ = rpower
Tensor.prod = prod
Tensor.__sub__ = subtract
Tensor.__rsub__ = rsubtract
Tensor.sum = sum
__all__ = [
"add",
"divide",
"exp",
"log",
"matmul",
"mean",
"multiply",
"power",
"prod",
"sqrt",
"square",
"subtract",
"sum"
]
| [
"[email protected]"
] | |
ca2d354525667e6ce05dcbd15758dec072a1e2c1 | 0559941516569d39a040341ab0861975fa70a327 | /Code/src/hellopython/第二章/test/monthrate.py | a933543be04d02f56c353bc94fe127fb1b402d5d | [] | no_license | LYTXJY/python_full_stack | e4cadfea45a711a72d56229d6032d46c0b752899 | 9e99b0d9d44e7191b0358a02e114bbcd18eee635 | refs/heads/master | 2021-02-03T22:34:13.939998 | 2020-03-11T14:05:29 | 2020-03-11T14:05:29 | 243,561,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #计算利率
#月供 = 贷款数*月利率 / (1 - 1/(1+月利率)**(年限*12))
years = eval(input("请输入贷款年限:"))
money = eval(input("请输入贷款金额:"))
monthrate = eval(input("请输入贷款月利率:"))
monthmoney = money * monthrate / (1 - 1 / (1 + monthrate) ** (years * 12))
totalmoney = monthmoney * years * 12
print("月供:", monthmoney, "总还款:", totalmoney)
| [
"[email protected]"
] | |
5b16d88b43724df85472827ca9a1de35f189620b | 6ed48bf3c72e61fe53144a3545ab305112c93501 | /appengine/findit/handlers/test/help_triage_test.py | ef11561424031e1e74e5d088f7422bb54953dd08 | [
"BSD-3-Clause"
] | permissive | eunchong/infra | ee5f7a9379977de8c814f90dbba3f6adbf06a75c | ce3728559112bfb3e8b32137eada517aec6d22f9 | refs/heads/master | 2022-11-27T06:26:57.415805 | 2016-04-08T12:34:36 | 2016-04-08T12:34:36 | 55,699,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,341 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import webapp2
from testing_utils import testing
from common.git_repository import GitRepository
from handlers import help_triage
from model.wf_analysis import WfAnalysis
from model.wf_build import WfBuild
from waterfall import buildbot
from waterfall.build_info import BuildInfo
from waterfall import build_util
EXPECTED_RESULTS_120 = {
'598ed4fa15e6a1d0d92b2b7df04fc31ab5d6e829': {
'fixed_cl_review_url': 'https://codereview.chromium.org/12578123',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/121'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463001',
'fixing_cl_commit_position': 342013,
'fixed_cl_commit_position': 341971,
'fixed_revision': '598ed4fa15e6a1d0d92b2b7df04fc31ab5d6e829',
'fixing_build_number': 121,
'action': 'Reverted',
'fixing_revision': '598sd489df74g125svf35s04fc3'
},
'062a6f974d7c08d27902060c241149ce193e4dd5': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1268183002',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/121'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463006',
'fixing_cl_commit_position': 342015,
'fixed_cl_commit_position': 341977,
'fixed_revision': '062a6f974d7c08d27902060c241149ce193e4dd5',
'fixing_build_number': 121,
'action': 'Reverted',
'fixing_revision': '123456789c08d27902060c241149ce193e4dd5dd'
},
'584de1b73f811bcdb98eae1fb0d92b2b7df04fc3': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1263223005',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/122'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463003',
'fixing_cl_commit_position': 342014,
'fixed_cl_commit_position': 341976,
'fixed_revision': '584de1b73f811bcdb98eae1fb0d92b2b7df04fc3',
'fixing_build_number': 122,
'action': 'Reverted',
'fixing_revision': '123456671bcdb98eae1fb0d92b2b7df04fc3'
},
'3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1260813007',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/123'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463100',
'fixing_cl_commit_position': 332070,
'fixed_cl_commit_position': 341978,
'fixed_revision': '3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6',
'fixing_build_number': 123,
'action': 'Reverted',
'fixing_revision': '123455668d4ab0670331a6c0ebfc4f3ab8e6'
}
}
EXPECTED_RESULTS_121 = {
'3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1260813007',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/123'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463100',
'action': 'Reverted',
'fixed_cl_commit_position': 341978,
'fixed_revision': '3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6',
'fixing_build_number': 123,
'fixing_cl_commit_position': 332070,
'fixing_revision': '123455668d4ab0670331a6c0ebfc4f3ab8e6'
},
'584de1b73f811bcdb98eae1fb0d92b2b7df04fc3': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1263223005',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/122'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463003',
'action': 'Reverted',
'fixed_cl_commit_position': 341976,
'fixed_revision': '584de1b73f811bcdb98eae1fb0d92b2b7df04fc3',
'fixing_build_number': 122,
'fixing_cl_commit_position': 342014,
'fixing_revision': '123456671bcdb98eae1fb0d92b2b7df04fc3'
},
'123456789c08d27902060c241149ce193e4dd5dd': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1280463006',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/122'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/121'),
'fixed_build_number': 121,
'fixing_cl_review_url': 'https://codereview.chromium.org/1161773008',
'action': 'Reverted',
'fixed_cl_commit_position': 342015,
'fixed_revision': '123456789c08d27902060c241149ce193e4dd5dd',
'fixing_build_number': 122,
'fixing_cl_commit_position': 332062,
'fixing_revision': '062a6f974d7c01234569ce193e4dd5'
}
}
class HelpTriageTest(testing.AppengineTestCase):
app_module = webapp2.WSGIApplication([
('/help-triage', help_triage.HelpTriage),
], debug=True)
def _GetBuildInfo(self, master_name, builder_name, build_number):
file_name = os.path.join(
os.path.dirname(__file__), 'data', 'help_triage_test_data',
'build_data_%s_%s_%s.json' % (
master_name, builder_name, build_number))
if not os.path.isfile(file_name):
return None
with open(file_name, 'r') as f:
return f.read()
def _MockDownloadBuildData(
self, master_name, builder_name, build_number):
build = WfBuild.Get(master_name, builder_name, build_number)
if not build: # pragma: no cover
build = WfBuild.Create(master_name, builder_name, build_number)
build.data = self._GetBuildInfo(master_name, builder_name, build_number)
build.put()
return build
def _MockDownloadChangeLogData(self, revision):
file_name = os.path.join(
os.path.dirname(__file__), 'data', 'help_triage_test_data',
'change_log_' + revision)
with open(file_name) as f:
commit_log = f.read()
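    # The fetched change log is prefixed with the ")]}'" anti-XSSI line; strip it before JSON-decoding.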
return revision, json.loads(commit_log[len(')]}\'\n'):])
def setUp(self):
super(HelpTriageTest, self).setUp()
self.master_name = 'm'
self.builder_name = 'b'
self.mock_current_user(user_email='[email protected]', is_admin=True)
self.mock(build_util, 'DownloadBuildData',
self._MockDownloadBuildData)
self.mock(GitRepository, '_DownloadChangeLogData',
self._MockDownloadChangeLogData)
def _CreateAnalysis(self, build_number, first_failure, last_pass=None):
analysis = WfAnalysis.Create(
self.master_name, self.builder_name, build_number)
analysis.result = {
'failures': [
{
'last_pass': last_pass,
'first_failure': first_failure,
'suspected_cls': [],
'step_name': 'gn_check'
}
]
}
analysis.put()
def testGetFirstFailedBuild(self):
self._CreateAnalysis(120, 118, 117)
first_build, failed_steps = help_triage._GetFirstFailedBuild(
self.master_name, self.builder_name, 120)
self.assertEqual(118, first_build)
self.assertEqual(['gn_check'], failed_steps)
def testGetFirstFailedBuildNoLastPass(self):
self._CreateAnalysis(120, 118)
first_build, failed_steps = help_triage._GetFirstFailedBuild(
self.master_name, self.builder_name, 120)
self.assertEqual(118, first_build)
self.assertEqual(['gn_check'], failed_steps)
def testGetFirstFailedBuildNoAnalysis(self):
first_build, failed_steps = help_triage._GetFirstFailedBuild(
self.master_name, self.builder_name, 120)
self.assertIsNone(first_build)
self.assertIsNone(failed_steps)
def testCheckReverts(self):
self._CreateAnalysis(120, 120)
results = help_triage._CheckReverts(
self.master_name, self.builder_name, 120)
self.assertEqual(EXPECTED_RESULTS_120, results)
def testCheckRevertsReturnNoneWhenNoGreenBuild(self):
self._CreateAnalysis(124, 124)
expected_results = {}
results = help_triage._CheckReverts(
self.master_name, self.builder_name, 124)
self.assertEqual(expected_results, results)
def testCheckRevertsReturnNoneWhenNoReverts(self):
self._CreateAnalysis(118, 118)
expected_results = {}
results = help_triage._CheckReverts(
self.master_name, self.builder_name, 118)
self.assertEqual(expected_results, results)
def testHelpTriageHandler(self):
build_url = buildbot.CreateBuildUrl(
self.master_name, self.builder_name, 121)
analysis = WfAnalysis.Create(self.master_name, self.builder_name, 121)
analysis.result = {
'failures': [
{
'last_pass': None,
'first_failure': 120,
'suspected_cls': [],
'step_name': 'gn_check'
}
]
}
analysis.put()
response = self.test_app.get('/help-triage', params={'url': build_url})
self.assertEqual(200, response.status_int)
self.assertEqual(EXPECTED_RESULTS_121, response.json_body)
def testHelpTriageHandlerReturnNoneForGreenBuild(self):
build_url = buildbot.CreateBuildUrl(
self.master_name, self.builder_name, 123)
build = WfBuild.Create(self.master_name, self.builder_name, 123)
build.data = self._GetBuildInfo(self.master_name, self.builder_name, 123)
build.put()
response = self.test_app.get('/help-triage', params={'url': build_url})
expected_results = {}
self.assertEqual(200, response.status_int)
self.assertEqual(expected_results, response.json_body)
| [
"[email protected]"
] | |
afff7325a984dc8bfcc0c081baafbff71099bdcd | 8d7d847f109183365022112a6b76cb20fa158255 | /Crawler/Crawler/settings.py | fd19646728c9c25cb290559bf94c5142869e8503 | [] | no_license | Xarliend/DataViewSite | 42e59fed9483ff2f877a40b4f31a1b82c5435ecd | 59921492547d68908e6e8f54a062baeb05b19580 | refs/heads/master | 2022-11-24T15:12:01.842042 | 2020-01-04T22:21:17 | 2020-01-04T22:21:17 | 227,699,982 | 0 | 0 | null | 2022-11-04T19:31:04 | 2019-12-12T21:29:47 | Python | UTF-8 | Python | false | false | 3,089 | py | # -*- coding: utf-8 -*-
# Scrapy settings for Crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Crawler'
SPIDER_MODULES = ['Crawler.spiders']
NEWSPIDER_MODULE = 'Crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Crawler.middlewares.CrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'Crawler.middlewares.CrawlerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'Crawler.pipelines.CrawlerPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
b0395f5306037472584cb31f83bbac95066c2fa6 | 7e293e5992eea0e139f9420925a9bcb64d0fd7c2 | /data_sets/generate_data.py | 4f9c0ce0032bc0049e8a5d47b4efcaf54e9a29cd | [
"MIT"
] | permissive | xcleancode/Distributed_GPU_LSH_using_SYCL | 5dd0ee84220119ef8b1577b0dc3f357e9caf8195 | a1763670fa4e9df8ac1df4ffcc89b0c5ceed36cb | refs/heads/master | 2023-05-13T04:45:41.573043 | 2021-06-07T12:31:52 | 2021-06-07T12:31:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,743 | py | # @author Marcel Breyer
# @date 2020-08-06
# @brief Python3 script for generating data sets.
import argparse
import sklearn.datasets
import sklearn.preprocessing
import numpy as np
import sys
import csv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def size_in_bytes(numpy_type):
return np.dtype(numpy_type).itemsize
# real_type = np.float32
real_type = np.uint32
size_type = np.uint32
# setup command line arguments parser
parser = argparse.ArgumentParser()
parser.add_argument("--size", help="the number of data points", type=int, required=True)
parser.add_argument("--dims", help="the number of dimensions per data point", type=int, required=True)
parser.add_argument("--output_file", help="the file to write the generated data points to", type=str, required=True)
parser.add_argument("--num_cluster", help="the number of different clusters", type=int, default=3, required=False)
parser.add_argument("--cluster_std", help="the clusters standard deviation", type=float, default=1.0, required=False)
parser.add_argument("--scale", help="scales the data points to [0, 1]", action="store_true")
parser.add_argument("--binary", help="saves the data in binary format", action="store_true")
parser.add_argument("--debug", help="uses debug data", action="store_true")
args = parser.parse_args()
# generate data points
if args.debug:
data = np.arange(args.size * args.dims, dtype=real_type) % args.size
data = np.reshape(data, (args.size, args.dims))
else:
data = sklearn.datasets.make_blobs(n_samples=args.size, n_features=args.dims, centers=args.num_cluster, \
cluster_std=args.cluster_std, shuffle=True, random_state=1)[0].astype(real_type)
# scale data to [0, 1] if requested
if args.scale:
sklearn.preprocessing.minmax_scale(data, feature_range=(0, 1), copy=False)
if args.binary:
# write data points to file in binary format
with open(args.output_file, 'wb') as file:
file.write(args.size.to_bytes(size_in_bytes(size_type), sys.byteorder))
file.write(args.dims.to_bytes(size_in_bytes(size_type), sys.byteorder))
file.write(data.tobytes())
else:
# write data points to file in text format
with open(args.output_file, 'w', newline='\n') as file:
writer = csv.writer(file, delimiter=',')
writer.writerow([args.size])
writer.writerow([args.dims])
writer.writerows(data)
# draw data points if dims == 2 || dims == 3
# if args.dims == 2:
# plt.scatter(data[:, 0], data[:, 1], s=10)
# plt.show()
# elif args.dims == 3:
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(data[:, 0], data[:, 1], data[:, 2], s=10)
# plt.show()
| [
"[email protected]"
] | |
57b500dbbccbef6d98b6b6d9293526c6082141e2 | 3ff9a091e9546b336b5d167fb3678c315e2a6e44 | /testing/test_calc.py | cbc579781beafe9ef19d32d8a33b008117c3ab31 | [] | no_license | titoeb/python_tuts | 7b2a8b7a6a3d5afc83d0f44a7ec46c575ec6ef3b | 4efb0e364ba929ce2cf8dd21a7fa86fec5e049f6 | refs/heads/master | 2021-05-25T07:30:19.292777 | 2020-04-07T11:27:36 | 2020-04-07T11:27:36 | 253,715,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | import unittest
from unittest.mock import patch
import calc
class TestCalc(unittest.TestCase):
def test_add(self):
self.assertEqual(calc.add(10, 5), 15)
self.assertEqual(calc.add(5, -5), 0)
self.assertEqual(calc.add(-10, -5), -15)
def test_subtract(self):
self.assertEqual(calc.subtract(10, 5), 5)
self.assertEqual(calc.subtract(5, -5), 10)
self.assertEqual(calc.subtract(-10, -5), -5)
def test_multiply(self):
self.assertEqual(calc.multiply(10, 5), 50)
self.assertEqual(calc.multiply(5, -5), -25)
self.assertEqual(calc.multiply(-10, -5), 50)
def test_divide(self):
self.assertEqual(calc.divide(10, 5), 2)
self.assertEqual(calc.divide(5, -5), -1)
self.assertEqual(calc.divide(-10, -5), 2)
self.assertEqual(calc.divide(5, 2), 2.5)
self.assertRaises(ValueError, calc.divide, 10, 0)
def test_get_website_1(self):
with patch('calc.requests.get') as mocket_get:
mocket_get.return_value.ok = True
mocket_get.return_value.text = 'Success'
schedule = calc.get_website()
mocket_get.assert_called_with('http://company.com/tim/1')
self.assertEqual(schedule, 'Success')
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
82bf1eea5257be4934bb0ac54853d03f28b2cab4 | fcf656dccbf69fd2fe6abe4fa82b5e9722bc3052 | /util/__init__.py | e8c2b4443a05f4f240f2f5008f3ef4edd7fc6b3e | [] | no_license | tetrapus/Karkat | 7630374fe3d3ecea3706b29ccfa1ec08722db5fd | 6e898ce3e6814924314bd0106b1b844adced7258 | refs/heads/master | 2021-01-17T08:55:42.857753 | 2017-10-31T03:51:07 | 2017-10-31T03:51:07 | 9,131,639 | 9 | 10 | null | 2023-06-28T11:38:30 | 2013-03-31T15:01:06 | Python | UTF-8 | Python | false | false | 1,381 | py | import threading
from . import services
from . import irc
from . import text
from . import dcc
from . import images
from . import files
from . import throttle
from . import database
# Taken straight from the xchat source. Thanks, xchat!
rfc_tolowertab = {'A': 'a', 'G': 'g', '\\': '|', '^': '~', 'D': 'd', 'C': 'c', 'T': 't', 'M': 'm', 'I': 'i', 'B': 'b', 'N': 'n', 'R': 'r', 'W': 'w', 'L': 'l', 'F': 'f', 'Y': 'y', '[': '{', 'P': 'p', 'S': 's', 'H': 'h', ']': '}', 'O': 'o', 'Q': 'q', 'U': 'u', 'V': 'v', 'J': 'j', 'K': 'k', 'E': 'e', 'Z': 'z', 'X': 'x'}
def cmp(a, b):
return (a > b) - (a < b)
def rfc_nickkey(nick: str) -> str:
return "".join(rfc_tolowertab.get(i, i) for i in nick)
def average(x):
return float(sum(x))/len(x) if x else 0.00
# TODO: Move to submodule
class Job(threading.Thread):
def __init__(self, job):
threading.Thread.__init__(self)
self.job = job
self.answer = None
def run(self):
self.answer = self.job()
def parallelise(jobs):
# Do all jobs in parallel and return their results.
threads = [Job(i) for i in jobs]
# Start all threads
for i in threads: i.start()
# Join all threads
for i in threads: i.join()
return [i.answer for i in threads]
__all__ = ["services", "irc", "text", "parallelise", "cmp", "rfc_nickkey", "average", "dcc", "images", "files", "throttle"] | [
"[email protected]"
] | |
42fed63039e8e6390da8ccfcffd357ec6875fbb3 | 08aec07b5db889e586a56e36efea31c21c5695f2 | /day009/ex56.py | 9e14fad93694867afc15eb03ad6e7d9908ee1a76 | [] | no_license | nileshnegi/hackerrank-python | 7bb19929d2b963f02b37259c06b893c6520f33dc | 0d2ab9ee40156e81b568ab4d5a6d5cd4f6ca7385 | refs/heads/master | 2023-01-07T07:20:44.859336 | 2020-11-11T14:30:21 | 2020-11-11T14:30:21 | 279,877,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | """
Introduction to Sets
Ms. Gabriel Williams is a botany professor at District College.
One day, she asked her student Mickey to compute the average
of all the ```N``` plants with distinct heights in her greenhouse.
"""
def average(array):
array = set(array)
return sum(array) / len(array)
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result) | [
"[email protected]"
] | |
f2e42d00a9c9ec787e590be61a586dcb7a5ca218 | 938645067e93c08e8c4f2bf26a03565e0dc9002b | /filter/particle_filter.py | bb8b15b5700119d309c42c3cf2e1186bc15661ed | [
"MIT"
] | permissive | kolaszko/particle_filter | 51d584e90d78a3837a21dad1025dc8550c93c7fd | 9fedcee5ef2eb00a1fa85398327121e3df53f94c | refs/heads/main | 2023-02-27T17:51:39.468016 | 2021-02-04T11:12:56 | 2021-02-04T11:12:56 | 335,359,281 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import abc
class ParticleFilter(abc.ABC):
@abc.abstractmethod
def resample(self):
pass
@abc.abstractmethod
def predict(self):
pass
@abc.abstractmethod
def update(self):
pass
| [
"[email protected]"
] | |
0a86e4fc0405ed8ccd273d8afd97faa2407ff8a0 | 1f9f0f53555a32417cd4f8a84ca65b766b7b5230 | /sequence_operations/nucleotides_and_complements.py | 256313ac7ac21c94612ccff312c05ba9636c9dd5 | [] | no_license | AlexThePav/biopython_tutorial | 56044ca924e04fbce545cb712efa4fb5e23050fd | ed8950ba0e199498340c54e8dc5eba0b0cc7b69d | refs/heads/master | 2020-05-27T11:28:49.980694 | 2019-06-06T15:58:38 | 2019-06-06T15:58:38 | 188,601,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC", IUPAC.unambiguous_dna)
print(repr(my_seq))
print(repr(my_seq.complement()))
print(repr(my_seq.reverse_complement()))
print(repr(my_seq[::-1])) | [
"[email protected]"
] | |
f5aa93af1b204622e971696dced1aaeadd31c668 | 9b7ccf0deba84b8d53efcd42d407a7650feb1848 | /djangoAnswer/djangoAnswer/manage.py | dc86f99c714f93995a3a750e48089906ae5e7f68 | [] | no_license | Ryu-Morimoto/Django-Training | 37ba72232714164b992f333630172d638a91238f | 3e5a1a4501e2bb04a841f8ae7b3155ef204192eb | refs/heads/master | 2022-12-10T00:26:46.832692 | 2020-01-21T04:29:55 | 2020-01-21T04:29:55 | 298,179,922 | 0 | 0 | null | 2020-09-24T05:42:46 | 2020-09-24T05:42:45 | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoAnswer.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5c28ac9cbfe8f5ad592a4cfcd48b4f8f7a649334 | 3553129c1ddef3cab5f4aa7670c8c2f7dc970884 | /GuardCam_Test.py | b092b2dc7896273bc44b396a53586454c046a853 | [] | no_license | kichichoi102/GuardCam_HackATL | f979d8f3f00d6f451fb8744d494955af6deb0ea0 | f70f6ecc1abd9b3b74b46524dd68c3e43cfb6e29 | refs/heads/main | 2022-12-23T13:45:14.721945 | 2020-10-04T03:51:43 | 2020-10-04T03:51:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | import os, io
from google.cloud import vision_v1
import pandas as pd
request = vision_v1.GetProductSetRequest(name="name")
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'###Directory to Google Cloud Key#####'
client = vision_v1.ImageAnnotatorClient()
file_name = "offenders1.png"
image_path = f'.\Images\{file_name}'
with io.open(image_path, "rb") as image_file:
content = image_file.read()
image = vision_v1.types.Image(content=content)
response = client.face_detection(image=image)
faceAnnotations = response.face_annotations
print("Faces Borders:")
for face in faceAnnotations:
face_vertices = ['({0},{1})'.format(vertex.x, vertex.y) for vertex in face.bounding_poly.vertices]
print("Face Bound: {0}".format(",".join(face_vertices)))
print("")
question = input("Do you want to see all the details? (y/N)\n")
question.lower()
if question == "y":
print(faceAnnotations)
#
# print("Faces Borders:")
# for face in faceAnnotations:
# for position in face.landmarks:
# landmark_nose_tip = ['({0},{1},{2})'.format(position.x, position.y, position.z)]
# # print("Face Bound: {0}".format(",".join(face_vertices)))
# # print("")
| [
"[email protected]"
] | |
9bdbd72fc4ed1a170536032760d6ca6dc7f9be45 | f4133969204cdc43719dd9da6edd1e438bc94d29 | /feb15/r1.py | bce2e6cae9073fd780db27e8cf61b782e909b185 | [] | no_license | elake/inclass | 305def75f832c963cfb0bfc12c2f150481e02903 | f64a1d93aff317af2b79d64ed64d2f0eecac8764 | refs/heads/master | 2021-01-20T11:23:34.937770 | 2013-04-03T23:51:31 | 2013-04-03T23:51:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | while True:
x = input()
print(x)
| [
"cmput296@ubuntu.(none)"
] | cmput296@ubuntu.(none) |