max_stars_repo_path (string, length 4–286) | max_stars_repo_name (string, length 5–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.03M) | content_cleaned (string, length 6–1.03M) | language (string, 111 classes) | language_score (float64, 0.03–1) | comments (string, length 0–556k) | edu_score (float64, 0.32–5.03) | edu_int_score (int64, 0–5) |
---|---|---|---|---|---|---|---|---|---|---|
notification/migrations/0003_auto_20210329_0459.py | Ariesgal2017/twitterclonez | 0 | 6630751 | <reponame>Ariesgal2017/twitterclonez
# Generated by Django 3.1.7 on 2021-03-29 04:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notification', '0002_auto_20210327_0626'),
]
operations = [
migrations.CreateModel(
name='Notice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('message', models.TextField(verbose_name='message')),
('created_ts', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('read', models.BooleanField(default=False, verbose_name='read')),
('archived', models.BooleanField(default=False, verbose_name='archived')),
('recipient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recieved_notices', to=settings.AUTH_USER_MODEL, verbose_name='recipient')),
('sender', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_notices', to=settings.AUTH_USER_MODEL, verbose_name='sender')),
],
options={
'verbose_name': 'notice',
'verbose_name_plural': 'notices',
'ordering': ['-created_ts'],
},
),
migrations.DeleteModel(
name='Notification',
),
]
| # Generated by Django 3.1.7 on 2021-03-29 04:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notification', '0002_auto_20210327_0626'),
]
operations = [
migrations.CreateModel(
name='Notice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('message', models.TextField(verbose_name='message')),
('created_ts', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('read', models.BooleanField(default=False, verbose_name='read')),
('archived', models.BooleanField(default=False, verbose_name='archived')),
('recipient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recieved_notices', to=settings.AUTH_USER_MODEL, verbose_name='recipient')),
('sender', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_notices', to=settings.AUTH_USER_MODEL, verbose_name='sender')),
],
options={
'verbose_name': 'notice',
'verbose_name_plural': 'notices',
'ordering': ['-created_ts'],
},
),
migrations.DeleteModel(
name='Notification',
),
] | en | 0.793428 | # Generated by Django 3.1.7 on 2021-03-29 04:59 | 1.707644 | 2 |
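
The migration above fully determines the model state it creates, so the equivalent model can be sketched directly from the `CreateModel` operation. The block below is that reconstruction — it is not the repository's actual `models.py`, and it keeps the field names (including the misspelled `recieved_notices` related name) exactly as the migration declares them.

```python
# Reconstructed from the CreateModel operation above; not the repository's own models.py.
from django.conf import settings
from django.db import models


class Notice(models.Model):
    subject = models.CharField(max_length=255)
    message = models.TextField(verbose_name='message')
    created_ts = models.DateTimeField(auto_now_add=True, verbose_name='created')
    read = models.BooleanField(default=False, verbose_name='read')
    archived = models.BooleanField(default=False, verbose_name='archived')
    recipient = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='recieved_notices',  # spelling kept as declared in the migration
        verbose_name='recipient',
    )
    sender = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        on_delete=models.CASCADE,
        related_name='sent_notices',
        verbose_name='sender',
    )

    class Meta:
        verbose_name = 'notice'
        verbose_name_plural = 'notices'
        ordering = ['-created_ts']
```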
Client_Windows/reset.py | peeesspee/BitOJ | 30 | 6630752 | from database_management import manage_database
rabbitmq_username = 'client'
rabbitmq_password = '<PASSWORD>'
host = 'localhost'
conn, cur = manage_database.initialize_table()
manage_database.reset_database(conn)
| from database_management import manage_database
rabbitmq_username = 'client'
rabbitmq_password = '<PASSWORD>'
host = 'localhost'
conn, cur = manage_database.initialize_table()
manage_database.reset_database(conn)
| none | 1 | 1.790886 | 2 |
|
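
The `manage_database` helpers imported by `reset.py` are not included in this row. The sketch below shows one way such helpers could be implemented with `sqlite3`; it is purely illustrative — the function names come from the snippet above, but the schema and storage backend are assumptions, not the actual BitOJ module.

```python
# Hypothetical stand-in for database_management.manage_database (assumed, not BitOJ's code).
# Only the two functions used by reset.py are sketched.
import sqlite3


def initialize_table(db_file="client_database.db"):
    """Open (or create) the local client database and return (connection, cursor)."""
    conn = sqlite3.connect(db_file)
    cur = conn.cursor()
    cur.execute(
        "CREATE TABLE IF NOT EXISTS submissions ("
        "id INTEGER PRIMARY KEY, problem TEXT, verdict TEXT)"
    )
    conn.commit()
    return conn, cur


def reset_database(conn):
    """Drop client-side state so the client can start from a clean slate."""
    cur = conn.cursor()
    cur.execute("DROP TABLE IF EXISTS submissions")
    conn.commit()
```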
crypto_sign_challenge.py | kumarnalinaksh21/Intel-Crypto-Challenge | 0 | 6630753 | <gh_stars>0
import sys
import os
import platform
import base64
import json
import logging
from OpenSSL import crypto
#---------------------- Initialising Logger------------------------#
logging.basicConfig(filename='crypto-sign-challenge.log', filemode='a', format='%(asctime)s - %(name)s - %(message)s')
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
#---------------------- Initialising Global Variables------------------------#
message = "" #initialising empty message
count = 0 #initialising empty count for number of characters in the message
Type_Key = crypto.TYPE_RSA #initialising key type to RSA, modify to TYPE_DSA for DSA
bits = 2048 #initialising bit size for keys
private_key = "" #initialising empty private key
public_key = "" #initialising empty public key
pkey = "" #initialising empty pkey object
signature = "" #initialising empty signature
resultJSON = {} #initialising empty JSON response
flag = False #set global flag as false
numberOfArguments = len(sys.argv) #total arguments passed to the script
#---------------------- Function generates the message from the command-line arguments------------------------#
def Consolidate_Message():
global message, numberOfArguments
for i in range(1, numberOfArguments): #Forming message from the argument
message = message + sys.argv[i] + " "
logger.debug("Constructing the message from parsed arguments.")
#---------------------- Function checks the message against the min and max character limits------------------------#
def Check_Input():
global message, count, flag
for i in message: #counting number of characters in the message
count = count + 1
count = count - 1 #removing the count for the space character at the end of the message
if(count<=1 or count>250): #giving alert if number of characters are insufficient or more than required.
logger.debug("The message must be more than 1 character and less than 250 characters.")
flag = False
logger.debug("Script execution is unsuccessfull!")
else:
flag = True
logger.debug("The message is within acceptable parameters")
#---------------------- Function generates the Private and Public key pair------------------------#
def Create_Key_Pair():
global Type_Key, bits, private_key, public_key, pkey
pkey = crypto.PKey()
pkey.generate_key(Type_Key, bits) #generating key pair
private_key = crypto.dump_privatekey(crypto.FILETYPE_PEM , pkey , None) #saving private key contents in byte form
public_key = crypto.dump_publickey(crypto.FILETYPE_PEM , pkey) #saving public key contents in byte form
logger.debug("Key pair has been generated")
#---------------------- Function reads the Private and Public keys from their respective PEM files------------------------#
def Read_Key_Pair_From_PEM_File(PrivFilePath, PubFilepath):
global private_key, public_key
with open(PrivFilePath, 'rb') as PrivFile:
private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, PrivFile.read(), None) #loading private key from PEM file in pkey() object form
logger.debug("Private key has been loaded from the PEM file.")
with open(PubFilepath, 'rb') as PubFile:
public_key = PubFile.read().decode('utf-8') #assigning Base64 encoded string (PEM format) of the public key from PEM file
logger.debug("Public key has been loaded from the PEM file and decoded to Base64.")
#---------------------- Function writes the Private and Public keys into their respective PEM files------------------------#
def Write_Key_Pair_To_PEM_File(PrivFilePath, PubFilepath):
global private_key, public_key
with open(PrivFilePath, 'wb') as PrivFile:
PrivFile.write(private_key) #writing private key to PEM file
logger.debug("Private key has been written to the PEM file.")
with open(PrivFilePath, 'rb') as PrivFile:
private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, PrivFile.read(), None) #reassiging private key content to variable in pkey() object form
logger.debug("Private key has been configured in the form of pkey() object.")
with open(PubFilepath, 'wb') as PubFile:
PubFile.write(public_key) #writing public key to PEM file
logger.debug("Public key has been written to the PEM file.")
with open(PubFilepath, 'rb') as PubFile:
public_key = PubFile.read().decode('utf-8') #assigning Base64 encoded string (PEM format) of the public key from PEM file
logger.debug("Public key has been loaded from the PEM file and decoded to Base64.")
#---------------------- Function checks the type of operating system on which the script is being run------------------------#
def Check_Operating_System_Type():
OS = platform.system()
return OS
#------- Function checks if the directory exists, else tries to create it. If unsuccessful, it falls back to the current working directory and configures the keys---------#
def Check_If_directory_Exists_And_Then_Configure_Keys():
global private_key, public_key
path = os.path.expanduser("~") #initialise home directory
if Check_Operating_System_Type() == "Windows": #initialise full path for fetching and saving keys based on Operating System
subPath = "\.local\share\signer"
finalPath = path + subPath #path for Windows default directory
elif Check_Operating_System_Type() == "Linux":
subPath = "/.local/share/signer"
finalPath = path + subPath #path for Linux default directory
if os.path.exists(finalPath): #Directory for keys exists
logger.debug("Default directory exists.")
if Check_Operating_System_Type() == "Windows":
PubFilepath = finalPath + "\PublicKey.pem" #initialise path for public key in Windows
PrivFilePath = finalPath + "\PrivateKey.pem" #initialise path for private key in Windows
elif Check_Operating_System_Type() == "Linux":
PubFilepath = finalPath + "/PublicKey.pem" #initialise path for public key in Linux
PrivFilePath = finalPath + "/PrivateKey.pem" #initialise path for private key in Linux
if os.path.exists(PubFilepath) and os.path.exists(PrivFilePath): #check if both the keys exist
logger.debug("Private and Public keys exist.")
Read_Key_Pair_From_PEM_File(PrivFilePath, PubFilepath) #read keys from PEM files and configuring them into variables
else: #if keys don't exist, create them
logger.debug("Private and Public keys does not exist.")
Create_Key_Pair() #generating Private and Public keys
Write_Key_Pair_To_PEM_File(PrivFilePath, PubFilepath) #writing keys into PEM files and configuring them into variables
else: #Directory for keys does not exists
logger.debug("Default directory does not esist.")
try :
logger.debug("Attempting to creating the directory")
os.mkdir(finalPath) # trying to create the directory
except :
pass #executing further script in case directory creation encounters an error
if os.path.exists(finalPath) == True: #checking if directory has been created successfully
logger.debug("Directory created successfully!")
Check_If_directory_Exists_And_Then_Configure_Keys() #recursion
else:
logger.debug("Default directory could not be created due to access permission issue.")
path = os.getcwd() #initialising current working directory as path
logger.debug("Instead of default directory program will now use current working directory.")
if Check_Operating_System_Type() == "Windows":
PubFilepath = path + "\PublicKey.pem" #initialise path in current working directory for public key in Windows
PrivFilePath = path + "\PrivateKey.pem" #initialise path in current working directory for private key in Windows
elif Check_Operating_System_Type() == "Linux":
PubFilepath = path + "/PublicKey.pem" #initialise path in current working directory for public key in Linux
PrivFilePath = path + "/PrivateKey.pem" #initialise path in current working directory for private key in Linux
if os.path.exists(PubFilepath) and os.path.exists(PrivFilePath): #check if both the keys exist
logger.debug("Private and Public keys exist.")
Read_Key_Pair_From_PEM_File(PrivFilePath, PubFilepath) #read keys from PEM files and configuring them into variables
else: #if keys don't exist, create them
logger.debug("Private and Public keys does not exist.")
Create_Key_Pair() #generating Private and Public keys
Write_Key_Pair_To_PEM_File(PrivFilePath, PubFilepath) #writing keys into PEM files and configuring them into variables
#---- Function generates an RFC 4648 compliant Base64 encoded cryptographic signature of the message, calculated using the private key and the SHA256 digest of the message----#
def Signing_The_Message():
global signature, private_key, message
signature = crypto.sign(private_key, message.encode(), "sha256") #forming signature using private key and sha256 digest of message
signature = base64.encodebytes(signature).decode() #Base64 encoding of the cryptographic signature of the message
logger.debug("Signature has been formed and decoded to Base64")
#-------------Function generates JSON compliant to the schema defined in README----------------#
def Form_JSON():
global resultJSON, message, signature, public_key
resultJSON = { "message": message, "signature":signature, "pubkey":public_key} #initialising dictionary complaint with JSON
logger.debug(resultJSON)
resultJSON = json.dumps(resultJSON, indent=3) # serialising into JSON
logger.debug("JSON has been formed and decoded to Base64")
print(resultJSON)
return True
#-------------Main Function----------------#
def Main():
logger.debug("Script has been invoked!")
Consolidate_Message() #generates message from Command Line arguments
Check_Input() #checking input and consolidating message, if as per policy then program will proceed.
if flag == True:
Check_If_directory_Exists_And_Then_Configure_Keys() #checking if directory exists, else we try to create it. If unsuccessful then we use current working directory and configure the keys
Signing_The_Message() #generating RFC 4648 compliant Base64 encoded cryptographic signature of the message, calculated using the private key and the SHA256 digest of the message
Form_JSON() #generating JSON compliant to the schema defined in README
logger.debug("Script execution has completed successfully!")
if __name__ == "__main__":
Main()
| import sys
import os
import platform
import base64
import json
import logging
from OpenSSL import crypto
#---------------------- Initialising Logger------------------------#
logging.basicConfig(filename='crypto-sign-challenge.log', filemode='a', format='%(asctime)s - %(name)s - %(message)s')
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
#---------------------- Initialising Global Variables------------------------#
message = "" #initialising empty message
count = 0 #initialising empty count for number of characters in the message
Type_Key = crypto.TYPE_RSA #initialising key type to RSA, modify to TYPE_DSA for DSA
bits = 2048 #initialising bit size for keys
private_key = "" #initialising empty private key
public_key = "" #initialising empty public key
pkey = "" #initialising empty pkey object
signature = "" #initialising empty signature
resultJSON = {} #initialising empty JSON response
flag = False #set global flag as false
numberOfArguments = len(sys.argv) #total arguments passed to the script
#---------------------- Function generates the message from the command-line arguments------------------------#
def Consolidate_Message():
global message, numberOfArguments
for i in range(1, numberOfArguments): #Forming message from the argument
message = message + sys.argv[i] + " "
logger.debug("Constructing the message from parsed arguments.")
#---------------------- Function checks the message against the min and max character limits------------------------#
def Check_Input():
global message, count, flag
for i in message: #counting number of characters in the message
count = count + 1
count = count - 1 #removing the count for the space character at the end of the message
if(count<=1 or count>250): #giving alert if number of characters are insufficient or more than required.
logger.debug("The message must be more than 1 character and less than 250 characters.")
flag = False
logger.debug("Script execution is unsuccessfull!")
else:
flag = True
logger.debug("The message is within acceptable parameters")
#---------------------- Function generates the Private and Public key pair------------------------#
def Create_Key_Pair():
global Type_Key, bits, private_key, public_key, pkey
pkey = crypto.PKey()
pkey.generate_key(Type_Key, bits) #generating key pair
private_key = crypto.dump_privatekey(crypto.FILETYPE_PEM , pkey , None) #saving private key contents in byte form
public_key = crypto.dump_publickey(crypto.FILETYPE_PEM , pkey) #saving public key contents in byte form
logger.debug("Key pair has been generated")
#---------------------- Function reads the Private and Public keys from their respective PEM files------------------------#
def Read_Key_Pair_From_PEM_File(PrivFilePath, PubFilepath):
global private_key, public_key
with open(PrivFilePath, 'rb') as PrivFile:
private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, PrivFile.read(), None) #loading private key from PEM file in pkey() object form
logger.debug("Private key has been loaded from the PEM file.")
with open(PubFilepath, 'rb') as PubFile:
public_key = PubFile.read().decode('utf-8') #assigning Base64 encoded string (PEM format) of the public key from PEM file
logger.debug("Public key has been loaded from the PEM file and decoded to Base64.")
#---------------------- Function writes the Private and Public keys into their respective PEM files------------------------#
def Write_Key_Pair_To_PEM_File(PrivFilePath, PubFilepath):
global private_key, public_key
with open(PrivFilePath, 'wb') as PrivFile:
PrivFile.write(private_key) #writing private key to PEM file
logger.debug("Private key has been written to the PEM file.")
with open(PrivFilePath, 'rb') as PrivFile:
private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, PrivFile.read(), None) #reassiging private key content to variable in pkey() object form
logger.debug("Private key has been configured in the form of pkey() object.")
with open(PubFilepath, 'wb') as PubFile:
PubFile.write(public_key) #writing public key to PEM file
logger.debug("Public key has been written to the PEM file.")
with open(PubFilepath, 'rb') as PubFile:
public_key = PubFile.read().decode('utf-8') #assigning Base64 encoded string (PEM format) of the public key from PEM file
logger.debug("Public key has been loaded from the PEM file and decoded to Base64.")
#---------------------- Function checks the type of operating system on which the script is being run------------------------#
def Check_Operating_System_Type():
OS = platform.system()
return OS
#------- Function checks if the directory exists, else tries to create it. If unsuccessful, it falls back to the current working directory and configures the keys---------#
def Check_If_directory_Exists_And_Then_Configure_Keys():
global private_key, public_key
path = os.path.expanduser("~") #initialise home directory
if Check_Operating_System_Type() == "Windows": #initialise full path for fetching and saving keys based on Operating System
subPath = "\.local\share\signer"
finalPath = path + subPath #path for Windows default directory
elif Check_Operating_System_Type() == "Linux":
subPath = "/.local/share/signer"
finalPath = path + subPath #path for Linux default directory
if os.path.exists(finalPath): #Directory for keys exists
logger.debug("Default directory exists.")
if Check_Operating_System_Type() == "Windows":
PubFilepath = finalPath + "\PublicKey.pem" #initialise path for public key in Windows
PrivFilePath = finalPath + "\PrivateKey.pem" #initialise path for private key in Windows
elif Check_Operating_System_Type() == "Linux":
PubFilepath = finalPath + "/PublicKey.pem" #initialise path for public key in Linux
PrivFilePath = finalPath + "/PrivateKey.pem" #initialise path for private key in Linux
if os.path.exists(PubFilepath) and os.path.exists(PrivFilePath): #check if both the keys exist
logger.debug("Private and Public keys exist.")
Read_Key_Pair_From_PEM_File(PrivFilePath, PubFilepath) #read keys from PEM files and configuring them into variables
else: #if keys don't exist, create them
logger.debug("Private and Public keys does not exist.")
Create_Key_Pair() #generating Private and Public keys
Write_Key_Pair_To_PEM_File(PrivFilePath, PubFilepath) #writing keys into PEM files and configuring them into variables
else: #Directory for keys does not exists
logger.debug("Default directory does not esist.")
try :
logger.debug("Attempting to creating the directory")
os.mkdir(finalPath) # trying to create the directory
except :
pass #executing further script in case directory creation encounters an error
if os.path.exists(finalPath) == True: #checking if directory has been created successfully
logger.debug("Directory created successfully!")
Check_If_directory_Exists_And_Then_Configure_Keys() #recursion
else:
logger.debug("Default directory could not be created due to access permission issue.")
path = os.getcwd() #initialising current working directory as path
logger.debug("Instead of default directory program will now use current working directory.")
if Check_Operating_System_Type() == "Windows":
PubFilepath = path + "\PublicKey.pem" #initialise path in current working directory for public key in Windows
PrivFilePath = path + "\PrivateKey.pem" #initialise path in current working directory for private key in Windows
elif Check_Operating_System_Type() == "Linux":
PubFilepath = path + "/PublicKey.pem" #initialise path in current working directory for public key in Linux
PrivFilePath = path + "/PrivateKey.pem" #initialise path in current working directory for private key in Linux
if os.path.exists(PubFilepath) and os.path.exists(PrivFilePath): #check if both the keys exist
logger.debug("Private and Public keys exist.")
Read_Key_Pair_From_PEM_File(PrivFilePath, PubFilepath) #read keys from PEM files and configuring them into variables
else: #if keys don't exist, create them
logger.debug("Private and Public keys does not exist.")
Create_Key_Pair() #generating Private and Public keys
Write_Key_Pair_To_PEM_File(PrivFilePath, PubFilepath) #writing keys into PEM files and configuring them into variables
#---- Function generates an RFC 4648 compliant Base64 encoded cryptographic signature of the message, calculated using the private key and the SHA256 digest of the message----#
def Signing_The_Message():
global signature, private_key, message
signature = crypto.sign(private_key, message.encode(), "sha256") #forming signature using private key and sha256 digest of message
signature = base64.encodebytes(signature).decode() #Base64 encoding of the cryptographic signature of the message
logger.debug("Signature has been formed and decoded to Base64")
#-------------Function generates JSON compliant to the schema defined in README----------------#
def Form_JSON():
global resultJSON, message, signature, public_key
resultJSON = { "message": message, "signature":signature, "pubkey":public_key} #initialising dictionary complaint with JSON
logger.debug(resultJSON)
resultJSON = json.dumps(resultJSON, indent=3) # serialising into JSON
logger.debug("JSON has been formed and decoded to Base64")
print(resultJSON)
return True
#-------------Main Function----------------#
def Main():
logger.debug("Script has been invoked!")
Consolidate_Message() #generates message from Command Line arguments
Check_Input() #checking input and consolidating message, if as per policy then program will proceed.
if flag == True:
Check_If_directory_Exists_And_Then_Configure_Keys() #checking if directory exists, else we try to create it. If unsuccessful then we use current working directory and configure the keys
Signing_The_Message() #generating RFC 4648 compliant Base64 encoded cryptographic signature of the message, calculated using the private key and the SHA256 digest of the message
Form_JSON() #generating JSON compliant to the schema defined in README
logger.debug("Script execution has completed successfully!")
if __name__ == "__main__":
Main() | en | 0.654046 | #---------------------- Initialising Logger------------------------# #---------------------- Initialising Global Variables------------------------# #initialising empty message #initialising empty count for number of characters in the message #initialising key type to RSA, modify to TYPE_DSA for DSA #initialising bit size for keys #initialising empty private key #initialising empty public key #initialising empty pkey object #initialising empty signature #initialising empty JSON response #set global flag as false #total arguments passed to the script #---------------------- Funtion generates message from Command Line arguments------------------------# #Forming message from the argument #---------------------- Funtion parses arguments, checks min and max character limits and forms message------------------------# #counting number of characters in the message #removing the count for the space character at the end of the message #giving alert if number of characters are insufficient or more than required. #---------------------- Funtion generates Private and Public key pairs------------------------# #generating key pair #saving private key contents in byte form #saving public key contents in byte form #---------------------- Funtion reads Private and Public keys from their respective PEM files------------------------# #loading private key from PEM file in pkey() object form #assigning Base64 encoded string (PEM format) of the public key from PEM file #---------------------- Funtion writes Private and Public keys into their respective PEM files------------------------# #writing private key to PEM file #reassiging private key content to variable in pkey() object form #writing public key to PEM file #assigning Base64 encoded string (PEM format) of the public key from PEM file #---------------------- Funtion checks the type of operating system on which script is being run------------------------# #------- Funtion checks if directory exists, else tries to create it. 
If unsuccessful then uses current working directory and configure keys---------# #initialise home directory #initialise full path for fetching and saving keys based on Operating System #path for Windows default directory #path for Linux default directory #Directory for keys exists #initialise path for public key in Windows #initialise path for private key in Windows #initialise path for public key in Linux #initialise path for private key in Linux #check if both the keys exist #read keys from PEM files and configuring them into variables #if keys don't exist, create them #generating Private and Public keys #writing keys into PEM files and configuring them into variables #Directory for keys does not exists # trying to create the directory #executing further script in case directory creation encounters an error #checking if directory has been created successfully #recursion #initialising current working directory as path #initialise path in current working directory for public key in Windows #initialise path in current working directory for private key in Windows #initialise path in current working directory for public key in Linux #initialise path in current working directory for private key in Linux #check if both the keys exist #read keys from PEM files and configuring them into variables #if keys don't exist, create them #generating Private and Public keys #writing keys into PEM files and configuring them into variables #---- Funtion generates RFC 4648 compliant Base64 encoded cryptographic signature of the message, calculated using the private key and the SHA256 digest of the message----# #forming signature using private key and sha256 digest of message #Base64 encoding of the cryptographic signature of the message #-------------Function generates JSON compliant to the schema defined in README----------------# #initialising dictionary complaint with JSON # serialising into JSON #-------------Main Function----------------# #generates message from Command Line arguments #checking input and consolidating message, if as per policy then program will proceed. #checking if directory exists, else we try to create it. If unsuccessful then we use current working directory and configure the keys #generating RFC 4648 compliant Base64 encoded cryptographic signature of the message, calculated using the private key and the SHA256 digest of the message #generating JSON compliant to the schema defined in README | 2.917424 | 3 |
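
The script prints a `{message, signature, pubkey}` JSON document. Because pyOpenSSL's `crypto.sign(key, data, "sha256")` on an RSA key produces a PKCS#1 v1.5 signature over the SHA-256 digest, a consumer can check the printed payload with the `cryptography` package, as sketched below. This verifier is illustrative only and is not part of the script above; it assumes the `cryptography` package is installed.

```python
# Illustrative verifier for the JSON payload printed by crypto_sign_challenge.py.
import base64
import json

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_public_key


def verify_payload(payload_json: str) -> bool:
    payload = json.loads(payload_json)
    public_key = load_pem_public_key(payload["pubkey"].encode("utf-8"))
    # b64decode ignores the newlines that base64.encodebytes inserted
    signature = base64.b64decode(payload["signature"])
    try:
        public_key.verify(
            signature,
            payload["message"].encode("utf-8"),
            padding.PKCS1v15(),   # crypto.sign() on an RSA key uses PKCS#1 v1.5
            hashes.SHA256(),
        )
        return True
    except InvalidSignature:
        return False
```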
tests/test_structure.py | loyada/typedpy | 14 | 6630754 | <reponame>loyada/typedpy
import enum
import sys
import typing
from dataclasses import dataclass
import pytest
from pytest import raises
from typedpy import (
Structure,
DecimalNumber,
PositiveInt,
String,
Enum,
Field,
Integer,
Map,
Array,
AnyOf,
NoneField,
DateField,
DateTime,
)
from typedpy.structures import (
FinalStructure,
ImmutableStructure,
unique,
MAX_NUMBER_OF_INSTANCES_TO_VERIFY_UNIQUENESS,
)
class Venue(enum.Enum):
NYSE = enum.auto()
CBOT = enum.auto()
AMEX = enum.auto()
NASDAQ = enum.auto()
class Trader(Structure):
lei: String(pattern="[0-9A-Z]{18}[0-9]{2}$")
alias: String(maxLength=32)
def test_optional_fields():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
buyer: Trader
seller: Trader
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
assert set(Trade._required) == {"notional", "quantity", "symbol", "buyer", "seller"}
Trade(
notional=1000,
quantity=150,
symbol="APPL",
buyer=Trader(lei="12345678901234567890", alias="GSET"),
seller=Trader(lei="12345678901234567888", alias="MSIM"),
timestamp="01/30/20 05:35:35",
)
def test_optional_fields_required_overrides():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
buyer: Trader
seller: Trader
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
_required = []
Trade()
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_field_by_name_finds_annotated_fields():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
buyer: Trader
my_list: list[str]
seller: typing.Optional[Trader]
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
_required = []
field_names = Trade.get_all_fields_by_name().keys()
for f in {"notional", "quantity", "seller", "symbol", "buyer", "my_list"}:
assert f in field_names
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_iterating_over_wrapped_structure():
class Foo(Structure):
wrapped: list[str]
_additionalProperties = False
foo = Foo(wrapped=["x", "y", "z"])
assert list(foo) == foo.wrapped
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_iterating_over_wrapped_structure_map():
class Foo(Structure):
wrapped: Map[str, int]
_additionalProperties = False
foo = Foo(wrapped={"x": 2, "y": 3, "z": 4})
assert list(foo) == ["x", "y", "z"]
def test_cast():
class Foo(Structure):
a: int
b: int
class Bar(Foo, ImmutableStructure):
s: typing.Optional[str]
bar = Bar(a=1, b=2, s="xyz")
foo: Foo = bar.cast_to(Foo)
assert foo == Foo(a=1, b=2)
assert foo.cast_to(Bar) == Bar(a=1, b=2)
def test_cast_invalid():
class Foo(Structure):
a: int
b: int
class Bar(Foo, ImmutableStructure):
s: str
foo = Foo(a=1, b=2)
with raises(TypeError):
foo.cast_to(Bar)
with raises(TypeError):
foo.cast_to(DateTime)
def test_iterating_over_wrapped_structure_err():
class Foo(Structure):
wrapped: int
_additionalProperties = False
foo = Foo(wrapped=4)
with raises(TypeError) as excinfo:
assert list(foo) == foo.wrapped
assert "Foo is not a wrapper of an iterable" in str(excinfo.value)
def test_optional_fields_required_overrides1():
with raises(ValueError) as excinfo:
class Trade(Structure):
venue: Enum[Venue]
comment: String
_optional = ["venue"]
_required = ["venue"]
assert (
"optional cannot override prior required in the class or in a base class"
in str(excinfo.value)
)
@pytest.fixture(scope="session")
def Point():
from math import sqrt
class PointClass:
def __init__(self, x, y):
self.x = x
self.y = y
def size(self):
return sqrt(self.x ** 2 + self.y ** 2)
return PointClass
def test_field_of_class(Point):
class Foo(Structure):
i: int
point: Field[Point]
foo = Foo(i=5, point=Point(3, 4))
assert foo.point.size() == 5
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_ignore_none(Point):
class Foo(Structure):
i: list[int]
maybe_date: typing.Optional[DateField]
_ignore_none = True
assert Foo(i=[5], maybe_date=None).i == [5]
assert Foo(i=[1]).maybe_date is None
assert Foo(i=[1], maybe_date=None).i[0] == 1
assert Foo(i=[5], maybe_date="2020-01-31").i[0] == 5
with raises(ValueError):
assert Foo(i=[5], maybe_date="2020-01-31a")
def test_do_not_ignore_none(Point):
class Foo(Structure):
i = Integer
point: Field[Point]
_ignore_none = False
with raises(TypeError) as excinfo:
Foo(i=None, point=Point(3, 4))
assert ": Expected <class 'int'>; Got None" in str(excinfo.value)
def test_do_not_ignore_none_for_required_fields(Point):
class Foo(Structure):
i: int
date: typing.Optional[DateField]
_ignore_none = True
with raises(TypeError) as excinfo:
Foo(i=None)
assert ": Expected <class 'int'>; Got None" in str(excinfo.value)
def test_field_of_class_typeerror(Point):
class Foo(Structure):
i: int
point: Field[Point]
with raises(TypeError) as excinfo:
Foo(i=5, point="xyz")
assert (
"point: Expected <class 'tests.test_structure.Point.<locals>.PointClass'>; Got 'xyz'"
in str(excinfo.value)
)
def test_using_arbitrary_class_in_anyof(Point):
class Foo(Structure):
i: int
point: AnyOf[Point, int]
assert Foo(i=1, point=2).point == 2
def test_using_arbitrary_class_in_union(Point):
class Foo(Structure):
i: int
point: typing.Union[Point, int]
assert Foo(i=1, point=2).point == 2
def test_optional(Point):
class Foo(Structure):
i: int
point: typing.Optional[Point]
assert Foo(i=1).point is None
assert Foo(i=1, point=None).point is None
foo = Foo(i=1, point=Point(3, 4))
assert foo.point.size() == 5
foo.point = None
assert foo.point is None
foo.point = Point(3, 4)
assert foo.point.size() == 5
def test_optional_err(Point):
class Foo(Structure):
i: int
point: typing.Optional[Point]
with raises(ValueError) as excinfo:
Foo(i=1, point=3)
assert "point: 3 Did not match any field option" in str(excinfo.value)
def test_field_of_class_in_map(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Field[Point]]
foo = Foo(i=5, point_by_int={1: Point(3, 4)})
assert foo.point_by_int[1].size() == 5
def test_field_of_class_in_map_simpler_syntax(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Point]
foo = Foo(i=5, point_by_int={1: Point(3, 4)})
assert foo.point_by_int[1].size() == 5
def test_field_of_class_in_map_typerror(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Field[Point]]
with raises(TypeError) as excinfo:
Foo(i=5, point_by_int={1: Point(3, 4), 2: 3})
assert (
"point_by_int_value: Expected <class 'tests.test_structure.Point.<locals>.PointClass'>; Got 3"
in str(excinfo.value)
)
def test_field_of_class_in_map__simpler_syntax_typerror(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Point]
with raises(TypeError) as excinfo:
Foo(i=5, point_by_int={1: Point(3, 4), 2: 3})
assert (
"point_by_int_value: Expected <class 'tests.test_structure.Point.<locals>.PointClass'>; Got 3"
in str(excinfo.value)
)
def test_simple_invalid_type():
with raises(TypeError) as excinfo:
class Foo(Structure):
i = Array["x"]
assert "Unsupported field type in definition: 'x'" in str(excinfo.value)
def test_simple_nonefield_usage():
class Foo(Structure):
a = Array[AnyOf[Integer, NoneField]]
foo = Foo(a=[1, 2, 3, None, 4])
assert foo.a == [1, 2, 3, None, 4]
def test_auto_none_conversion():
class Foo(Structure):
a = Array[AnyOf[Integer, None]]
foo = Foo(a=[1, 2, 3, None, 4])
assert foo.a == [1, 2, 3, None, 4]
def test_final_structure_violation():
class Foo(FinalStructure):
s: str
with raises(TypeError) as excinfo:
class Bar(Foo):
pass
assert "Tried to extend Foo, which is a FinalStructure. This is forbidden" in str(
excinfo.value
)
def test_final_structure_no_violation():
class Foo(Structure):
s: str
class Bar(Foo, FinalStructure):
pass
def test_as_bool():
class Foo(Structure):
s: typing.Optional[str]
i: typing.Optional[int]
assert not (Foo())
assert Foo(i=5)
def test_unique_violation():
@unique
class Foo(Structure):
s: str
i: int
Foo(s="xxx", i=1)
Foo(s="xxx", i=2)
with raises(ValueError) as excinfo:
Foo(s="xxx", i=1)
assert (
"Instance copy in Foo, which is defined as unique. Instance is"
" <Instance of Foo. Properties: i = 1, s = 'xxx'>" in str(excinfo.value)
)
def test_unique_violation_by_update():
@unique
class Foo(Structure):
s: str
i: int
Foo(s="xxx", i=1)
foo = Foo(s="xxx", i=2)
with raises(ValueError) as excinfo:
foo.i = 1
assert (
"Instance copy in Foo, which is defined as unique. Instance is"
" <Instance of Foo. Properties: i = 1, s = 'xxx'>" in str(excinfo.value)
)
def test_unique_violation_stop_checking__if_too_many_instances():
@unique
class Foo(Structure):
i: int
for i in range(MAX_NUMBER_OF_INSTANCES_TO_VERIFY_UNIQUENESS):
Foo(i=i)
Foo(i=1)
Foo(i=1)
def test_copy_with_overrides():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
timestamp = DateTime
buyer: Trader
seller: Trader
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
trade_1 = Trade(
notional=1000,
quantity=150,
symbol="APPL",
buyer=Trader(lei="12345678901234567890", alias="GSET"),
seller=Trader(lei="12345678901234567888", alias="MSIM"),
timestamp="01/30/20 05:35:35",
)
trade_2 = trade_1.shallow_clone_with_overrides(notional=500)
assert trade_2.notional == 500
trade_2.notional = 1000
assert trade_2 == trade_1
def test_defect_required_should_propagate_with_ignore_none():
class Foo(Structure):
a = Integer
class Bar(Foo):
s = String
_ignore_none = True
with raises(TypeError) as excinfo:
Bar(s="x", a=None)
assert "a: Expected <class 'int'>; Got None" in str(excinfo.value)
def test_defect_multiple_inheritance_with_optional_1():
class Foo1(Structure):
a = Integer(default=1)
class Foo2(Structure):
b = Integer
class Bar1(Foo1, Foo2):
pass
class Bar2(Foo2, Foo1):
pass
Bar1(b=1)
Bar2(b=1)
def test_defect_multiple_inheritance_with_optional_2():
class Foo1(Structure):
a = Integer
_optional = ["a"]
class Foo2(Structure):
b = Integer
class Bar1(Foo1, Foo2):
pass
class Bar2(Foo2, Foo1):
pass
Bar1(b=1)
Bar2(b=1)
def test_from_other_class():
class PersonModel:
def __init__(self, *, first_name, age):
self.first_name = first_name
self.age = age
class Person(Structure):
id = Integer
name = String
age = Integer
person_model = PersonModel(first_name="john", age=40)
person = Person.from_other_class(person_model, id=123, name=person_model.first_name)
assert person == Person(name="john", id=123, age=40)
def test_to_other_class():
@dataclass
class PersonDataclass:
name: str
age: int
class Person(Structure):
id = Integer
name = String
person = Person(id=1, name="john").to_other_class(
PersonDataclass, ignore_props=["id"], age=40
)
assert person == PersonDataclass(name="john", age=40)
def test_defaults_are_connected_to_structure():
class Foo(Structure):
a: Array(items=String, default=list)
foo = Foo()
assert foo == Foo(a=[])
assert foo.a == []
foo.a.append("xyz")
assert foo.a == ["xyz"]
def test_invalid_defaults_are_caught():
def factory():
return [1, 2, 3]
with raises(TypeError) as excinfo:
class Foo(Structure):
a: Array(items=String, default=factory)
assert "Invalid default value: [1, 2, 3];" in str(excinfo.value)
def test_default_alternative_style():
def default_factory():
return [1, 2, 3]
class Example(Structure):
i: Array[Integer] = default_factory
assert Example() == Example(i=[1, 2, 3])
def test_inheritance_with_optional_field():
class Foo(Structure):
a: String
b: String
with raises(ValueError) as excinfo:
class Bar(Foo):
c: String
_optional = ["b"]
assert (
"optional cannot override prior required in the class or in a base class"
in str(excinfo.value)
)
def test_classreference_cant_accept_none():
class Foo(Structure):
bar = String
class Bar(Structure):
bar = String
foo = Foo
with raises(TypeError) as excinfo:
Bar(bar="abc", foo=None)
assert (
"foo: Expected <Structure: Foo. Properties: bar = <String>>; Got None"
in str(excinfo.value)
)
def test_required_is_inherited_field():
class A(Structure):
x = Integer
y = Integer
_required = []
class B(A):
_required = ["x", "y"]
with raises(TypeError) as excinfo:
B(y=5)
assert "missing a required argument: 'x'" in str(excinfo.value)
assert B(x=1, y=2).x == 1
def test_dont_allow_assignment_to_non_typedpy_types():
Structure.set_block_non_typedpy_field_assignment()
with raises(TypeError) as excinfo:
class A(Structure):
a = typing.List[str]
assert "a: assigned a non-Typedpy type" in str(excinfo.value)
with raises(TypeError) as excinfo:
class B(Structure):
b = typing.Optional[str]
assert "b: assigned a non-Typedpy type" in str(excinfo.value)
Structure.set_block_non_typedpy_field_assignment(False)
class C(Structure):
b = typing.List[str]
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_dont_allow_assignment_to_non_typedpy_types_pep585():
Structure.set_block_non_typedpy_field_assignment()
with raises(TypeError) as excinfo:
class A(Structure):
a = list[str]
assert "a: assigned a non-Typedpy type" in str(excinfo.value)
Structure.set_block_non_typedpy_field_assignment(False)
class C(Structure):
b = list[str]
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_dont_allow_assignment_to_non_typedpy_types_valid():
Structure.set_block_non_typedpy_field_assignment()
class A(Structure):
a: list[str] = list
assert A().a == []
def test_additional_properties_blocks_additional_properties_even_after_instantiation():
class Foo(Structure):
i: int
_additionalProperties = False
foo = Foo(i=5)
with raises(ValueError) as excinfo:
foo.x = []
assert "Foo: trying to set a non-field 'x' is not allowed" in str(excinfo.value)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_find_fields_with_function_returning_field():
def Name() -> Field:
return String(minLength=10)
class Foo(Structure):
age: int
name: Name
assert set(Foo.get_all_fields_by_name().keys()) == {"age", "name"}
assert str(Foo.name) == "<String. Properties: minLength = 10>"
| import enum
import sys
import typing
from dataclasses import dataclass
import pytest
from pytest import raises
from typedpy import (
Structure,
DecimalNumber,
PositiveInt,
String,
Enum,
Field,
Integer,
Map,
Array,
AnyOf,
NoneField,
DateField,
DateTime,
)
from typedpy.structures import (
FinalStructure,
ImmutableStructure,
unique,
MAX_NUMBER_OF_INSTANCES_TO_VERIFY_UNIQUENESS,
)
class Venue(enum.Enum):
NYSE = enum.auto()
CBOT = enum.auto()
AMEX = enum.auto()
NASDAQ = enum.auto()
class Trader(Structure):
lei: String(pattern="[0-9A-Z]{18}[0-9]{2}$")
alias: String(maxLength=32)
def test_optional_fields():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
buyer: Trader
seller: Trader
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
assert set(Trade._required) == {"notional", "quantity", "symbol", "buyer", "seller"}
Trade(
notional=1000,
quantity=150,
symbol="APPL",
buyer=Trader(lei="12345678901234567890", alias="GSET"),
seller=Trader(lei="12345678901234567888", alias="MSIM"),
timestamp="01/30/20 05:35:35",
)
def test_optional_fields_required_overrides():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
buyer: Trader
seller: Trader
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
_required = []
Trade()
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_field_by_name_finds_annotated_fields():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
buyer: Trader
my_list: list[str]
seller: typing.Optional[Trader]
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
_required = []
field_names = Trade.get_all_fields_by_name().keys()
for f in {"notional", "quantity", "seller", "symbol", "buyer", "my_list"}:
assert f in field_names
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_iterating_over_wrapped_structure():
class Foo(Structure):
wrapped: list[str]
_additionalProperties = False
foo = Foo(wrapped=["x", "y", "z"])
assert list(foo) == foo.wrapped
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_iterating_over_wrapped_structure_map():
class Foo(Structure):
wrapped: Map[str, int]
_additionalProperties = False
foo = Foo(wrapped={"x": 2, "y": 3, "z": 4})
assert list(foo) == ["x", "y", "z"]
def test_cast():
class Foo(Structure):
a: int
b: int
class Bar(Foo, ImmutableStructure):
s: typing.Optional[str]
bar = Bar(a=1, b=2, s="xyz")
foo: Foo = bar.cast_to(Foo)
assert foo == Foo(a=1, b=2)
assert foo.cast_to(Bar) == Bar(a=1, b=2)
def test_cast_invalid():
class Foo(Structure):
a: int
b: int
class Bar(Foo, ImmutableStructure):
s: str
foo = Foo(a=1, b=2)
with raises(TypeError):
foo.cast_to(Bar)
with raises(TypeError):
foo.cast_to(DateTime)
def test_iterating_over_wrapped_structure_err():
class Foo(Structure):
wrapped: int
_additionalProperties = False
foo = Foo(wrapped=4)
with raises(TypeError) as excinfo:
assert list(foo) == foo.wrapped
assert "Foo is not a wrapper of an iterable" in str(excinfo.value)
def test_optional_fields_required_overrides1():
with raises(ValueError) as excinfo:
class Trade(Structure):
venue: Enum[Venue]
comment: String
_optional = ["venue"]
_required = ["venue"]
assert (
"optional cannot override prior required in the class or in a base class"
in str(excinfo.value)
)
@pytest.fixture(scope="session")
def Point():
from math import sqrt
class PointClass:
def __init__(self, x, y):
self.x = x
self.y = y
def size(self):
return sqrt(self.x ** 2 + self.y ** 2)
return PointClass
def test_field_of_class(Point):
class Foo(Structure):
i: int
point: Field[Point]
foo = Foo(i=5, point=Point(3, 4))
assert foo.point.size() == 5
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_ignore_none(Point):
class Foo(Structure):
i: list[int]
maybe_date: typing.Optional[DateField]
_ignore_none = True
assert Foo(i=[5], maybe_date=None).i == [5]
assert Foo(i=[1]).maybe_date is None
assert Foo(i=[1], maybe_date=None).i[0] == 1
assert Foo(i=[5], maybe_date="2020-01-31").i[0] == 5
with raises(ValueError):
assert Foo(i=[5], maybe_date="2020-01-31a")
def test_do_not_ignore_none(Point):
class Foo(Structure):
i = Integer
point: Field[Point]
_ignore_none = False
with raises(TypeError) as excinfo:
Foo(i=None, point=Point(3, 4))
assert ": Expected <class 'int'>; Got None" in str(excinfo.value)
def test_do_not_ignore_none_for_required_fields(Point):
class Foo(Structure):
i: int
date: typing.Optional[DateField]
_ignore_none = True
with raises(TypeError) as excinfo:
Foo(i=None)
assert ": Expected <class 'int'>; Got None" in str(excinfo.value)
def test_field_of_class_typeerror(Point):
class Foo(Structure):
i: int
point: Field[Point]
with raises(TypeError) as excinfo:
Foo(i=5, point="xyz")
assert (
"point: Expected <class 'tests.test_structure.Point.<locals>.PointClass'>; Got 'xyz'"
in str(excinfo.value)
)
def test_using_arbitrary_class_in_anyof(Point):
class Foo(Structure):
i: int
point: AnyOf[Point, int]
assert Foo(i=1, point=2).point == 2
def test_using_arbitrary_class_in_union(Point):
class Foo(Structure):
i: int
point: typing.Union[Point, int]
assert Foo(i=1, point=2).point == 2
def test_optional(Point):
class Foo(Structure):
i: int
point: typing.Optional[Point]
assert Foo(i=1).point is None
assert Foo(i=1, point=None).point is None
foo = Foo(i=1, point=Point(3, 4))
assert foo.point.size() == 5
foo.point = None
assert foo.point is None
foo.point = Point(3, 4)
assert foo.point.size() == 5
def test_optional_err(Point):
class Foo(Structure):
i: int
point: typing.Optional[Point]
with raises(ValueError) as excinfo:
Foo(i=1, point=3)
assert "point: 3 Did not match any field option" in str(excinfo.value)
def test_field_of_class_in_map(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Field[Point]]
foo = Foo(i=5, point_by_int={1: Point(3, 4)})
assert foo.point_by_int[1].size() == 5
def test_field_of_class_in_map_simpler_syntax(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Point]
foo = Foo(i=5, point_by_int={1: Point(3, 4)})
assert foo.point_by_int[1].size() == 5
def test_field_of_class_in_map_typerror(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Field[Point]]
with raises(TypeError) as excinfo:
Foo(i=5, point_by_int={1: Point(3, 4), 2: 3})
assert (
"point_by_int_value: Expected <class 'tests.test_structure.Point.<locals>.PointClass'>; Got 3"
in str(excinfo.value)
)
def test_field_of_class_in_map__simpler_syntax_typerror(Point):
class Foo(Structure):
i: int
point_by_int: Map[Integer, Point]
with raises(TypeError) as excinfo:
Foo(i=5, point_by_int={1: Point(3, 4), 2: 3})
assert (
"point_by_int_value: Expected <class 'tests.test_structure.Point.<locals>.PointClass'>; Got 3"
in str(excinfo.value)
)
def test_simple_invalid_type():
with raises(TypeError) as excinfo:
class Foo(Structure):
i = Array["x"]
assert "Unsupported field type in definition: 'x'" in str(excinfo.value)
def test_simple_nonefield_usage():
class Foo(Structure):
a = Array[AnyOf[Integer, NoneField]]
foo = Foo(a=[1, 2, 3, None, 4])
assert foo.a == [1, 2, 3, None, 4]
def test_auto_none_conversion():
class Foo(Structure):
a = Array[AnyOf[Integer, None]]
foo = Foo(a=[1, 2, 3, None, 4])
assert foo.a == [1, 2, 3, None, 4]
def test_final_structure_violation():
class Foo(FinalStructure):
s: str
with raises(TypeError) as excinfo:
class Bar(Foo):
pass
assert "Tried to extend Foo, which is a FinalStructure. This is forbidden" in str(
excinfo.value
)
def test_final_structure_no_violation():
class Foo(Structure):
s: str
class Bar(Foo, FinalStructure):
pass
def test_as_bool():
class Foo(Structure):
s: typing.Optional[str]
i: typing.Optional[int]
assert not (Foo())
assert Foo(i=5)
def test_unique_violation():
@unique
class Foo(Structure):
s: str
i: int
Foo(s="xxx", i=1)
Foo(s="xxx", i=2)
with raises(ValueError) as excinfo:
Foo(s="xxx", i=1)
assert (
"Instance copy in Foo, which is defined as unique. Instance is"
" <Instance of Foo. Properties: i = 1, s = 'xxx'>" in str(excinfo.value)
)
def test_unique_violation_by_update():
@unique
class Foo(Structure):
s: str
i: int
Foo(s="xxx", i=1)
foo = Foo(s="xxx", i=2)
with raises(ValueError) as excinfo:
foo.i = 1
assert (
"Instance copy in Foo, which is defined as unique. Instance is"
" <Instance of Foo. Properties: i = 1, s = 'xxx'>" in str(excinfo.value)
)
def test_unique_violation_stop_checking__if_too_many_instances():
@unique
class Foo(Structure):
i: int
for i in range(MAX_NUMBER_OF_INSTANCES_TO_VERIFY_UNIQUENESS):
Foo(i=i)
Foo(i=1)
Foo(i=1)
def test_copy_with_overrides():
class Trade(Structure):
notional: DecimalNumber(maximum=10000, minimum=0)
quantity: PositiveInt(maximum=100000, multiplesOf=5)
symbol: String(pattern="[A-Z]+$", maxLength=6)
timestamp = DateTime
buyer: Trader
seller: Trader
venue: Enum[Venue]
comment: String
_optional = ["comment", "venue"]
trade_1 = Trade(
notional=1000,
quantity=150,
symbol="APPL",
buyer=Trader(lei="12345678901234567890", alias="GSET"),
seller=Trader(lei="12345678901234567888", alias="MSIM"),
timestamp="01/30/20 05:35:35",
)
trade_2 = trade_1.shallow_clone_with_overrides(notional=500)
assert trade_2.notional == 500
trade_2.notional = 1000
assert trade_2 == trade_1
def test_defect_required_should_propagate_with_ignore_none():
class Foo(Structure):
a = Integer
class Bar(Foo):
s = String
_ignore_none = True
with raises(TypeError) as excinfo:
Bar(s="x", a=None)
assert "a: Expected <class 'int'>; Got None" in str(excinfo.value)
def test_defect_multiple_inheritance_with_optional_1():
class Foo1(Structure):
a = Integer(default=1)
class Foo2(Structure):
b = Integer
class Bar1(Foo1, Foo2):
pass
class Bar2(Foo2, Foo1):
pass
Bar1(b=1)
Bar2(b=1)
def test_defect_multiple_inheritance_with_optional_2():
class Foo1(Structure):
a = Integer
_optional = ["a"]
class Foo2(Structure):
b = Integer
class Bar1(Foo1, Foo2):
pass
class Bar2(Foo2, Foo1):
pass
Bar1(b=1)
Bar2(b=1)
def test_from_other_class():
class PersonModel:
def __init__(self, *, first_name, age):
self.first_name = first_name
self.age = age
class Person(Structure):
id = Integer
name = String
age = Integer
person_model = PersonModel(first_name="john", age=40)
person = Person.from_other_class(person_model, id=123, name=person_model.first_name)
assert person == Person(name="john", id=123, age=40)
def test_to_other_class():
@dataclass
class PersonDataclass:
name: str
age: int
class Person(Structure):
id = Integer
name = String
person = Person(id=1, name="john").to_other_class(
PersonDataclass, ignore_props=["id"], age=40
)
assert person == PersonDataclass(name="john", age=40)
def test_defaults_are_connected_to_structure():
class Foo(Structure):
a: Array(items=String, default=list)
foo = Foo()
assert foo == Foo(a=[])
assert foo.a == []
foo.a.append("xyz")
assert foo.a == ["xyz"]
def test_invalid_defaults_are_caught():
def factory():
return [1, 2, 3]
with raises(TypeError) as excinfo:
class Foo(Structure):
a: Array(items=String, default=factory)
assert "Invalid default value: [1, 2, 3];" in str(excinfo.value)
def test_default_alternative_style():
def default_factory():
return [1, 2, 3]
class Example(Structure):
i: Array[Integer] = default_factory
assert Example() == Example(i=[1, 2, 3])
def test_inheritance_with_optional_field():
class Foo(Structure):
a: String
b: String
with raises(ValueError) as excinfo:
class Bar(Foo):
c: String
_optional = ["b"]
assert (
"optional cannot override prior required in the class or in a base class"
in str(excinfo.value)
)
def test_classreference_cant_accept_none():
class Foo(Structure):
bar = String
class Bar(Structure):
bar = String
foo = Foo
with raises(TypeError) as excinfo:
Bar(bar="abc", foo=None)
assert (
"foo: Expected <Structure: Foo. Properties: bar = <String>>; Got None"
in str(excinfo.value)
)
def test_required_is_inherited_field():
class A(Structure):
x = Integer
y = Integer
_required = []
class B(A):
_required = ["x", "y"]
with raises(TypeError) as excinfo:
B(y=5)
assert "missing a required argument: 'x'" in str(excinfo.value)
assert B(x=1, y=2).x == 1
def test_dont_allow_assignment_to_non_typedpy_types():
Structure.set_block_non_typedpy_field_assignment()
with raises(TypeError) as excinfo:
class A(Structure):
a = typing.List[str]
assert "a: assigned a non-Typedpy type" in str(excinfo.value)
with raises(TypeError) as excinfo:
class B(Structure):
b = typing.Optional[str]
assert "b: assigned a non-Typedpy type" in str(excinfo.value)
Structure.set_block_non_typedpy_field_assignment(False)
class C(Structure):
b = typing.List[str]
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_dont_allow_assignment_to_non_typedpy_types_pep585():
Structure.set_block_non_typedpy_field_assignment()
with raises(TypeError) as excinfo:
class A(Structure):
a = list[str]
assert "a: assigned a non-Typedpy type" in str(excinfo.value)
Structure.set_block_non_typedpy_field_assignment(False)
class C(Structure):
b = list[str]
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_dont_allow_assignment_to_non_typedpy_types_valid():
Structure.set_block_non_typedpy_field_assignment()
class A(Structure):
a: list[str] = list
assert A().a == []
def test_additional_properties_blocks_additional_properties_even_after_instantiation():
class Foo(Structure):
i: int
_additionalProperties = False
foo = Foo(i=5)
with raises(ValueError) as excinfo:
foo.x = []
assert "Foo: trying to set a non-field 'x' is not allowed" in str(excinfo.value)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_find_fields_with_function_returning_field():
def Name() -> Field:
return String(minLength=10)
class Foo(Structure):
age: int
name: Name
assert set(Foo.get_all_fields_by_name().keys()) == {"age", "name"}
assert str(Foo.name) == "<String. Properties: minLength = 10>" | none | 1 | 2.318874 | 2 |
|
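
Taken together, the tests above exercise the core typedpy workflow: declare a `Structure` with field annotations, then get validation both at construction time and on attribute assignment. Below is a minimal usage sketch distilled from those tests rather than from the library's documentation.

```python
# Minimal usage sketch distilled from the tests above (illustrative only).
from typedpy import Structure, String, PositiveInt


class Person(Structure):
    name: String(maxLength=32)
    age: PositiveInt
    _optional = ["age"]          # "age" may be omitted; "name" is required


person = Person(name="jane")     # valid: the optional field is left out
person.age = 30                  # attribute assignments are validated as well

try:
    Person(name="x" * 100)       # violates maxLength=32 and raises
except (TypeError, ValueError) as exc:
    print(exc)
```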
apps/puistot.py | SanttuVP/streamlit_test_app | 0 | 6630755 | import streamlit as st
import leafmap.kepler as leafmap
import geopandas as gpd
def app():
st.title("Puistoihin liittyvät kaavamääräykset vuodelta 2021")
st.markdown(
"""
Väritä, visualisoi ja filtteröi aineistoa kartan vasemmasta yläkulmasta avautuvan työkalupakin avulla.
"""
)
m = leafmap.Map(center=[60.174, 24.802], zoom=14.5, height=600, widescreen=False)
gdf = gpd.read_file("http://pygeoapi-testing.gispocoding.fi/collections/koonti_koko_suomi_kaavakohteet/items?f=json&limit=1000")
gdf['kaavoitusteema'] = gdf['kaavoitusteema'].astype('str')
gdf['kaavamaarayslaji'] = gdf['kaavamaarayslaji'].astype('str')
gdf = gdf[gdf['kaavamaarayslaji'].str.contains('puisto')]
gdf_map = gdf[["kaavoitusteema", "kaavamaarayslaji", "tekstiarvo", "geometry"]]
m.add_gdf(gdf_map, layer_name="Pyoratiet")
    m.to_streamlit(height=700) | fi | 0.979323 | Väritä, visualisoi ja filtteröi aineistoa kartan vasemmasta yläkulmasta avautuvan työkalupakin avulla. | 2.743249 | 3 |
mediately/tools/admin.py | lifelonglearner127/mediately | 0 | 6630756 | <reponame>lifelonglearner127/mediately
from django.contrib import admin
from mediately.tools.models import Tool, Log
@admin.register(Tool)
class ToolAdmin(admin.ModelAdmin):
pass
@admin.register(Log)
class LogAdmin(admin.ModelAdmin):
pass
|
universal/algos/mpt.py | paulorodriguesxv/universal-portfolios | 1 | 6630757 | <gh_stars>1-10
from ..algo import Algo
import numpy as np
import pandas as pd
from sklearn import covariance
from sklearn.base import BaseEstimator
from scipy import optimize
from cvxopt import solvers, matrix
from six import string_types
import logging
from .. import tools
from .estimators import *
solvers.options['show_progress'] = False
class MPT(Algo):
""" Modern portfolio theory approach. See https://en.wikipedia.org/wiki/Modern_portfolio_theory.
"""
PRICE_TYPE = 'ratio'
def __init__(self, window=None, mu_estimator=None, cov_estimator=None, mu_window=None, cov_window=None,
min_history=None, bounds=None, max_leverage=1., method='mpt', q=0.01, gamma=0.,
optimizer_options=None, force_weights=None, **kwargs):
"""
:param window: Window for calculating mean and variance. Use None for entire history.
:param mu_estimator: TODO
:param cov_estimator: TODO
:param min_history: Use zero weights for first min_periods. Default is 1 year
:param max_leverage: Max leverage to use.
:param method: optimization objective - can be "mpt", "sharpe" and "variance"
:param q: depends on method, e.g. for "mpt" it is risk aversion parameter (higher means lower aversion to risk)
from https://en.wikipedia.org/wiki/Modern_portfolio_theory#Efficient_frontier_with_no_risk-free_asset
q=2 is equivalent to full-kelly, q=1 is equivalent to half kelly
:param gamma: Penalize changing weights (can be number or Series with individual weights such as fees)
"""
super().__init__(min_history=min_history, **kwargs)
mu_window = mu_window or window
cov_window = cov_window or window
self.method = method
self.q = q
self.gamma = gamma
self.bounds = bounds or {}
self.force_weights = force_weights
self.max_leverage = max_leverage
self.optimizer_options = optimizer_options or {}
if bounds and max_leverage != 1:
            raise NotImplementedError('max_leverage cannot be used with bounds; consider removing max_leverage and expressing the limit via bounds')
if cov_estimator is None:
cov_estimator = 'empirical'
if isinstance(cov_estimator, string_types):
if cov_estimator == 'empirical':
# use pandas covariance in init_step
cov_estimator = covariance.EmpiricalCovariance()
elif cov_estimator == 'ledoit-wolf':
cov_estimator = covariance.LedoitWolf()
elif cov_estimator == 'graph-lasso':
cov_estimator = covariance.GraphLasso()
elif cov_estimator == 'oas':
cov_estimator = covariance.OAS()
elif cov_estimator == 'single-index':
cov_estimator = SingleIndexCovariance()
else:
                raise NotImplementedError('Unknown covariance estimator {}'.format(cov_estimator))
# handle sklearn models
if isinstance(cov_estimator, BaseEstimator):
cov_estimator = CovarianceEstimator(cov_estimator, window=cov_window)
if mu_estimator is None:
mu_estimator = SharpeEstimator()
if isinstance(mu_estimator, string_types):
if mu_estimator == 'historical':
mu_estimator = HistoricalEstimator(window=mu_window)
elif mu_estimator == 'sharpe':
mu_estimator = SharpeEstimator()
else:
                raise NotImplementedError('Unknown mu estimator {}'.format(mu_estimator))
self.cov_estimator = cov_estimator
self.mu_estimator = mu_estimator
def init_weights(self, columns):
b = np.array([0. if c == 'CASH' else 1. for c in columns])
return b / b.sum()
def init_step(self, X):
# set min history to 1 year
if not self.min_history:
self.min_history = tools.freq(X.index)
# replace covariance estimator with empirical covariance and precompute it
if isinstance(self.cov_estimator, covariance.EmpiricalCovariance):
class EmpiricalCov(object):
""" Behave like sklearn covariance estimator. """
allow_nan = True
def __init__(self, X, window, min_history):
self.C = tools.rolling_cov_pairwise(X, window=window, min_periods=min_history)
def fit(self, X):
# get sigma matrix
x = X.iloc[-1]
sigma = self.C[x.name]
# make sure sigma is properly indexed
sigma = sigma.reindex(index=x.index).reindex(columns=x.index)
self.covariance_ = sigma.values
return self
self.cov_estimator = CovarianceEstimator(EmpiricalCov(X, self.cov_estimator.window, self.min_history))
def estimate_mu_sigma_sh(self, S):
X = self._convert_prices(S, self.PRICE_TYPE, self.REPLACE_MISSING)
sigma = self.cov_estimator.fit(X - 1)
mu = self.mu_estimator.fit(X, sigma)
vol = np.sqrt(np.diag(sigma))
sh = (mu - self.mu_estimator.rfr) / vol
sh[vol == 0] = 0.
return mu, sigma, sh
def portfolio_mu(self, last_b, mu):
return (last_b * mu).sum()
def portfolio_vol(self, last_b, sigma):
w = last_b.values
sigma = sigma.reindex(index=last_b.index, columns=last_b.index).values
return np.sqrt((w @ sigma @ w))
def portfolio_gradient(self, last_b, mu, sigma, q=None, decompose=False):
""" Calculate gradient for given objective function. Can be used to determine which stocks
should be added / removed from portfolio.
"""
q = q or self.q
w = last_b.values
mu = mu.values
sigma = sigma.values
p_vol = np.sqrt(w @ sigma @ w)
p_mu = w @ mu
if self.method == 'sharpe':
grad_sharpe = mu.T / p_vol
grad_vol = -sigma * w.T * p_mu / p_vol**3
grad_sharpe = pd.Series(np.array(grad_sharpe).ravel(), index=last_b.index)
grad_vol = pd.Series(np.array(grad_vol).ravel(), index=last_b.index)
if decompose:
return grad_sharpe, grad_vol
else:
return grad_sharpe + grad_vol
elif self.method == 'mpt':
grad_mu = pd.Series(np.array(mu).ravel(), index=last_b.index)
grad_sigma = pd.Series((sigma @ w).ravel(), index=last_b.index)
grad_vol = pd.Series(np.array(-sigma @ w / p_vol).ravel(), index=last_b.index)
if decompose:
return grad_mu, grad_vol
else:
return q * grad_mu - 2 * grad_sigma
else:
            raise NotImplementedError('Method {} not yet implemented'.format(self.method))
def step(self, x, last_b, history, **kwargs):
# get sigma and mu estimates
X = history
if self.bounds.keys() - X.columns - {'all'}:
raise Exception(f'Bounds for undefined symbols {self.bounds.keys() - X.columns - set(["all"])}')
# remove assets with NaN values
# cov_est = self.cov_estimator.cov_est
# if hasattr(cov_est, 'allow_nan') and cov_est.allow_nan:
# na_assets = (X.notnull().sum() < self.min_history).values
# else:
# na_assets = X.isnull().any().values
# check NA assets
na_assets = (X.notnull().sum() < self.min_history).values
if any(na_assets):
raise Exception('Assets containing null values: {}'.format(X.columns[na_assets]))
X = X.iloc[:, ~na_assets]
x = x[~na_assets]
last_b = last_b[~na_assets]
# get sigma and mu estimations
sigma = self.cov_estimator.fit(X - 1)
mu = self.mu_estimator.fit(X, sigma)
ss = pd.Series(np.diag(sigma), index=sigma.columns)
assert (mu.index == X.columns).all()
# make Series from gamma
gamma = self.gamma
if isinstance(gamma, float):
gamma = x * 0 + gamma
elif callable(gamma):
# use gamma as a function
pass
else:
gamma = gamma.reindex(x.index)
gamma_null = gamma[gamma.isnull()]
assert len(gamma_null) == 0, 'gamma is missing values for {}'.format(gamma_null.index)
# find optimal portfolio
last_b = pd.Series(last_b, index=x.index, name=x.name)
b = self.optimize(mu, sigma, q=self.q, gamma=gamma, max_leverage=self.max_leverage, last_b=last_b, **kwargs)
b = pd.Series(b, index=X.columns).reindex(history.columns, fill_value=0.)
return b
def optimize(self, mu, sigma, q, gamma, max_leverage, last_b, **kwargs):
if self.method == 'mpt':
return self._optimize_mpt(mu, sigma, q, gamma, last_b, **kwargs)
elif self.method == 'sharpe':
return self._optimize_sharpe(mu, sigma, q, gamma, max_leverage, last_b, **kwargs)
elif self.method == 'variance':
return self._optimize_variance(mu, sigma, q, gamma, max_leverage, last_b, **kwargs)
else:
raise Exception('Unknown method {}'.format(self.method))
def _optimize_sharpe(self, mu, sigma, q, gamma, max_leverage, last_b):
""" Maximize sharpe ratio b.T * mu / sqrt(b.T * sigma * b + q) """
mu = np.matrix(mu)
sigma = np.matrix(sigma)
def maximize(bb):
if callable(gamma):
fee_penalization = gamma(pd.Series(bb, index=last_b.index), last_b)
else:
fee_penalization = sum(gamma * abs(bb - last_b))
bb = np.matrix(bb)
return -mu * bb.T / np.sqrt(bb * sigma * bb.T + q) + fee_penalization
if self.allow_cash:
cons = ({'type': 'ineq', 'fun': lambda b: max_leverage - sum(b)},)
else:
cons = ({'type': 'eq', 'fun': lambda b: max_leverage - sum(b)},)
bounds = [(0., max_leverage)] * len(last_b)
if self.max_weight:
bounds = [(max(l, -self.max_weight), min(u, self.max_weight)) for l, u in bounds]
x0 = last_b
MAX_TRIES = 3
for _ in range(MAX_TRIES):
res = optimize.minimize(maximize, x0, bounds=bounds,
constraints=cons, method='slsqp', options=self.optimizer_options)
# it is possible that slsqp gives out-of-bounds error, try it again with different x0
if np.any(res.x < -0.01) or np.any(res.x > max_leverage + 0.01):
x0 = np.random.random(len(res.x))
else:
break
else:
            raise Exception('SLSQP did not find in-bounds weights in {} attempts'.format(MAX_TRIES))
return res.x
def _optimize_mpt(self, mu, sigma, q, gamma, last_b):
""" Minimize b.T * sigma * b - q * b.T * mu """
assert (mu.index == sigma.columns).all()
assert (mu.index == last_b.index).all()
symbols = list(mu.index)
sigma = np.array(sigma)
mu = np.array(mu).T
n = len(symbols)
force_weights = self.force_weights or {}
# portfolio constraints
bounds = self.bounds or {}
if 'all' not in bounds:
bounds['all'] = (0, 1)
G = []
h = []
for i, sym in enumerate(symbols):
# forced weights
if sym in force_weights:
continue
# constraints
lower, upper = bounds.get(sym, bounds['all'])
if lower is not None:
r = np.zeros(n)
r[i] = -1
G.append(r)
h.append(-lower)
if upper is not None:
r = np.zeros(n)
r[i] = 1
G.append(r)
h.append(upper)
# # additional constraints on selling
# if sym not in allow_sell:
# r = np.zeros(n)
# r[i] = -1
# G.append(r)
# h.append(-last_b[i])
G = matrix(np.vstack(G).astype(float))
h = matrix(np.array(h).astype(float))
b = _maximize(mu, sigma, q, n, G, h, symbols, last_b, force_weights)
# try:
# b = maximize(mu, sigma, q)
# except ValueError as e:
# raise e
# b = last_b
# second optimization for fees
if (gamma != 0).any() and (b != last_b).any():
            b = _maximize_with_penalization(b, last_b, mu, sigma, q, gamma)
return b
def _optimize_variance(self, mu, sigma, q, gamma, max_leverage, last_b):
""" Minimize b.T * sigma * b subject to b.T * mu >= q. If you find no such solution,
just maximize return. """
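        # Added note: the QP below uses G = [-I; -mu], h = [0; -q], i.e. long-only
        # weights with a minimum expected return of q; if that is infeasible, the
        # except-branch simply maximizes mu @ b under the leverage cap.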
sigma = np.matrix(sigma)
mu = np.matrix(mu)
def maximize(mu, sigma, q):
n = len(last_b)
P = matrix(2 * sigma)
qq = matrix(np.zeros(n))
G = matrix(np.r_[-np.eye(n), -mu])
h = matrix(np.r_[np.zeros(n), -q])
try:
if max_leverage is None or max_leverage == float('inf'):
sol = solvers.qp(P, qq, G, h)
else:
if self.allow_cash:
G = matrix(np.r_[G, matrix(np.ones(n)).T])
h = matrix(np.r_[h, matrix([self.max_leverage])])
sol = solvers.qp(P, qq, G, h, initvals=last_b)
else:
A = matrix(np.ones(n)).T
b = matrix(np.array([max_leverage]))
sol = solvers.qp(P, qq, G, h, A, b, initvals=last_b)
if sol['status'] == 'unknown':
raise ValueError()
except ValueError:
# no feasible solution - maximize return instead
P = P * 0
qq = matrix(-mu.T)
G = matrix(np.r_[-np.eye(n), matrix(np.ones(n)).T])
h = matrix(np.r_[np.zeros(n), self.max_leverage])
sol = solvers.qp(P, qq, G, h)
return np.squeeze(sol['x'])
b = maximize(mu, sigma, q)
return b
# regularization parameter for singular cases
ALPHA = 0.000001
def _maximize(mu, sigma, q, n, G, h, symbols, last_b, force_weights):
P = matrix(2 * (sigma + ALPHA * np.eye(n)))
q = matrix(-q * mu + 2 * ALPHA * last_b.values)
A = matrix(np.ones(n)).T
b = matrix(np.array([1.]))
for sym, w in force_weights.items():
ix = symbols.index(sym)
a = np.zeros(n)
a[ix] = 1
A = matrix(np.r_[A, matrix(a).T])
b = matrix(np.r_[b, matrix([w])])
sol = solvers.qp(P, q, G, h, A, b, initvals=last_b)
if sol['status'] != 'optimal':
logging.warning("Solution not found for {}, using last weights".format(last_b.name))
return last_b
return np.squeeze(sol['x'])
def _maximize_with_penalization(b, last_b, mu, sigma, q, gamma):
n = len(mu)
c = np.sign(b - last_b)
sigma = matrix(sigma)
mu = matrix(mu)
P = 2 * (sigma + ALPHA * matrix(np.eye(n)))
qq = 2 * sigma * matrix(last_b) - q * mu + matrix(gamma * c)
G = matrix(np.r_[-np.diag(c), np.eye(n), -np.eye(n)])
h = matrix(np.r_[np.zeros(n), 1. - last_b, last_b])
A = matrix(np.ones(n)).T
b = matrix([1. - sum(last_b)])
sol = solvers.qp(P, qq, G, h, A, b, initvals=np.zeros(n))
    return np.squeeze(sol['x']) + np.array(last_b) | en | 0.671207 | 2.451277 | 2 |
ip/ip2geo.py | BlueRhino/small-toys | 0 | 6630758 | <reponame>BlueRhino/small-toys<filename>ip/ip2geo.py
# encoding='utf-8'
"""
use api from https://www.ipvigilante.com/
"""
import time
import requests
def ip2geo(ip_: str, format_='json'):
url = f'https://ipvigilante.com/{format_}/{ip_}'
headers = {
'accept': f'application/{format_}',
'content-type': f'application/{format_}',
}
response = requests.request('GET', url, headers=headers, timeout=5)
return response.content
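# Example usage (added; hedged -- output is raw bytes whose exact shape depends
# on the remote API described in the docstring above):
#   print(ip2geo('8.8.8.8'))            # JSON response body
#   print(ip2geo('8.8.8.8', 'csv'))     # CSV response body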
if __name__ == '__main__':
# aa = ip2geo('192.168.3.11', 'csv')
    with open('ips', mode='r') as ips_file:
        ips = ips_file.readlines()
count = 0
with open(f'res_file+{int(time.time())}', mode='w', encoding='utf-8') as res_file:
for ip in ips:
ip = ip.split(':')[0]
count += 1
print(f'count = {count},ip = {ip}')
res_file.write(ip2geo(ip, 'csv').decode('utf-8')+'\n')
if count % 30 == 0:
                res_file.flush() | en | 0.375502 | # encoding='utf-8' use api from https://www.ipvigilante.com/ # aa = ip2geo('192.168.3.11', 'csv') | 3.02527 | 3 |
Basic python/problem/ScrollView_withFloatLayout.py | Shourov1702040/hpylearners_Python | 7 | 6630759 | <gh_stars>1-10
from kivymd.app import MDApp
from kivy.lang import Builder
KV = """
Screen:
MDToolbar:
title:'Contacts jbsidis'
pos_hint:{'top':1}
md_bg_color: [.2,0,1,.9]
left_action_items : [["menu", lambda x:print(23)]]
right_action_items : [["dots-vertical",lambda x:print(234)]]
elevation:0
FloatLayout:
BoxLayout:
pos_hint: {"center_x": .5, "center_y": .30}
ScrollView:
BoxLayout:
orientation: "vertical"
spacing: dp(60)
size_hint_y: .64
BoxLayout:
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'account'
markup: True
TextInput:
id: email_jbsidis1
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Email"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'phone'
markup: True
TextInput:
id: email_jbsidis2
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Phone"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
TextInput:
id: email_jbsidis3
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Area"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
## MDIconButton:
## #size_hint: .4, .05
## pos_hint: {"center_x": .6, "top": .975}
## theme_text_color: "Custom"
## text_color: [0,0,0,.8]
## icon: 'phone'
## markup: True
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'map-marker'
markup: True
TextInput:
id: email_jbsidis4
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Address"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'a11.png'
markup: True
TextInput:
id: email_jbsidis5
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "State"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
TextInput:
id: email_jbsidis6
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Zipcode"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
## MDIconButton:
## #size_hint: .4, .05
## pos_hint: {"center_x": .6, "top": .975}
## theme_text_color: "Custom"
## text_color: [0,0,0,.8]
## icon: 'phone'
## markup: True
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'message'
markup: True
TextInput:
id: email_jbsidis7
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Email"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'calendar'
markup: True
TextInput:
id: email_jbsidis8
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Birthday"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'message'
markup: True
TextInput:
id: email_jbsidis7
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Email"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'calendar'
markup: True
TextInput:
id: email_jbsidis8
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Birthday"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'message'
markup: True
TextInput:
id: email_jbsidis7
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Email"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'calendar'
markup: True
TextInput:
id: email_jbsidis8
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Birthday"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'message'
markup: True
TextInput:
id: email_jbsidis7
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Email"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'calendar'
markup: True
TextInput:
id: email_jbsidis8
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Birthday"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'message'
markup: True
TextInput:
id: email_jbsidis7
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Email"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'calendar'
markup: True
TextInput:
id: email_jbsidis8
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Birthday"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'message'
markup: True
TextInput:
id: email_jbsidis7
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Email"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
MDIconButton:
#size_hint: .4, .05
pos_hint: {"center_x": .3, "top": .975}
theme_text_color: "Custom"
text_color: [0,0,0,.8]
icon: 'calendar'
markup: True
TextInput:
id: email_jbsidis8
height: dp(40)
pos_hint: {"center_x": .4, "top": .975}
hint_text_color: [0,0,0, 1]
foreground_color: [0,0,0,.4]
hint_text: "Birthday"
background_color: [1,1,1,0]
background_image: ""
background_normal: ""
background_active: ""
multiline: False
size_hint: .5 ,None #.06
canvas.after:
Color:
rgba: [0.0039,0.1921,0.2078,1]
Line:
width: dp(0.5)
rounded_rectangle:
(self.x, self.y, self.width-dp(20), dp(43),\
dp(8),dp(8),dp(8),dp(8),\
dp(50))
BoxLayout:
spacing: dp(30)
FloatLayout:
MDRectangleFlatButton:
pos_hint: {'center_x':.3, 'center_y':.5}
text:'Choose'
font_size:14
halign: "center"
on_release: print(root.ids.email_jbsidis1.text,root.ids.email_jbsidis2.text,root.ids.email_jbsidis3.text,root.ids.email_jbsidis4.text,root.ids.email_jbsidis5.text,root.ids.email_jbsidis6.text,root.ids.email_jbsidis7.text,root.ids.email_jbsidis8.text)
MDRectangleFlatButton:
pos_hint: {'center_x':.7, 'center_y':.5}
text:'Cancel'
font_size:14
halign: "center"
on_release:
root.ids.email_jbsidis1.text=""
root.ids.email_jbsidis2.text=""
root.ids.email_jbsidis3.text=""
root.ids.email_jbsidis4.text=""
root.ids.email_jbsidis5.text=""
root.ids.email_jbsidis6.text=""
root.ids.email_jbsidis7.text=""
root.ids.email_jbsidis8.text=""
FloatLayout:
MDIconButton:
pos_hint: {"center_x": .8, "center_y": 0.945}
icon: "magnify"
theme_text_color: "Custom"
text_color: [1,1,1,1]
"""
class WeatherApp(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
pass
## def on_start(self):
##
## self.root.ids.Test_ss.text = " Dhur bal\nOi salar putera\nMod khaba naki "
def build(self):
self.screen = Builder.load_string(KV)
return self.screen
WeatherApp().run()
dp(8),dp(8),dp(8),dp(8),\ dp(50)) TextInput: id: email_jbsidis6 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Zipcode" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) ## MDIconButton: ## #size_hint: .4, .05 ## pos_hint: {"center_x": .6, "top": .975} ## theme_text_color: "Custom" ## text_color: [0,0,0,.8] ## icon: 'phone' ## markup: True BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'message' markup: True TextInput: id: email_jbsidis7 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Email" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'calendar' markup: True TextInput: id: email_jbsidis8 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Birthday" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'message' markup: True TextInput: id: email_jbsidis7 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Email" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'calendar' markup: True TextInput: id: email_jbsidis8 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Birthday" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'message' markup: True TextInput: id: email_jbsidis7 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Email" background_color: 
[1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'calendar' markup: True TextInput: id: email_jbsidis8 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Birthday" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'message' markup: True TextInput: id: email_jbsidis7 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Email" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'calendar' markup: True TextInput: id: email_jbsidis8 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Birthday" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'message' markup: True TextInput: id: email_jbsidis7 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Email" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'calendar' markup: True TextInput: id: email_jbsidis8 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Birthday" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} 
theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'message' markup: True TextInput: id: email_jbsidis7 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Email" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: MDIconButton: #size_hint: .4, .05 pos_hint: {"center_x": .3, "top": .975} theme_text_color: "Custom" text_color: [0,0,0,.8] icon: 'calendar' markup: True TextInput: id: email_jbsidis8 height: dp(40) pos_hint: {"center_x": .4, "top": .975} hint_text_color: [0,0,0, 1] foreground_color: [0,0,0,.4] hint_text: "Birthday" background_color: [1,1,1,0] background_image: "" background_normal: "" background_active: "" multiline: False size_hint: .5 ,None #.06 canvas.after: Color: rgba: [0.0039,0.1921,0.2078,1] Line: width: dp(0.5) rounded_rectangle: (self.x, self.y, self.width-dp(20), dp(43),\ dp(8),dp(8),dp(8),dp(8),\ dp(50)) BoxLayout: spacing: dp(30) FloatLayout: MDRectangleFlatButton: pos_hint: {'center_x':.3, 'center_y':.5} text:'Choose' font_size:14 halign: "center" on_release: print(root.ids.email_jbsidis1.text,root.ids.email_jbsidis2.text,root.ids.email_jbsidis3.text,root.ids.email_jbsidis4.text,root.ids.email_jbsidis5.text,root.ids.email_jbsidis6.text,root.ids.email_jbsidis7.text,root.ids.email_jbsidis8.text) MDRectangleFlatButton: pos_hint: {'center_x':.7, 'center_y':.5} text:'Cancel' font_size:14 halign: "center" on_release: root.ids.email_jbsidis1.text="" root.ids.email_jbsidis2.text="" root.ids.email_jbsidis3.text="" root.ids.email_jbsidis4.text="" root.ids.email_jbsidis5.text="" root.ids.email_jbsidis6.text="" root.ids.email_jbsidis7.text="" root.ids.email_jbsidis8.text="" FloatLayout: MDIconButton: pos_hint: {"center_x": .8, "center_y": 0.945} icon: "magnify" theme_text_color: "Custom" text_color: [1,1,1,1] ## def on_start(self): ## ## self.root.ids.Test_ss.text = " Dhur bal\nOi salar putera\nMod khaba naki " | 2.247373 | 2 |
pythonteste/desafio71.py | dangiotto/Python | 1 | 6630760 | print('='*30)
print ('{:^30}'.format('BANCO CEV'))
print('='*30)
cedula = 0
resto = 0
while True:
saque =int(input('Qual valor quer sacar ? R$'))
if saque >= 50:
cedula = int(saque/50)
saque -= (cedula*50)
print(f'{cedula} cédulas de R$ 50.')
if saque >= 20:
cedula = int(saque/20)
saque -= (cedula * 20)
print(f'{cedula} cédulas de R$ 20.')
if saque >= 10:
cedula = int(saque/10)
saque -= (cedula * 10)
print(f'{cedula} cédulas de R$ 10.')
    if saque >= 1:
        cedula = int(saque/1)
        print(f'{cedula} cédulas de R$ 1.')
    print('='*30,'\nVOLTE SEMPRE AO BANCO CEV\n','='*30)
    break  # serve a single withdrawal; without this break the loop would prompt forever
| print('='*30)
print ('{:^30}'.format('BANCO CEV'))
print('='*30)
cedula = 0
resto = 0
while True:
saque =int(input('Qual valor quer sacar ? R$'))
if saque >= 50:
cedula = int(saque/50)
saque -= (cedula*50)
print(f'{cedula} cédulas de R$ 50.')
if saque >= 20:
cedula = int(saque/20)
saque -= (cedula * 20)
print(f'{cedula} cédulas de R$ 20.')
if saque >= 10:
cedula = int(saque/10)
saque -= (cedula * 10)
print(f'{cedula} cédulas de R$ 10.')
    if saque >= 1:
        cedula = int(saque/1)
        print(f'{cedula} cédulas de R$ 1.')
    print('='*30,'\nVOLTE SEMPRE AO BANCO CEV\n','='*30)
    break  # serve a single withdrawal; without this break the loop would prompt forever
| none | 1 | 3.703275 | 4 |
|
var/spack/repos/builtin/packages/py-nc-time-axis/package.py | xiki-tempula/spack | 9 | 6630761 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNcTimeAxis(PythonPackage):
"""cftime support for matplotlib axis."""
homepage = "https://github.com/scitools/nc-time-axis"
url = "https://pypi.io/packages/source/n/nc-time-axis/nc-time-axis-1.1.0.tar.gz"
version('1.1.0', sha256='ea9d4f7f9e9189c96f7d320235ac6c4be7f63dc5aa256b3ee5d5cca5845e6e26')
depends_on('py-cftime', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
| # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNcTimeAxis(PythonPackage):
"""cftime support for matplotlib axis."""
homepage = "https://github.com/scitools/nc-time-axis"
url = "https://pypi.io/packages/source/n/nc-time-axis/nc-time-axis-1.1.0.tar.gz"
version('1.1.0', sha256='ea9d4f7f9e9189c96f7d320235ac6c4be7f63dc5aa256b3ee5d5cca5845e6e26')
depends_on('py-cftime', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
| en | 0.650545 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) cftime support for matplotlib axis. | 1.482361 | 1 |
tests/orm/test_authinfos.py | azadoks/aiida-core | 180 | 6630762 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Unit tests for the AuthInfo ORM class."""
from aiida.backends.testbase import AiidaTestCase
from aiida.common import exceptions
from aiida.orm import authinfos
class TestAuthinfo(AiidaTestCase):
"""Unit tests for the AuthInfo ORM class."""
def setUp(self):
super().setUp()
for auth_info in authinfos.AuthInfo.objects.all():
authinfos.AuthInfo.objects.delete(auth_info.pk)
self.auth_info = self.computer.configure() # pylint: disable=no-member
def test_set_auth_params(self):
"""Test the auth_params setter."""
auth_params = {'safe_interval': 100}
self.auth_info.set_auth_params(auth_params)
self.assertEqual(self.auth_info.get_auth_params(), auth_params)
def test_delete(self):
"""Test deleting a single AuthInfo."""
pk = self.auth_info.pk
self.assertEqual(len(authinfos.AuthInfo.objects.all()), 1)
authinfos.AuthInfo.objects.delete(pk)
self.assertEqual(len(authinfos.AuthInfo.objects.all()), 0)
with self.assertRaises(exceptions.NotExistent):
authinfos.AuthInfo.objects.delete(pk)
| # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Unit tests for the AuthInfo ORM class."""
from aiida.backends.testbase import AiidaTestCase
from aiida.common import exceptions
from aiida.orm import authinfos
class TestAuthinfo(AiidaTestCase):
"""Unit tests for the AuthInfo ORM class."""
def setUp(self):
super().setUp()
for auth_info in authinfos.AuthInfo.objects.all():
authinfos.AuthInfo.objects.delete(auth_info.pk)
self.auth_info = self.computer.configure() # pylint: disable=no-member
def test_set_auth_params(self):
"""Test the auth_params setter."""
auth_params = {'safe_interval': 100}
self.auth_info.set_auth_params(auth_params)
self.assertEqual(self.auth_info.get_auth_params(), auth_params)
def test_delete(self):
"""Test deleting a single AuthInfo."""
pk = self.auth_info.pk
self.assertEqual(len(authinfos.AuthInfo.objects.all()), 1)
authinfos.AuthInfo.objects.delete(pk)
self.assertEqual(len(authinfos.AuthInfo.objects.all()), 0)
with self.assertRaises(exceptions.NotExistent):
authinfos.AuthInfo.objects.delete(pk) | en | 0.53866 | # -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### Unit tests for the AuthInfo ORM class. Unit tests for the AuthInfo ORM class. # pylint: disable=no-member Test the auth_params setter. Test deleting a single AuthInfo. | 2.303665 | 2 |
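Outside the test harness, the same AuthInfo surface exercised above can be driven directly. In the sketch below the profile setup and the computer label 'localhost' are assumptions; the configure(), set_auth_params(), get_auth_params() and objects.all() calls are exactly the ones the test uses.

from aiida import load_profile
from aiida.orm import authinfos, load_computer

load_profile()                         # assumes an already-configured AiiDA profile
computer = load_computer('localhost')  # assumes a computer with this label exists
auth_info = computer.configure()       # same call the test performs in setUp()
auth_info.set_auth_params({'safe_interval': 100})
print(auth_info.get_auth_params())
print(len(authinfos.AuthInfo.objects.all()))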
examples/src/dbnd_examples/tutorial_syntax/T31_same_tasks_in_pipeline.py | ipattarapong/dbnd | 0 | 6630763 | import logging
from dbnd import config, new_dbnd_context, pipeline, task
logger = logging.getLogger(__name__)
@task
def operation_A(x_input="x"):
logger.info("Running %s -> operation_x", x_input)
if x_input == "ha":
raise Exception()
return "{} -> operation_x".format(x_input)
@pipeline
def pipe_A_operations(pipe_argument="pipe"):
z = operation_A(pipe_argument)
x = operation_A(z)
y = operation_A(x, task_name="zzz")
# this operation is not wired to any outputs or return values
# but we need it to run, so it will be "related" to pipe_operations automatically
operation_A("standalone")
# you will see y outputs as pipe_argument output in UI
return y
if __name__ == "__main__":
with config({"zzz": {"x_input": "ha2"}}):
operations_task = pipe_A_operations.task(task_version="now")
operations_task.dbnd_run()
| import logging
from dbnd import config, new_dbnd_context, pipeline, task
logger = logging.getLogger(__name__)
@task
def operation_A(x_input="x"):
logger.info("Running %s -> operation_x", x_input)
if x_input == "ha":
raise Exception()
return "{} -> operation_x".format(x_input)
@pipeline
def pipe_A_operations(pipe_argument="pipe"):
z = operation_A(pipe_argument)
x = operation_A(z)
y = operation_A(x, task_name="zzz")
# this operation is not wired to any outputs or return values
# but we need it to run, so it will be "related" to pipe_operations automatically
operation_A("standalone")
# you will see y outputs as pipe_argument output in UI
return y
if __name__ == "__main__":
with config({"zzz": {"x_input": "ha2"}}):
operations_task = pipe_A_operations.task(task_version="now")
operations_task.dbnd_run()
| en | 0.806128 | # this operation is not wired to any outputs or return values # but we need it to run, so it will be "related" to pipe_operations automatically # you will see y outputs as pipe_argument output in UI | 2.532282 | 3 |
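The run pattern at the bottom of the example also accepts the pipeline's own parameter directly. Apart from the illustrative strings, every call in this small variation mirrors one already used above, so it is only a sketch of the same mechanism.

with config({"zzz": {"x_input": "another override"}}):
    task_run = pipe_A_operations.task(pipe_argument="direct value", task_version="now")
    task_run.dbnd_run()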
deepchem/splits/tests/test_splitter.py | weiwang2330/deepchem | 1 | 6630764 | <reponame>weiwang2330/deepchem
"""
Tests for splitter objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rdkit.Chem.Fingerprints import FingerprintMols
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tempfile
import unittest
import numpy as np
import deepchem as dc
from deepchem.data import NumpyDataset
from deepchem.splits import IndexSplitter
from rdkit import Chem, DataStructs
class TestSplitters(unittest.TestCase):
"""
Test some basic splitters.
"""
def test_random_group_split(self):
solubility_dataset = dc.data.tests.load_solubility_data()
groups = [0, 4, 1, 2, 3, 7, 0, 3, 1, 0]
# 0 1 2 3 4 5 6 7 8 9
group_splitter = dc.splits.RandomGroupSplitter(groups)
train_idxs, valid_idxs, test_idxs = group_splitter.split(
solubility_dataset, frac_train=0.5, frac_valid=0.25, frac_test=0.25)
class_ind = [-1] * 10
all_idxs = []
for s in train_idxs + valid_idxs + test_idxs:
all_idxs.append(s)
assert sorted(all_idxs) == list(range(10))
for split_idx, split in enumerate([train_idxs, valid_idxs, test_idxs]):
for s in split:
if class_ind[s] == -1:
class_ind[s] = split_idx
else:
assert class_ind[s] == split_idx
def test_singletask_random_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_index_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
# TODO(rbharath): The IndexSplitter() had a bug with splitting sharded
# data. Make a test for properly splitting of sharded data. Perhaps using
# reshard() to handle this?
def test_singletask_scaffold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_singletask_fingerprint_split(self):
"""
    Test singletask FingerprintSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert (len(solubility_dataset.X) == 10)
scaffold_splitter = dc.splits.FingerprintSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
s1 = set(train_data.ids)
assert valid_data.ids[0] not in s1
assert test_data.ids[0] not in s1
def test_singletask_stratified_split(self):
"""
Test singletask SingletaskStratifiedSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
stratified_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
stratified_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_butina_split(self):
"""
    Test singletask ButinaSplitter class.
"""
solubility_dataset = dc.data.tests.load_butina_data()
scaffold_splitter = dc.splits.ButinaSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset)
print(len(train_data), len(valid_data))
assert len(train_data) == 7
assert len(valid_data) == 3
assert len(test_data) == 0
def test_k_fold_splitter(self):
"""
    Test that we can do a 5-fold split, index-wise, over 5 data points.
"""
ds = NumpyDataset(np.array(range(5)), np.array(range(5)))
index_splitter = IndexSplitter()
K = 5
fold_datasets = index_splitter.k_fold_split(ds, K)
for fold in range(K):
train, cv = fold_datasets[fold][0], fold_datasets[fold][1]
self.assertTrue(cv.X[0] == fold)
train_data = set(list(train.X))
self.assertFalse(fold in train_data)
self.assertEqual(K - 1, len(train))
self.assertEqual(1, len(cv))
def test_singletask_random_k_fold_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = random_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify lengths is 10/k == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
def test_singletask_index_k_fold_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
index_splitter = dc.splits.IndexSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = index_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify lengths is 10/k == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge([x[1] for x in fold_datasets])
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_scaffold_k_fold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = scaffold_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify lengths is 10/k == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge([x[1] for x in fold_datasets])
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_stratified_column_indices(self):
"""
    Test RandomStratifiedSplitter's split method on a simple singletask dataset.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
column_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
split_index = column_indices[0]
# The split index should partition dataset in half.
assert np.count_nonzero(y[:split_index]) == 10
def test_singletask_stratified_column_indices_mask(self):
"""
Test RandomStratifiedSplitter's split method on dataset with mask.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
# Test case where some weights are zero (i.e. masked)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
# Set half the positives to have zero weight
w[:n_positives // 2] = 0
ids = np.arange(n_samples)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
column_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
split_index = column_indices[0]
# There are 10 nonzero actives.
# The split index should partition this into half, so expect 5
w_present = (w != 0)
y_present = y * w_present
assert np.count_nonzero(y_present[:split_index]) == 5
def test_multitask_stratified_column_indices(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 100
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
stratified_splitter = dc.splits.RandomStratifiedSplitter()
split_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
for task in range(n_tasks):
split_index = split_indices[task]
task_actives = np.count_nonzero(y[:, task])
# The split index should partition dataset in half.
assert np.count_nonzero(y[:split_index, task]) == int(task_actives / 2)
def test_multitask_stratified_column_indices_masked(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 200
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Mask half the examples
w[:n_samples // 2] = 0
stratified_splitter = dc.splits.RandomStratifiedSplitter()
split_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
w_present = (w != 0)
y_present = y * w_present
for task in range(n_tasks):
split_index = split_indices[task]
task_actives = np.count_nonzero(y_present[:, task])
# The split index should partition dataset in half.
assert np.count_nonzero(y_present[:split_index, task]) == int(
task_actives / 2)
def test_singletask_stratified_split(self):
"""
Test RandomStratifiedSplitter on a singletask split.
"""
np.random.seed(2314)
# Test singletask case.
n_samples = 20
n_positives = 10
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
dataset_1, dataset_2 = stratified_splitter.split(dataset, frac_split=.5)
# Should have split cleanly in half (picked random seed to ensure this)
assert len(dataset_1) == 10
assert len(dataset_2) == 10
# Check positives are correctly distributed
y_1 = dataset_1.y
assert np.count_nonzero(y_1) == n_positives / 2
y_2 = dataset_2.y
assert np.count_nonzero(y_2) == n_positives / 2
def test_singletask_stratified_k_fold_split(self):
"""
Test RandomStratifiedSplitter k-fold class.
"""
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros(n_samples)
y[:n_positives] = 1
w = np.ones(n_samples)
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
ids_set = set(dataset.ids)
K = 5
fold_datasets = stratified_splitter.k_fold_split(dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold]
# Verify lengths is 100/k == 20
# Note: This wouldn't work for multitask str
# assert len(fold_dataset) == n_samples/K
fold_labels = fold_dataset.y
# Verify that each fold has n_positives/K = 4 positive examples.
assert np.count_nonzero(fold_labels == 1) == n_positives / K
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(fold_datasets)
assert len(merged_dataset) == len(dataset)
assert sorted(merged_dataset.ids) == (sorted(dataset.ids))
def test_multitask_random_split(self):
"""
Test multitask RandomSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_index_split(self):
"""
Test multitask IndexSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
index_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
index_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_scaffold_split(self):
"""
Test multitask ScaffoldSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_stratified_multitask_split(self):
"""
Test multitask RandomStratifiedSplitter class
"""
# sparsity is determined by number of w weights that are 0 for a given
# task structure of w np array is such that each row corresponds to a
# sample. The loaded sparse dataset has many rows with only zeros
sparse_dataset = dc.data.tests.load_sparse_multitask_dataset()
stratified_splitter = dc.splits.RandomStratifiedSplitter()
datasets = stratified_splitter.train_valid_test_split(
sparse_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
train_data, valid_data, test_data = datasets
for dataset_index, dataset in enumerate(datasets):
w = dataset.w
# verify that there are no rows (samples) in weights matrix w
# that have no hits.
assert len(np.where(~w.any(axis=1))[0]) == 0
if __name__ == "__main__":
import nose
nose.run(defaultTest=__name__)
| """
Tests for splitter objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rdkit.Chem.Fingerprints import FingerprintMols
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tempfile
import unittest
import numpy as np
import deepchem as dc
from deepchem.data import NumpyDataset
from deepchem.splits import IndexSplitter
from rdkit import Chem, DataStructs
class TestSplitters(unittest.TestCase):
"""
Test some basic splitters.
"""
def test_random_group_split(self):
solubility_dataset = dc.data.tests.load_solubility_data()
groups = [0, 4, 1, 2, 3, 7, 0, 3, 1, 0]
# 0 1 2 3 4 5 6 7 8 9
group_splitter = dc.splits.RandomGroupSplitter(groups)
train_idxs, valid_idxs, test_idxs = group_splitter.split(
solubility_dataset, frac_train=0.5, frac_valid=0.25, frac_test=0.25)
class_ind = [-1] * 10
all_idxs = []
for s in train_idxs + valid_idxs + test_idxs:
all_idxs.append(s)
assert sorted(all_idxs) == list(range(10))
for split_idx, split in enumerate([train_idxs, valid_idxs, test_idxs]):
for s in split:
if class_ind[s] == -1:
class_ind[s] = split_idx
else:
assert class_ind[s] == split_idx
def test_singletask_random_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_index_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
solubility_dataset)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
# TODO(rbharath): The IndexSplitter() had a bug with splitting sharded
# data. Make a test for properly splitting of sharded data. Perhaps using
# reshard() to handle this?
def test_singletask_scaffold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_singletask_fingerprint_split(self):
"""
    Test singletask FingerprintSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert (len(solubility_dataset.X) == 10)
scaffold_splitter = dc.splits.FingerprintSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
s1 = set(train_data.ids)
assert valid_data.ids[0] not in s1
assert test_data.ids[0] not in s1
def test_singletask_stratified_split(self):
"""
Test singletask SingletaskStratifiedSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
stratified_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
stratified_splitter.train_valid_test_split(
solubility_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
merged_dataset = dc.data.DiskDataset.merge(
[train_data, valid_data, test_data])
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_butina_split(self):
"""
    Test singletask ButinaSplitter class.
"""
solubility_dataset = dc.data.tests.load_butina_data()
scaffold_splitter = dc.splits.ButinaSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
solubility_dataset)
print(len(train_data), len(valid_data))
assert len(train_data) == 7
assert len(valid_data) == 3
assert len(test_data) == 0
def test_k_fold_splitter(self):
"""
    Test that we can do a 5-fold split, index-wise, over 5 data points.
"""
ds = NumpyDataset(np.array(range(5)), np.array(range(5)))
index_splitter = IndexSplitter()
K = 5
fold_datasets = index_splitter.k_fold_split(ds, K)
for fold in range(K):
train, cv = fold_datasets[fold][0], fold_datasets[fold][1]
self.assertTrue(cv.X[0] == fold)
train_data = set(list(train.X))
self.assertFalse(fold in train_data)
self.assertEqual(K - 1, len(train))
self.assertEqual(1, len(cv))
def test_singletask_random_k_fold_split(self):
"""
Test singletask RandomSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
random_splitter = dc.splits.RandomSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = random_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify lengths is 10/k == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
def test_singletask_index_k_fold_split(self):
"""
Test singletask IndexSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
index_splitter = dc.splits.IndexSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = index_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify lengths is 10/k == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge([x[1] for x in fold_datasets])
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_scaffold_k_fold_split(self):
"""
Test singletask ScaffoldSplitter class.
"""
solubility_dataset = dc.data.tests.load_solubility_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
ids_set = set(solubility_dataset.ids)
K = 5
fold_datasets = scaffold_splitter.k_fold_split(solubility_dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold][1]
# Verify lengths is 10/k == 2
assert len(fold_dataset) == 2
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold][1]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge([x[1] for x in fold_datasets])
assert len(merged_dataset) == len(solubility_dataset)
assert sorted(merged_dataset.ids) == (sorted(solubility_dataset.ids))
def test_singletask_stratified_column_indices(self):
"""
    Test RandomStratifiedSplitter's split method on a simple singletask dataset.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
column_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
split_index = column_indices[0]
# The split index should partition dataset in half.
assert np.count_nonzero(y[:split_index]) == 10
def test_singletask_stratified_column_indices_mask(self):
"""
Test RandomStratifiedSplitter's split method on dataset with mask.
"""
# Test singletask case.
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
# Test case where some weights are zero (i.e. masked)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
# Set half the positives to have zero weight
w[:n_positives // 2] = 0
ids = np.arange(n_samples)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
column_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
split_index = column_indices[0]
# There are 10 nonzero actives.
# The split index should partition this into half, so expect 5
w_present = (w != 0)
y_present = y * w_present
assert np.count_nonzero(y_present[:split_index]) == 5
def test_multitask_stratified_column_indices(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 100
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
stratified_splitter = dc.splits.RandomStratifiedSplitter()
split_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
for task in range(n_tasks):
split_index = split_indices[task]
task_actives = np.count_nonzero(y[:, task])
# The split index should partition dataset in half.
assert np.count_nonzero(y[:split_index, task]) == int(task_actives / 2)
def test_multitask_stratified_column_indices_masked(self):
"""
Test RandomStratifiedSplitter split on multitask dataset.
"""
n_samples = 200
n_features = 10
n_tasks = 10
X = np.random.rand(n_samples, n_features)
p = .05 # proportion actives
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
# Mask half the examples
w[:n_samples // 2] = 0
stratified_splitter = dc.splits.RandomStratifiedSplitter()
split_indices = stratified_splitter.get_task_split_indices(
y, w, frac_split=.5)
w_present = (w != 0)
y_present = y * w_present
for task in range(n_tasks):
split_index = split_indices[task]
task_actives = np.count_nonzero(y_present[:, task])
# The split index should partition dataset in half.
assert np.count_nonzero(y_present[:split_index, task]) == int(
task_actives / 2)
def test_singletask_stratified_split(self):
"""
Test RandomStratifiedSplitter on a singletask split.
"""
np.random.seed(2314)
# Test singletask case.
n_samples = 20
n_positives = 10
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
y[:n_positives] = 1
w = np.ones((n_samples, n_tasks))
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
dataset_1, dataset_2 = stratified_splitter.split(dataset, frac_split=.5)
# Should have split cleanly in half (picked random seed to ensure this)
assert len(dataset_1) == 10
assert len(dataset_2) == 10
# Check positives are correctly distributed
y_1 = dataset_1.y
assert np.count_nonzero(y_1) == n_positives / 2
y_2 = dataset_2.y
assert np.count_nonzero(y_2) == n_positives / 2
def test_singletask_stratified_k_fold_split(self):
"""
Test RandomStratifiedSplitter k-fold class.
"""
n_samples = 100
n_positives = 20
n_features = 10
n_tasks = 1
X = np.random.rand(n_samples, n_features)
y = np.zeros(n_samples)
y[:n_positives] = 1
w = np.ones(n_samples)
ids = np.arange(n_samples)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
stratified_splitter = dc.splits.RandomStratifiedSplitter()
ids_set = set(dataset.ids)
K = 5
fold_datasets = stratified_splitter.k_fold_split(dataset, K)
for fold in range(K):
fold_dataset = fold_datasets[fold]
# Verify lengths is 100/k == 20
# Note: This wouldn't work for multitask str
# assert len(fold_dataset) == n_samples/K
fold_labels = fold_dataset.y
# Verify that each fold has n_positives/K = 4 positive examples.
assert np.count_nonzero(fold_labels == 1) == n_positives / K
# Verify that compounds in this fold are subset of original compounds
fold_ids_set = set(fold_dataset.ids)
assert fold_ids_set.issubset(ids_set)
# Verify that no two folds have overlapping compounds.
for other_fold in range(K):
if fold == other_fold:
continue
other_fold_dataset = fold_datasets[other_fold]
other_fold_ids_set = set(other_fold_dataset.ids)
assert fold_ids_set.isdisjoint(other_fold_ids_set)
merged_dataset = dc.data.DiskDataset.merge(fold_datasets)
assert len(merged_dataset) == len(dataset)
assert sorted(merged_dataset.ids) == (sorted(dataset.ids))
def test_multitask_random_split(self):
"""
Test multitask RandomSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
random_splitter = dc.splits.RandomSplitter()
train_data, valid_data, test_data = \
random_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_index_split(self):
"""
Test multitask IndexSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
index_splitter = dc.splits.IndexSplitter()
train_data, valid_data, test_data = \
index_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_multitask_scaffold_split(self):
"""
Test multitask ScaffoldSplitter class.
"""
multitask_dataset = dc.data.tests.load_multitask_data()
scaffold_splitter = dc.splits.ScaffoldSplitter()
train_data, valid_data, test_data = \
scaffold_splitter.train_valid_test_split(
multitask_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
assert len(train_data) == 8
assert len(valid_data) == 1
assert len(test_data) == 1
def test_stratified_multitask_split(self):
"""
Test multitask RandomStratifiedSplitter class
"""
# sparsity is determined by number of w weights that are 0 for a given
# task structure of w np array is such that each row corresponds to a
# sample. The loaded sparse dataset has many rows with only zeros
sparse_dataset = dc.data.tests.load_sparse_multitask_dataset()
stratified_splitter = dc.splits.RandomStratifiedSplitter()
datasets = stratified_splitter.train_valid_test_split(
sparse_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
train_data, valid_data, test_data = datasets
for dataset_index, dataset in enumerate(datasets):
w = dataset.w
# verify that there are no rows (samples) in weights matrix w
# that have no hits.
assert len(np.where(~w.any(axis=1))[0]) == 0
if __name__ == "__main__":
import nose
nose.run(defaultTest=__name__) | en | 0.850426 | Tests for splitter objects. Test some basic splitters. # 0 1 2 3 4 5 6 7 8 9 Test singletask RandomSplitter class. Test singletask IndexSplitter class. # TODO(rbharath): The IndexSplitter() had a bug with splitting sharded # data. Make a test for properly splitting of sharded data. Perhaps using # reshard() to handle this? Test singletask ScaffoldSplitter class. Test singletask Fingerprint class. Test singletask SingletaskStratifiedSplitter class. Test singletask ScaffoldSplitter class. Test that we can 5 fold index wise over 5 points Test singletask RandomSplitter class. # Verify lengths is 10/k == 2 # Verify that compounds in this fold are subset of original compounds # Verify that no two folds have overlapping compounds. Test singletask IndexSplitter class. # Verify lengths is 10/k == 2 # Verify that compounds in this fold are subset of original compounds # Verify that no two folds have overlapping compounds. Test singletask ScaffoldSplitter class. # Verify lengths is 10/k == 2 # Verify that compounds in this fold are subset of original compounds # Verify that no two folds have overlapping compounds. Test RandomStratifiedSplitter's split method on simple singletas. # Test singletask case. # The split index should partition dataset in half. Test RandomStratifiedSplitter's split method on dataset with mask. # Test singletask case. # Test case where some weights are zero (i.e. masked) # Set half the positives to have zero weight # There are 10 nonzero actives. # The split index should partition this into half, so expect 5 Test RandomStratifiedSplitter split on multitask dataset. # proportion actives # The split index should partition dataset in half. Test RandomStratifiedSplitter split on multitask dataset. # proportion actives # Mask half the examples # The split index should partition dataset in half. Test RandomStratifiedSplitter on a singletask split. # Test singletask case. # Should have split cleanly in half (picked random seed to ensure this) # Check positives are correctly distributed Test RandomStratifiedSplitter k-fold class. # Verify lengths is 100/k == 20 # Note: This wouldn't work for multitask str # assert len(fold_dataset) == n_samples/K # Verify that each fold has n_positives/K = 4 positive examples. # Verify that compounds in this fold are subset of original compounds # Verify that no two folds have overlapping compounds. Test multitask RandomSplitter class. Test multitask IndexSplitter class. Test multitask ScaffoldSplitter class. Test multitask RandomStratifiedSplitter class # sparsity is determined by number of w weights that are 0 for a given # task structure of w np array is such that each row corresponds to a # sample. The loaded sparse dataset has many rows with only zeros # verify that there are no rows (samples) in weights matrix w # that have no hits. | 2.600178 | 3 |
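The pattern these tests repeat (construct a splitter, then call train_valid_test_split with fraction keywords) is the same one a user script would follow. The random NumpyDataset below is only a stand-in for a real featurized dataset; the splitter calls themselves are the ones tested above.

import numpy as np
import deepchem as dc

X = np.random.rand(20, 10)
y = np.random.randint(2, size=(20, 1))
dataset = dc.data.NumpyDataset(X, y)

splitter = dc.splits.RandomSplitter()
train, valid, test = splitter.train_valid_test_split(
    dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
print(len(train), len(valid), len(test))  # roughly a 16/2/2 split of the 20 samples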
ci.py | karl-zh/packages | 0 | 6630765 | # coding=utf-8
import sys
import os
import stat
import time
import datetime
import subprocess
import shlex
import shutil
import json
import requests
def execute_command(cmdstring, cwd=None, shell=True):
"""Execute the system command at the specified address."""
if shell:
cmdstring_list = cmdstring
sub = subprocess.Popen(cmdstring_list, cwd=cwd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, shell=shell, bufsize=4096)
stdout_str = ''
while sub.poll() is None:
stdout_str += sub.stdout.read()
time.sleep(0.1)
return stdout_str
def determine_url_valid(url_from_srv):
"""Check the validity of urls."""
headers = {'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Accept': '*/*',
'User-Agent': 'curl/7.54.0'}
try:
for i in range(0, 3):
r = requests.get(url_from_srv, stream=True, headers=headers)
if r.status_code == requests.codes.not_found:
if i == 2:
print("Warning : %s is invalid." % url_from_srv)
return False
time.sleep(1)
else:
break
return True
    except Exception as e:
# print('e.message:%s\t' % e.message)
print('Network connection error or the url : %s is invalid.\n' %
url_from_srv)
def check_json_file(work_root):
"""Check the json file."""
file_count = 1
folder_walk_result = os.walk(work_root)
for path, d, filelist in folder_walk_result:
for filename in filelist:
if filename == 'package.json':
json_pathname = os.path.join(path, 'package.json')
print("\nNo.%d" % file_count)
file_count += 1
if not json_file_content_check(json_pathname):
return False
return True
def json_file_content_check(json_pathname):
"""Check the content of json file."""
with open(json_pathname, 'r+') as f:
json_content = f.read()
package_info = json.loads(json_content)
print(package_info['name'])
if package_info['category'] == '' :
print ('The category of ' + package_info['name'] + ' package is lost.')
return False
if package_info['enable'] == '' :
print ('The enable of ' + package_info['name'] + ' package is lost.')
return False
if package_info['author']['name'] == '' :
print ('The author name of ' + package_info['name'] + ' package is lost.')
return False
if package_info['author']['email'] == '' :
print ('The author email of ' + package_info['name'] + ' package is lost.')
return False
if package_info['license'] == '' :
print ('The license of ' + package_info['name'] + ' package is lost.')
return False
if package_info['repository'] == '' :
print ('The repository of ' + package_info['name'] + ' package is lost.')
return False
else :
if not determine_url_valid(package_info['repository']):
return False
for i in range(0, len(package_info['site'])):
package_version = package_info['site'][i]['version']
package_url = package_info['site'][i]['URL']
print("%s : %s" % (package_version, package_url))
if not package_url[-4:] == '.git':
print(package_info['site'][i]['filename'])
if not determine_url_valid(package_url):
return False
return True
def main():
"""The entry point of the script."""
try:
work_root = os.getcwd()
print(work_root)
if not check_json_file(work_root):
sys.exit(1)
sys.exit(0)
    except Exception as e:
        print('error.message: %s\n\t' % str(e))
sys.exit(1)
if __name__ == '__main__':
main()
| # coding=utf-8
import sys
import os
import stat
import time
import datetime
import subprocess
import shlex
import shutil
import json
import requests
def execute_command(cmdstring, cwd=None, shell=True):
"""Execute the system command at the specified address."""
if shell:
cmdstring_list = cmdstring
sub = subprocess.Popen(cmdstring_list, cwd=cwd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, shell=shell, bufsize=4096)
stdout_str = ''
while sub.poll() is None:
stdout_str += sub.stdout.read()
time.sleep(0.1)
return stdout_str
def determine_url_valid(url_from_srv):
"""Check the validity of urls."""
headers = {'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Accept': '*/*',
'User-Agent': 'curl/7.54.0'}
try:
for i in range(0, 3):
r = requests.get(url_from_srv, stream=True, headers=headers)
if r.status_code == requests.codes.not_found:
if i == 2:
print("Warning : %s is invalid." % url_from_srv)
return False
time.sleep(1)
else:
break
return True
    except Exception as e:
# print('e.message:%s\t' % e.message)
print('Network connection error or the url : %s is invalid.\n' %
url_from_srv)
def check_json_file(work_root):
"""Check the json file."""
file_count = 1
folder_walk_result = os.walk(work_root)
for path, d, filelist in folder_walk_result:
for filename in filelist:
if filename == 'package.json':
json_pathname = os.path.join(path, 'package.json')
print("\nNo.%d" % file_count)
file_count += 1
if not json_file_content_check(json_pathname):
return False
return True
def json_file_content_check(json_pathname):
"""Check the content of json file."""
with open(json_pathname, 'r+') as f:
json_content = f.read()
package_info = json.loads(json_content)
print(package_info['name'])
if package_info['category'] == '' :
print ('The category of ' + package_info['name'] + ' package is lost.')
return False
if package_info['enable'] == '' :
print ('The enable of ' + package_info['name'] + ' package is lost.')
return False
if package_info['author']['name'] == '' :
print ('The author name of ' + package_info['name'] + ' package is lost.')
return False
if package_info['author']['email'] == '' :
print ('The author email of ' + package_info['name'] + ' package is lost.')
return False
if package_info['license'] == '' :
print ('The license of ' + package_info['name'] + ' package is lost.')
return False
if package_info['repository'] == '' :
print ('The repository of ' + package_info['name'] + ' package is lost.')
return False
else :
if not determine_url_valid(package_info['repository']):
return False
for i in range(0, len(package_info['site'])):
package_version = package_info['site'][i]['version']
package_url = package_info['site'][i]['URL']
print("%s : %s" % (package_version, package_url))
if not package_url[-4:] == '.git':
print(package_info['site'][i]['filename'])
if not determine_url_valid(package_url):
return False
return True
def main():
"""The entry point of the script."""
try:
work_root = os.getcwd()
print(work_root)
if not check_json_file(work_root):
sys.exit(1)
sys.exit(0)
except Exception, e:
print('error.message: %s\n\t' % (e.message))
sys.exit(1)
if __name__ == '__main__':
main()
| en | 0.640092 | # coding=utf-8 Execute the system command at the specified address. Check the validity of urls. # print('e.message:%s\t' % e.message) Check the json file. Check the content of json file. The entry point of the script. | 2.813652 | 3 |
ricecooker/utils/downloader.py | richard-dinh/ricecooker | 0 | 6630766 | <filename>ricecooker/utils/downloader.py
import concurrent.futures
import copy
import os
import re
import requests
import time
from urllib.parse import urlparse, urljoin
import uuid
from bs4 import BeautifulSoup
from selenium import webdriver
import selenium.webdriver.support.ui as selenium_ui
from requests_file import FileAdapter
from ricecooker.config import LOGGER, PHANTOMJS_PATH, STRICT
from ricecooker.utils.html import download_file
from ricecooker.utils.caching import CacheForeverHeuristic, FileCache, CacheControlAdapter, InvalidatingCacheControlAdapter
DOWNLOAD_SESSION = requests.Session() # Session for downloading content from urls
DOWNLOAD_SESSION.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
DOWNLOAD_SESSION.mount('file://', FileAdapter())
# use_dir_lock works with all filesystems and OSes
cache = FileCache('.webcache', use_dir_lock=True)
forever_adapter= CacheControlAdapter(heuristic=CacheForeverHeuristic(), cache=cache)
DOWNLOAD_SESSION.mount('http://', forever_adapter)
DOWNLOAD_SESSION.mount('https://', forever_adapter)
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive"
}
USE_PYPPETEER = False
try:
import asyncio
from pyppeteer import launch, errors
async def load_page(path, timeout=30, strict=True):
browser = await launch({'headless': True})
content = None
cookies = None
try:
page = await browser.newPage()
try:
await page.goto(path, {'timeout': timeout * 1000, 'waitUntil': ['load', 'domcontentloaded', 'networkidle0']})
except errors.TimeoutError:
# some sites have API calls running regularly, so the timeout may be that there's never any true
# network idle time. Try 'networkidle2' option instead before determining we can't scrape.
if not strict:
await page.goto(path, {'timeout': timeout * 1000, 'waitUntil': ['load', 'domcontentloaded', 'networkidle2']})
else:
raise
# get the entire rendered page, including the doctype
content = await page.content()
cookies = await page.cookies()
except Exception as e:
LOGGER.warning("Error scraping page: {}".format(e))
finally:
await browser.close()
return content, {'cookies': cookies}
USE_PYPPETEER = True
except:
print("Unable to load pyppeteer, using phantomjs for JS loading.")
pass
def read(path, loadjs=False, session=None, driver=None, timeout=60,
clear_cookies=True, loadjs_wait_time=3, loadjs_wait_for_callback=None, strict=True):
"""Reads from source and returns contents
Args:
path: (str) url or local path to download
loadjs: (boolean) indicates whether to load js (optional)
session: (requests.Session) session to use to download (optional)
driver: (selenium.webdriver) webdriver to use to download (optional)
timeout: (int) Maximum number of seconds to wait for the request to complete.
clear_cookies: (boolean) whether to clear cookies.
loadjs_wait_time: (int) if loading JS, seconds to wait after the
page has loaded before grabbing the page source
loadjs_wait_for_callback: (function<selenium.webdriver>) if loading
JS, a callback that will be invoked to determine when we can
grab the page source. The callback will be called with the
webdriver, and should return True when we're ready to grab the
page source. For example, pass in an argument like:
``lambda driver: driver.find_element_by_id('list-container')``
to wait for the #list-container element to be present before rendering.
strict: (bool) If False, when download fails, retry but allow parsing even if there
is still minimal network traffic happening. Useful for sites that regularly poll APIs.
Returns: str content from file or page
"""
session = session or DOWNLOAD_SESSION
if clear_cookies:
session.cookies.clear()
try:
if loadjs: # Wait until js loads then return contents
if USE_PYPPETEER:
content = asyncio.get_event_loop().run_until_complete(load_page(path))
return content
if PHANTOMJS_PATH:
driver = driver or webdriver.PhantomJS(executable_path=PHANTOMJS_PATH)
else:
driver = driver or webdriver.PhantomJS()
driver.get(path)
if loadjs_wait_for_callback:
selenium_ui.WebDriverWait(driver, 60).until(loadjs_wait_for_callback)
time.sleep(loadjs_wait_time)
return driver.page_source
else: # Read page contents from url
retry_count = 0
max_retries = 5
while True:
try:
response = session.get(path, stream=True, timeout=timeout)
break
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
retry_count += 1
print("Error with connection ('{msg}'); about to perform retry {count} of {trymax}."
.format(msg=str(e), count=retry_count, trymax=max_retries))
time.sleep(retry_count * 1)
if retry_count >= max_retries:
raise e
response.raise_for_status()
return response.content
except (requests.exceptions.MissingSchema, requests.exceptions.InvalidSchema):
with open(path, 'rb') as fobj: # If path is a local file path, try to open the file
return fobj.read()
def make_request(url, clear_cookies=False, headers=None, timeout=60, *args, **kwargs):
sess = DOWNLOAD_SESSION
if clear_cookies:
sess.cookies.clear()
retry_count = 0
max_retries = 5
request_headers = DEFAULT_HEADERS
if headers:
request_headers = copy.copy(DEFAULT_HEADERS)
request_headers.update(headers)
while True:
try:
response = sess.get(url, headers=request_headers, timeout=timeout, *args, **kwargs)
break
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
retry_count += 1
print("Error with connection ('{msg}'); about to perform retry {count} of {trymax}."
.format(msg=str(e), count=retry_count, trymax=max_retries))
time.sleep(retry_count * 1)
if retry_count >= max_retries:
raise e
if response.status_code != 200:
print("NOT FOUND:", url)
if STRICT:
response.raise_for_status()
return response
_CSS_URL_RE = re.compile(r"url\(['\"]?(.*?)['\"]?\)")
# TODO(davidhu): Use MD5 hash of URL (ideally file) instead.
def _derive_filename(url):
name = os.path.basename(urlparse(url).path).replace('%', '_')
return ("%s.%s" % (uuid.uuid4().hex, name)).lower()
def download_static_assets(doc, destination, base_url,
request_fn=make_request, url_blacklist=[], js_middleware=None,
css_middleware=None, derive_filename=_derive_filename):
"""
Download all static assets referenced from an HTML page.
The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
audio clips.
Args:
doc: The HTML page source as a string or BeautifulSoup instance.
destination: The folder to download the static assets to!
base_url: The base URL where assets will be downloaded from.
request_fn: The function to be called to make requests, passed to
ricecooker.utils.html.download_file(). Pass in a custom one for custom
caching logic.
url_blacklist: A list of keywords of files to not include in downloading.
Will do substring matching, so e.g. 'acorn.js' will match
'/some/path/to/acorn.js'.
js_middleware: If specificed, JS content will be passed into this callback
which is expected to return JS content with any modifications.
css_middleware: If specificed, CSS content will be passed into this callback
which is expected to return CSS content with any modifications.
Return the modified page HTML with links rewritten to the locations of the
downloaded static files, as a BeautifulSoup object. (Call str() on it to
extract the raw HTML.)
"""
if not isinstance(doc, BeautifulSoup):
doc = BeautifulSoup(doc, "html.parser")
# Helper function to download all assets for a given CSS selector.
def download_assets(selector, attr, url_middleware=None,
content_middleware=None, node_filter=None):
nodes = doc.select(selector)
for i, node in enumerate(nodes):
if node_filter:
if not node_filter(node):
src = node[attr]
node[attr] = ''
print(' Skipping node with src ', src)
continue
if node[attr].startswith('data:'):
continue
url = urljoin(base_url, node[attr])
if _is_blacklisted(url, url_blacklist):
print(' Skipping downloading blacklisted url', url)
node[attr] = ""
continue
if url_middleware:
url = url_middleware(url)
filename = derive_filename(url)
node[attr] = filename
print(" Downloading", url, "to filename", filename)
download_file(url, destination, request_fn=request_fn,
filename=filename, middleware_callbacks=content_middleware)
def js_content_middleware(content, url, **kwargs):
if js_middleware:
content = js_middleware(content, url, **kwargs)
return content
def css_node_filter(node):
return "stylesheet" in node["rel"]
def css_content_middleware(content, url, **kwargs):
if css_middleware:
content = css_middleware(content, url, **kwargs)
root_parts = urlparse(url)
# Download linked fonts and images
def repl(match):
src = match.group(1)
if src.startswith('//localhost'):
return 'url()'
# Don't download data: files
if src.startswith('data:'):
return match.group(0)
parts = urlparse(src)
root_url = None
if url:
root_url = url[:url.rfind('/') + 1]
if parts.scheme and parts.netloc:
src_url = src
elif parts.path.startswith('/') and url:
src_url = '{}://{}{}'.format(root_parts.scheme, root_parts.netloc, parts.path)
elif url and root_url:
src_url = urljoin(root_url, src)
else:
src_url = urljoin(base_url, src)
if _is_blacklisted(src_url, url_blacklist):
print(' Skipping downloading blacklisted url', src_url)
return 'url()'
derived_filename = derive_filename(src_url)
# The _derive_filename function puts all files in the root, so all URLs need
# rewritten. When using get_archive_filename, relative URLs will still work.
new_url = src
if derive_filename == _derive_filename:
if url and parts.path.startswith('/'):
parent_url = derive_filename(url)
new_url = os.path.relpath(src, os.path.dirname(parent_url))
else:
new_url = derived_filename
download_file(src_url, destination, request_fn=request_fn,
filename=derived_filename)
return 'url("%s")' % new_url
return _CSS_URL_RE.sub(repl, content)
# Download all linked static assets.
download_assets("img[src]", "src") # Images
download_assets("link[href]", "href",
content_middleware=css_content_middleware,
node_filter=css_node_filter) # CSS
download_assets("script[src]", "src",
content_middleware=js_content_middleware) # JS
download_assets("source[src]", "src") # Potentially audio
download_assets("source[srcset]", "srcset") # Potentially audio
# ... and also run the middleware on CSS/JS embedded in the page source to
# get linked files.
for node in doc.select('style'):
node.string = css_content_middleware(node.get_text(), url='')
for node in doc.select('script'):
if not node.attrs.get('src'):
node.string = js_content_middleware(node.get_text(), url='')
return doc
def get_archive_filename(url, page_domain=None, download_root=None, urls_to_replace=None):
file_url_parsed = urlparse(url)
no_scheme_url = url
if file_url_parsed.scheme != '':
no_scheme_url = url.replace(file_url_parsed.scheme + '://', '')
rel_path = file_url_parsed.path.replace('%', '_')
domain = file_url_parsed.netloc.replace(':', '_')
if not domain and page_domain:
domain = page_domain
if rel_path.startswith('/'):
rel_path = rel_path[1:]
url_local_dir = os.path.join(domain, rel_path)
assert domain in url_local_dir
local_dir_name = os.path.dirname(url_local_dir)
if local_dir_name != url_local_dir and urls_to_replace is not None:
full_dir = os.path.join(download_root, local_dir_name)
os.makedirs(full_dir, exist_ok=True)
urls_to_replace[url] = no_scheme_url
return url_local_dir
def archive_page(url, download_root):
"""
Download fully rendered page and all related assets into ricecooker's site archive format.
:param url: URL to download
:param download_root: Site archive root directory
:return: A dict containing info about the page archive operation
"""
os.makedirs(download_root, exist_ok=True)
content, props = asyncio.get_event_loop().run_until_complete(load_page(url))
parsed_url = urlparse(url)
page_domain = parsed_url.netloc.replace(':', '_')
# get related assets
base_url = url[:url.rfind('/')]
urls_to_replace = {}
if content:
def html5_derive_filename(url):
return get_archive_filename(url, page_domain, download_root, urls_to_replace)
download_static_assets(content, download_root, base_url, derive_filename=html5_derive_filename)
for key in urls_to_replace:
url_parts = urlparse(key)
# When we get an absolute URL, it may appear in one of three different ways in the page:
key_variants = [
# 1. /path/to/file.html
key.replace(url_parts.scheme + '://' + url_parts.netloc, ''),
# 2. https://www.domain.com/path/to/file.html
key,
# 3. //www.domain.com/path/to/file.html
key.replace(url_parts.scheme + ':', ''),
]
orig_content = content
for variant in key_variants:
# searching within quotes ensures we only replace the exact URL we are
# trying to replace
# we avoid using BeautifulSoup because Python HTML parsers can be destructive and
# do things like strip out the doctype.
content = content.replace('="{}"'.format(variant), '="{}"'.format(urls_to_replace[key]))
content = content.replace('url({})'.format(variant), 'url({})'.format(urls_to_replace[key]))
if content == orig_content:
LOGGER.debug("link not replaced: {}".format(key))
LOGGER.debug("key_variants = {}".format(key_variants))
download_dir = os.path.join(page_domain, parsed_url.path.split('/')[-1].replace('?', '_'))
download_path = os.path.join(download_root, download_dir)
os.makedirs(download_path, exist_ok=True)
index_path = os.path.join(download_path, 'index.html')
f = open(index_path, 'w', encoding='utf-8')
f.write(content)
f.close()
page_info = {
'url': url,
'cookies': props['cookies'],
'index_path': index_path,
'resources': list(urls_to_replace.values())
}
return page_info
return None
def _is_blacklisted(url, url_blacklist):
return any((item in url.lower()) for item in url_blacklist)
def download_in_parallel(urls, func=None, max_workers=5):
"""
Takes a set of URLs, and downloads them in parallel
:param urls: A list of URLs to download in parallel
:param func: A function that takes the URL as a parameter.
If not specified, defaults to a session-managed
requests.get function.
:return: A dictionary of func return values, indexed by URL
"""
if func is None:
func = requests.get
results = {}
start = 0
end = len(urls)
batch_size = 100
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
while start < end:
batch = urls[start:end]
futures = {}
for url in batch:
futures[executor.submit(func, url)] = url
for future in concurrent.futures.as_completed(futures):
url = futures[future]
try:
result = future.result()
results[url] = result
except:
raise
start = start + batch_size
return results
| <filename>ricecooker/utils/downloader.py
import concurrent.futures
import copy
import os
import re
import requests
import time
from urllib.parse import urlparse, urljoin
import uuid
from bs4 import BeautifulSoup
from selenium import webdriver
import selenium.webdriver.support.ui as selenium_ui
from requests_file import FileAdapter
from ricecooker.config import LOGGER, PHANTOMJS_PATH, STRICT
from ricecooker.utils.html import download_file
from ricecooker.utils.caching import CacheForeverHeuristic, FileCache, CacheControlAdapter, InvalidatingCacheControlAdapter
DOWNLOAD_SESSION = requests.Session() # Session for downloading content from urls
DOWNLOAD_SESSION.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
DOWNLOAD_SESSION.mount('file://', FileAdapter())
# use_dir_lock works with all filesystems and OSes
cache = FileCache('.webcache', use_dir_lock=True)
forever_adapter= CacheControlAdapter(heuristic=CacheForeverHeuristic(), cache=cache)
DOWNLOAD_SESSION.mount('http://', forever_adapter)
DOWNLOAD_SESSION.mount('https://', forever_adapter)
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive"
}
USE_PYPPETEER = False
try:
import asyncio
from pyppeteer import launch, errors
async def load_page(path, timeout=30, strict=True):
browser = await launch({'headless': True})
content = None
cookies = None
try:
page = await browser.newPage()
try:
await page.goto(path, {'timeout': timeout * 1000, 'waitUntil': ['load', 'domcontentloaded', 'networkidle0']})
except errors.TimeoutError:
# some sites have API calls running regularly, so the timeout may be that there's never any true
# network idle time. Try 'networkidle2' option instead before determining we can't scrape.
if not strict:
await page.goto(path, {'timeout': timeout * 1000, 'waitUntil': ['load', 'domcontentloaded', 'networkidle2']})
else:
raise
# get the entire rendered page, including the doctype
content = await page.content()
cookies = await page.cookies()
except Exception as e:
LOGGER.warning("Error scraping page: {}".format(e))
finally:
await browser.close()
return content, {'cookies': cookies}
USE_PYPPETEER = True
except:
print("Unable to load pyppeteer, using phantomjs for JS loading.")
pass
def read(path, loadjs=False, session=None, driver=None, timeout=60,
clear_cookies=True, loadjs_wait_time=3, loadjs_wait_for_callback=None, strict=True):
"""Reads from source and returns contents
Args:
path: (str) url or local path to download
loadjs: (boolean) indicates whether to load js (optional)
session: (requests.Session) session to use to download (optional)
driver: (selenium.webdriver) webdriver to use to download (optional)
timeout: (int) Maximum number of seconds to wait for the request to complete.
clear_cookies: (boolean) whether to clear cookies.
loadjs_wait_time: (int) if loading JS, seconds to wait after the
page has loaded before grabbing the page source
loadjs_wait_for_callback: (function<selenium.webdriver>) if loading
JS, a callback that will be invoked to determine when we can
grab the page source. The callback will be called with the
webdriver, and should return True when we're ready to grab the
page source. For example, pass in an argument like:
``lambda driver: driver.find_element_by_id('list-container')``
to wait for the #list-container element to be present before rendering.
strict: (bool) If False, when download fails, retry but allow parsing even if there
is still minimal network traffic happening. Useful for sites that regularly poll APIs.
Returns: str content from file or page
"""
session = session or DOWNLOAD_SESSION
if clear_cookies:
session.cookies.clear()
try:
if loadjs: # Wait until js loads then return contents
if USE_PYPPETEER:
content = asyncio.get_event_loop().run_until_complete(load_page(path))
return content
if PHANTOMJS_PATH:
driver = driver or webdriver.PhantomJS(executable_path=PHANTOMJS_PATH)
else:
driver = driver or webdriver.PhantomJS()
driver.get(path)
if loadjs_wait_for_callback:
selenium_ui.WebDriverWait(driver, 60).until(loadjs_wait_for_callback)
time.sleep(loadjs_wait_time)
return driver.page_source
else: # Read page contents from url
retry_count = 0
max_retries = 5
while True:
try:
response = session.get(path, stream=True, timeout=timeout)
break
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
retry_count += 1
print("Error with connection ('{msg}'); about to perform retry {count} of {trymax}."
.format(msg=str(e), count=retry_count, trymax=max_retries))
time.sleep(retry_count * 1)
if retry_count >= max_retries:
raise e
response.raise_for_status()
return response.content
except (requests.exceptions.MissingSchema, requests.exceptions.InvalidSchema):
with open(path, 'rb') as fobj: # If path is a local file path, try to open the file
return fobj.read()
def make_request(url, clear_cookies=False, headers=None, timeout=60, *args, **kwargs):
sess = DOWNLOAD_SESSION
if clear_cookies:
sess.cookies.clear()
retry_count = 0
max_retries = 5
request_headers = DEFAULT_HEADERS
if headers:
request_headers = copy.copy(DEFAULT_HEADERS)
request_headers.update(headers)
while True:
try:
response = sess.get(url, headers=request_headers, timeout=timeout, *args, **kwargs)
break
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
retry_count += 1
print("Error with connection ('{msg}'); about to perform retry {count} of {trymax}."
.format(msg=str(e), count=retry_count, trymax=max_retries))
time.sleep(retry_count * 1)
if retry_count >= max_retries:
raise e
if response.status_code != 200:
print("NOT FOUND:", url)
if STRICT:
response.raise_for_status()
return response
_CSS_URL_RE = re.compile(r"url\(['\"]?(.*?)['\"]?\)")
# TODO(davidhu): Use MD5 hash of URL (ideally file) instead.
def _derive_filename(url):
name = os.path.basename(urlparse(url).path).replace('%', '_')
return ("%s.%s" % (uuid.uuid4().hex, name)).lower()
def download_static_assets(doc, destination, base_url,
request_fn=make_request, url_blacklist=[], js_middleware=None,
css_middleware=None, derive_filename=_derive_filename):
"""
Download all static assets referenced from an HTML page.
The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
audio clips.
Args:
doc: The HTML page source as a string or BeautifulSoup instance.
destination: The folder to download the static assets to!
base_url: The base URL where assets will be downloaded from.
request_fn: The function to be called to make requests, passed to
ricecooker.utils.html.download_file(). Pass in a custom one for custom
caching logic.
url_blacklist: A list of keywords of files to not include in downloading.
Will do substring matching, so e.g. 'acorn.js' will match
'/some/path/to/acorn.js'.
js_middleware: If specificed, JS content will be passed into this callback
which is expected to return JS content with any modifications.
css_middleware: If specificed, CSS content will be passed into this callback
which is expected to return CSS content with any modifications.
Return the modified page HTML with links rewritten to the locations of the
downloaded static files, as a BeautifulSoup object. (Call str() on it to
extract the raw HTML.)
"""
if not isinstance(doc, BeautifulSoup):
doc = BeautifulSoup(doc, "html.parser")
# Helper function to download all assets for a given CSS selector.
def download_assets(selector, attr, url_middleware=None,
content_middleware=None, node_filter=None):
nodes = doc.select(selector)
for i, node in enumerate(nodes):
if node_filter:
if not node_filter(node):
src = node[attr]
node[attr] = ''
print(' Skipping node with src ', src)
continue
if node[attr].startswith('data:'):
continue
url = urljoin(base_url, node[attr])
if _is_blacklisted(url, url_blacklist):
print(' Skipping downloading blacklisted url', url)
node[attr] = ""
continue
if url_middleware:
url = url_middleware(url)
filename = derive_filename(url)
node[attr] = filename
print(" Downloading", url, "to filename", filename)
download_file(url, destination, request_fn=request_fn,
filename=filename, middleware_callbacks=content_middleware)
def js_content_middleware(content, url, **kwargs):
if js_middleware:
content = js_middleware(content, url, **kwargs)
return content
def css_node_filter(node):
return "stylesheet" in node["rel"]
def css_content_middleware(content, url, **kwargs):
if css_middleware:
content = css_middleware(content, url, **kwargs)
root_parts = urlparse(url)
# Download linked fonts and images
def repl(match):
src = match.group(1)
if src.startswith('//localhost'):
return 'url()'
# Don't download data: files
if src.startswith('data:'):
return match.group(0)
parts = urlparse(src)
root_url = None
if url:
root_url = url[:url.rfind('/') + 1]
if parts.scheme and parts.netloc:
src_url = src
elif parts.path.startswith('/') and url:
src_url = '{}://{}{}'.format(root_parts.scheme, root_parts.netloc, parts.path)
elif url and root_url:
src_url = urljoin(root_url, src)
else:
src_url = urljoin(base_url, src)
if _is_blacklisted(src_url, url_blacklist):
print(' Skipping downloading blacklisted url', src_url)
return 'url()'
derived_filename = derive_filename(src_url)
# The _derive_filename function puts all files in the root, so all URLs need
# rewritten. When using get_archive_filename, relative URLs will still work.
new_url = src
if derive_filename == _derive_filename:
if url and parts.path.startswith('/'):
parent_url = derive_filename(url)
new_url = os.path.relpath(src, os.path.dirname(parent_url))
else:
new_url = derived_filename
download_file(src_url, destination, request_fn=request_fn,
filename=derived_filename)
return 'url("%s")' % new_url
return _CSS_URL_RE.sub(repl, content)
# Download all linked static assets.
download_assets("img[src]", "src") # Images
download_assets("link[href]", "href",
content_middleware=css_content_middleware,
node_filter=css_node_filter) # CSS
download_assets("script[src]", "src",
content_middleware=js_content_middleware) # JS
download_assets("source[src]", "src") # Potentially audio
download_assets("source[srcset]", "srcset") # Potentially audio
# ... and also run the middleware on CSS/JS embedded in the page source to
# get linked files.
for node in doc.select('style'):
node.string = css_content_middleware(node.get_text(), url='')
for node in doc.select('script'):
if not node.attrs.get('src'):
node.string = js_content_middleware(node.get_text(), url='')
return doc
def get_archive_filename(url, page_domain=None, download_root=None, urls_to_replace=None):
file_url_parsed = urlparse(url)
no_scheme_url = url
if file_url_parsed.scheme != '':
no_scheme_url = url.replace(file_url_parsed.scheme + '://', '')
rel_path = file_url_parsed.path.replace('%', '_')
domain = file_url_parsed.netloc.replace(':', '_')
if not domain and page_domain:
domain = page_domain
if rel_path.startswith('/'):
rel_path = rel_path[1:]
url_local_dir = os.path.join(domain, rel_path)
assert domain in url_local_dir
local_dir_name = os.path.dirname(url_local_dir)
if local_dir_name != url_local_dir and urls_to_replace is not None:
full_dir = os.path.join(download_root, local_dir_name)
os.makedirs(full_dir, exist_ok=True)
urls_to_replace[url] = no_scheme_url
return url_local_dir
def archive_page(url, download_root):
"""
Download fully rendered page and all related assets into ricecooker's site archive format.
:param url: URL to download
:param download_root: Site archive root directory
:return: A dict containing info about the page archive operation
"""
os.makedirs(download_root, exist_ok=True)
content, props = asyncio.get_event_loop().run_until_complete(load_page(url))
parsed_url = urlparse(url)
page_domain = parsed_url.netloc.replace(':', '_')
# get related assets
base_url = url[:url.rfind('/')]
urls_to_replace = {}
if content:
def html5_derive_filename(url):
return get_archive_filename(url, page_domain, download_root, urls_to_replace)
download_static_assets(content, download_root, base_url, derive_filename=html5_derive_filename)
for key in urls_to_replace:
url_parts = urlparse(key)
# When we get an absolute URL, it may appear in one of three different ways in the page:
key_variants = [
# 1. /path/to/file.html
key.replace(url_parts.scheme + '://' + url_parts.netloc, ''),
# 2. https://www.domain.com/path/to/file.html
key,
# 3. //www.domain.com/path/to/file.html
key.replace(url_parts.scheme + ':', ''),
]
orig_content = content
for variant in key_variants:
# searching within quotes ensures we only replace the exact URL we are
# trying to replace
# we avoid using BeautifulSoup because Python HTML parsers can be destructive and
# do things like strip out the doctype.
content = content.replace('="{}"'.format(variant), '="{}"'.format(urls_to_replace[key]))
content = content.replace('url({})'.format(variant), 'url({})'.format(urls_to_replace[key]))
if content == orig_content:
LOGGER.debug("link not replaced: {}".format(key))
LOGGER.debug("key_variants = {}".format(key_variants))
download_dir = os.path.join(page_domain, parsed_url.path.split('/')[-1].replace('?', '_'))
download_path = os.path.join(download_root, download_dir)
os.makedirs(download_path, exist_ok=True)
index_path = os.path.join(download_path, 'index.html')
f = open(index_path, 'w', encoding='utf-8')
f.write(content)
f.close()
page_info = {
'url': url,
'cookies': props['cookies'],
'index_path': index_path,
'resources': list(urls_to_replace.values())
}
return page_info
return None
def _is_blacklisted(url, url_blacklist):
return any((item in url.lower()) for item in url_blacklist)
def download_in_parallel(urls, func=None, max_workers=5):
"""
Takes a set of URLs, and downloads them in parallel
:param urls: A list of URLs to download in parallel
:param func: A function that takes the URL as a parameter.
If not specified, defaults to a session-managed
requests.get function.
:return: A dictionary of func return values, indexed by URL
"""
if func is None:
func = requests.get
results = {}
start = 0
end = len(urls)
batch_size = 100
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
while start < end:
batch = urls[start:end]
futures = {}
for url in batch:
futures[executor.submit(func, url)] = url
for future in concurrent.futures.as_completed(futures):
url = futures[future]
try:
result = future.result()
results[url] = result
except:
raise
start = start + batch_size
return results
| en | 0.76907 | # Session for downloading content from urls # use_dir_lock works with all filesystems and OSes # some sites have API calls running regularly, so the timeout may be that there's never any true # network idle time. Try 'networkidle2' option instead before determining we can't scrape. # get the entire rendered page, including the doctype Reads from source and returns contents Args: path: (str) url or local path to download loadjs: (boolean) indicates whether to load js (optional) session: (requests.Session) session to use to download (optional) driver: (selenium.webdriver) webdriver to use to download (optional) timeout: (int) Maximum number of seconds to wait for the request to complete. clear_cookies: (boolean) whether to clear cookies. loadjs_wait_time: (int) if loading JS, seconds to wait after the page has loaded before grabbing the page source loadjs_wait_for_callback: (function<selenium.webdriver>) if loading JS, a callback that will be invoked to determine when we can grab the page source. The callback will be called with the webdriver, and should return True when we're ready to grab the page source. For example, pass in an argument like: ``lambda driver: driver.find_element_by_id('list-container')`` to wait for the #list-container element to be present before rendering. strict: (bool) If False, when download fails, retry but allow parsing even if there is still minimal network traffic happening. Useful for sites that regularly poll APIs. Returns: str content from file or page # Wait until js loads then return contents # Read page contents from url # If path is a local file path, try to open the file # TODO(davidhu): Use MD5 hash of URL (ideally file) instead. Download all static assets referenced from an HTML page. The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and audio clips. Args: doc: The HTML page source as a string or BeautifulSoup instance. destination: The folder to download the static assets to! base_url: The base URL where assets will be downloaded from. request_fn: The function to be called to make requests, passed to ricecooker.utils.html.download_file(). Pass in a custom one for custom caching logic. url_blacklist: A list of keywords of files to not include in downloading. Will do substring matching, so e.g. 'acorn.js' will match '/some/path/to/acorn.js'. js_middleware: If specificed, JS content will be passed into this callback which is expected to return JS content with any modifications. css_middleware: If specificed, CSS content will be passed into this callback which is expected to return CSS content with any modifications. Return the modified page HTML with links rewritten to the locations of the downloaded static files, as a BeautifulSoup object. (Call str() on it to extract the raw HTML.) # Helper function to download all assets for a given CSS selector. # Download linked fonts and images # Don't download data: files # The _derive_filename function puts all files in the root, so all URLs need # rewritten. When using get_archive_filename, relative URLs will still work. # Download all linked static assets. # Images # CSS # JS # Potentially audio # Potentially audio # ... and also run the middleware on CSS/JS embedded in the page source to # get linked files. Download fully rendered page and all related assets into ricecooker's site archive format. 
:param url: URL to download :param download_root: Site archive root directory :return: A dict containing info about the page archive operation # get related assets # When we get an absolute URL, it may appear in one of three different ways in the page: # 1. /path/to/file.html # 2. https://www.domain.com/path/to/file.html # 3. //www.domain.com/path/to/file.html # searching within quotes ensures we only replace the exact URL we are # trying to replace # we avoid using BeautifulSoup because Python HTML parsers can be destructive and # do things like strip out the doctype. Takes a set of URLs, and downloads them in parallel :param urls: A list of URLs to download in parallel :param func: A function that takes the URL as a parameter. If not specified, defaults to a session-managed requests.get function. :return: A dictionary of func return values, indexed by URL | 2.586643 | 3 |
services/buttoninputservice/ButtonInputService.py | CydrickT/HomeAutomation | 0 | 6630767 | <reponame>CydrickT/HomeAutomation<filename>services/buttoninputservice/ButtonInputService.py<gh_stars>0
import configparser
import json
import time
from datetime import datetime, timedelta
import RPi.GPIO as GPIO
from core.Service import Service
from topics.buttoninput.ButtonInputCommand import ButtonInputCommand
from topics.buttoninput.ButtonInputType import ButtonInputType
class ButtonInputService(Service):
def initialize(self):
self.button_up_gpio_id = self.config.getint('ButtonUpGpioId')
self.button_down_gpio_id = self.config.getint('ButtonDownGpioId')
self.button_state_manager = ButtonStateManager(self.config.getfloat('ShortToLongThresholdInSeconds'))
self.enableLightFeedback = self.config.getboolean('EnableLightFeedback')
self.light_up_gpio_id = self.config.getint('LightUpGpioId')
self.light_down_gpio_id = self.config.getint('LightDownGpioId')
self.input_loop_cycle_delay_when_pressed_in_seconds = self.config.getfloat('InputLoopCycleDelayWhenPressedInSeconds')
self.input_loop_cycle_delay_when_not_pressed_in_seconds = self.config.getfloat('InputLoopCycleDelayWhenNotPressedInSeconds')
self.setupBoard()
def setupBoard(self):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.button_up_gpio_id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(self.button_down_gpio_id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
if self.enableLightFeedback:
GPIO.setup(self.light_up_gpio_id, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(self.light_down_gpio_id, GPIO.OUT, initial=GPIO.LOW)
def start(self):
while True:
if self.button_state_manager.anyButtonPressed():
time.sleep(self.input_loop_cycle_delay_when_pressed_in_seconds)
else:
time.sleep(self.input_loop_cycle_delay_when_not_pressed_in_seconds)
self.button_state_manager.resetCycle()
if GPIO.input(self.button_up_gpio_id) == GPIO.HIGH:
self.button_state_manager.up_currently_pressed = True
if GPIO.input(self.button_down_gpio_id) == GPIO.HIGH:
self.button_state_manager.down_currently_pressed = True
if not self.button_state_manager.signalSent:
if self.button_state_manager.isConsideredLongPress():
# We have crossed the short to long threshold. It's now considered a long press.
self.longPressDetected()
elif self.button_state_manager.upRecentlyReleased():
# Up was recently released, but was less than threshold. Considered a short button press.
self.shortUpReleased()
elif self.button_state_manager.downRecentlyReleased():
# Down was recently released, but was less than threshold. Considered a short button press.
self.shortDownReleased()
elif self.button_state_manager.bothButtonsCurrentlyReleased():
self.setLightState(False, False)
def longPressDetected(self):
buttonInputType = None
if self.button_state_manager.bothButtonsPreviouslyPressed():
buttonInputType = ButtonInputType.UpDownLong
self.setLightState(True, True)
elif self.button_state_manager.up_previously_pressed:
buttonInputType = ButtonInputType.UpLong
self.setLightState(True, False)
elif self.button_state_manager.down_previously_pressed:
buttonInputType = ButtonInputType.DownLong
GPIO.output(self.light_down_gpio_id, GPIO.HIGH)
self.setLightState(False, True)
self.sendSignal(buttonInputType)
def shortUpReleased(self):
buttonInputType = ButtonInputType.UpShort
if self.button_state_manager.down_previously_pressed:
# Was a short 2-button input
buttonInputType = ButtonInputType.UpDownShort
self.sendSignal(buttonInputType)
def shortDownReleased(self):
buttonInputType = ButtonInputType.DownShort
if self.button_state_manager.up_previously_pressed:
# Was a short 2-button input
buttonInputType = ButtonInputType.UpDownShort
self.sendSignal(buttonInputType)
def sendSignal(self, buttonInputType):
self.core.dataRouter.publish(ButtonInputCommand(buttonInputType))
self.button_state_manager.signalSent = True
def setLightState(self, lightUpOn, lightDownOn):
if self.enableLightFeedback:
lights = (self.light_up_gpio_id, self.light_down_gpio_id)
upValue = GPIO.HIGH if lightUpOn else GPIO.LOW
downValue = GPIO.HIGH if lightDownOn else GPIO.LOW
values = (upValue, downValue)
GPIO.output(lights, values)
class ButtonStateManager:
def __init__(self, short_to_long_threshold_in_seconds):
self.up_previously_pressed = False
self.down_previously_pressed = False
self.up_currently_pressed = False
self.down_currently_pressed = False
self.signalSent = False
self.timeDown = None
self.__short_to_long_threshold_in_seconds__ = short_to_long_threshold_in_seconds
def resetCycle(self):
if self.upRecentlyPressed() and not self.signalSent:
self.timeDown = datetime.now()
if self.downRecentlyPressed() and not self.signalSent:
self.timeDown = datetime.now()
if not self.up_currently_pressed and not self.down_currently_pressed and self.signalSent:
# Waiting until both buttons are released to fully reset everything.
self.signalSent = False
self.timeDown = None
self.up_previously_pressed = self.up_currently_pressed
self.down_previously_pressed = self.down_currently_pressed
self.up_currently_pressed = False
self.down_currently_pressed = False
def isConsideredLongPress(self):
return self.timeDown is not None and self.timeDown + timedelta(seconds=self.__short_to_long_threshold_in_seconds__) < datetime.now()
def upRecentlyPressed(self):
return self.up_currently_pressed and not self.up_previously_pressed
def downRecentlyPressed(self):
return self.down_currently_pressed and not self.down_previously_pressed
def upRecentlyReleased(self):
return not self.up_currently_pressed and self.up_previously_pressed
def downRecentlyReleased(self):
return not self.down_currently_pressed and self.down_previously_pressed
def bothButtonsCurrentlyPressed(self):
return self.up_currently_pressed and self.down_currently_pressed
def bothButtonsCurrentlyReleased(self):
return not self.up_currently_pressed and not self.down_currently_pressed
def bothButtonsPreviouslyPressed(self):
return self.up_currently_pressed and self.down_currently_pressed
def anyButtonPressed(self):
return self.up_currently_pressed or self.down_currently_pressed
| import configparser
import json
import time
from datetime import datetime, timedelta
import RPi.GPIO as GPIO
from core.Service import Service
from topics.buttoninput.ButtonInputCommand import ButtonInputCommand
from topics.buttoninput.ButtonInputType import ButtonInputType
class ButtonInputService(Service):
def initialize(self):
self.button_up_gpio_id = self.config.getint('ButtonUpGpioId')
self.button_down_gpio_id = self.config.getint('ButtonDownGpioId')
self.button_state_manager = ButtonStateManager(self.config.getfloat('ShortToLongThresholdInSeconds'))
self.enableLightFeedback = self.config.getboolean('EnableLightFeedback')
self.light_up_gpio_id = self.config.getint('LightUpGpioId')
self.light_down_gpio_id = self.config.getint('LightDownGpioId')
self.input_loop_cycle_delay_when_pressed_in_seconds = self.config.getfloat('InputLoopCycleDelayWhenPressedInSeconds')
self.input_loop_cycle_delay_when_not_pressed_in_seconds = self.config.getfloat('InputLoopCycleDelayWhenNotPressedInSeconds')
self.setupBoard()
def setupBoard(self):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.button_up_gpio_id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(self.button_down_gpio_id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
if self.enableLightFeedback:
GPIO.setup(self.light_up_gpio_id, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(self.light_down_gpio_id, GPIO.OUT, initial=GPIO.LOW)
def start(self):
while True:
if self.button_state_manager.anyButtonPressed():
time.sleep(self.input_loop_cycle_delay_when_pressed_in_seconds)
else:
time.sleep(self.input_loop_cycle_delay_when_not_pressed_in_seconds)
self.button_state_manager.resetCycle()
if GPIO.input(self.button_up_gpio_id) == GPIO.HIGH:
self.button_state_manager.up_currently_pressed = True
if GPIO.input(self.button_down_gpio_id) == GPIO.HIGH:
self.button_state_manager.down_currently_pressed = True
if not self.button_state_manager.signalSent:
if self.button_state_manager.isConsideredLongPress():
# We have crossed the short to long threshold. It's now considered a long press.
self.longPressDetected()
elif self.button_state_manager.upRecentlyReleased():
# Up was recently released, but was less than threshold. Considered a short button press.
self.shortUpReleased()
elif self.button_state_manager.downRecentlyReleased():
# Down was recently released, but was less than threshold. Considered a short button press.
self.shortDownReleased()
elif self.button_state_manager.bothButtonsCurrentlyReleased():
self.setLightState(False, False)
def longPressDetected(self):
buttonInputType = None
if self.button_state_manager.bothButtonsPreviouslyPressed():
buttonInputType = ButtonInputType.UpDownLong
self.setLightState(True, True)
elif self.button_state_manager.up_previously_pressed:
buttonInputType = ButtonInputType.UpLong
self.setLightState(True, False)
elif self.button_state_manager.down_previously_pressed:
buttonInputType = ButtonInputType.DownLong
GPIO.output(self.light_down_gpio_id, GPIO.HIGH)
self.setLightState(False, True)
self.sendSignal(buttonInputType)
def shortUpReleased(self):
buttonInputType = ButtonInputType.UpShort
if self.button_state_manager.down_previously_pressed:
# Was a short 2-button input
buttonInputType = ButtonInputType.UpDownShort
self.sendSignal(buttonInputType)
def shortDownReleased(self):
buttonInputType = ButtonInputType.DownShort
if self.button_state_manager.up_previously_pressed:
# Was a short 2-button input
buttonInputType = ButtonInputType.UpDownShort
self.sendSignal(buttonInputType)
def sendSignal(self, buttonInputType):
self.core.dataRouter.publish(ButtonInputCommand(buttonInputType))
self.button_state_manager.signalSent = True
def setLightState(self, lightUpOn, lightDownOn):
if self.enableLightFeedback:
lights = (self.light_up_gpio_id, self.light_down_gpio_id)
upValue = GPIO.HIGH if lightUpOn else GPIO.LOW
downValue = GPIO.HIGH if lightDownOn else GPIO.LOW
values = (upValue, downValue)
GPIO.output(lights, values)
class ButtonStateManager:
def __init__(self, short_to_long_threshold_in_seconds):
self.up_previously_pressed = False
self.down_previously_pressed = False
self.up_currently_pressed = False
self.down_currently_pressed = False
self.signalSent = False
self.timeDown = None
self.__short_to_long_threshold_in_seconds__ = short_to_long_threshold_in_seconds
def resetCycle(self):
if self.upRecentlyPressed() and not self.signalSent:
self.timeDown = datetime.now()
if self.downRecentlyPressed() and not self.signalSent:
self.timeDown = datetime.now()
if not self.up_currently_pressed and not self.down_currently_pressed and self.signalSent:
# Waiting until both buttons are released to fully reset everything.
self.signalSent = False
self.timeDown = None
self.up_previously_pressed = self.up_currently_pressed
self.down_previously_pressed = self.down_currently_pressed
self.up_currently_pressed = False
self.down_currently_pressed = False
def isConsideredLongPress(self):
return self.timeDown is not None and self.timeDown + timedelta(seconds=self.__short_to_long_threshold_in_seconds__) < datetime.now()
def upRecentlyPressed(self):
return self.up_currently_pressed and not self.up_previously_pressed
def downRecentlyPressed(self):
return self.down_currently_pressed and not self.down_previously_pressed
def upRecentlyReleased(self):
return not self.up_currently_pressed and self.up_previously_pressed
def downRecentlyReleased(self):
return not self.down_currently_pressed and self.down_previously_pressed
def bothButtonsCurrentlyPressed(self):
return self.up_currently_pressed and self.down_currently_pressed
def bothButtonsCurrentlyReleased(self):
return not self.up_currently_pressed and not self.down_currently_pressed
def bothButtonsPreviouslyPressed(self):
return self.up_currently_pressed and self.down_currently_pressed
def anyButtonPressed(self):
return self.up_currently_pressed or self.down_currently_pressed | en | 0.987642 | # We have crossed the short to long threshold. It's now considered a long press. # Up was recently released, but was less than threshold. Considered a short button press. # Down was recently released, but was less than threshold. Considered a short button press. # Was a short 2-button input # Was a short 2-button input # Waiting until both buttons are released to fully reset everything. | 2.52845 | 3 |
src/common/model/numerical_integrator.py | GirZ0n/Methods-of-Computation | 2 | 6630768 | from abc import ABC, abstractmethod
from typing import Callable
from src.common.model.line_segment import LineSegment
class NumericalIntegrator(ABC):
@property
@abstractmethod
def accuracy_degree(self) -> int:
raise NotImplementedError
@abstractmethod
def integrate(self, *, f: Callable, segment: LineSegment, n: int, **kwargs) -> float:
raise NotImplementedError
| from abc import ABC, abstractmethod
from typing import Callable
from src.common.model.line_segment import LineSegment
class NumericalIntegrator(ABC):
@property
@abstractmethod
def accuracy_degree(self) -> int:
raise NotImplementedError
@abstractmethod
def integrate(self, *, f: Callable, segment: LineSegment, n: int, **kwargs) -> float:
raise NotImplementedError
| none | 1 | 2.915836 | 3 |
|
CORES.py | Joaongm/Projeto-RunAway | 0 | 6630769 | <reponame>Joaongm/Projeto-RunAway
cores = {'limpa': '\033[m', 'azul': '\033[34;40m',
'fundoazul': '\033[7;36m', 'red': '\033[4;31m', 'amarelo':'\033[33m'}
# print(cores)
'''print('\033[4;31;40m-*-\033[m'*30)
num=float(input('\033[7;30;44mDigite o num:' ))
print('{}O valor {}digitado{} foi: {}{}{} e sua porção inteira é: {}'.format(cores['limpa'],cores['azul'], cores['limpa'],cores['red'],num,cores['limpa'], int(num)))'''
'''30 black preto 40
31 red vermelho 41
32 green verde 42
33 yellow amarelo 43
34 blue azul 44
35 Magenta Magenta 45
36 cyan ciano 46
37 grey cinza 47
97 white branco 107'''
| cores = {'limpa': '\033[m', 'azul': '\033[34;40m',
'fundoazul': '\033[7;36m', 'red': '\033[4;31m', 'amarelo':'\033[33m'}
# print(cores)
'''print('\033[4;31;40m-*-\033[m'*30)
num=float(input('\033[7;30;44mDigite o num:' ))
print('{}O valor {}digitado{} foi: {}{}{} e sua porção inteira é: {}'.format(cores['limpa'],cores['azul'], cores['limpa'],cores['red'],num,cores['limpa'], int(num)))'''
'''30 black preto 40
31 red vermelho 41
32 green verde 42
33 yellow amarelo 43
34 blue azul 44
35 Magenta Magenta 45
36 cyan ciano 46
37 grey cinza 47
97 white branco 107''' | pt | 0.393014 | # print(cores) print('\033[4;31;40m-*-\033[m'*30) num=float(input('\033[7;30;44mDigite o num:' )) print('{}O valor {}digitado{} foi: {}{}{} e sua porção inteira é: {}'.format(cores['limpa'],cores['azul'], cores['limpa'],cores['red'],num,cores['limpa'], int(num))) 30 black preto 40 31 red vermelho 41 32 green verde 42 33 yellow amarelo 43 34 blue azul 44 35 Magenta Magenta 45 36 cyan ciano 46 37 grey cinza 47 97 white branco 107 | 3.727666 | 4 |
rurusetto/users/migrations/0016_auto_20210811_0523.py | siddhantdixit/rurusetto | 19 | 6630770 | # Generated by Django 3.2.5 on 2021-08-11 05:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0015_alter_profile_osu_id'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='discord',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='interests',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='location',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='occupation',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='osu_username',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='twitter',
field=models.CharField(default='', max_length=50),
),
]
| # Generated by Django 3.2.5 on 2021-08-11 05:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0015_alter_profile_osu_id'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='discord',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='interests',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='location',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='occupation',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='osu_username',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='twitter',
field=models.CharField(default='', max_length=50),
),
]
| en | 0.850649 | # Generated by Django 3.2.5 on 2021-08-11 05:23 | 1.711098 | 2 |
futuquant/common/__init__.py | jarryji/futuquant | 1 | 6630771 | <gh_stars>1-10
# -*- coding: utf-8 -*-
class RspHandlerBase(object):
"""callback function base class"""
def __init__(self):
pass
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
return 0, None
| # -*- coding: utf-8 -*-
class RspHandlerBase(object):
"""callback function base class"""
def __init__(self):
pass
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
return 0, None | en | 0.702094 | # -*- coding: utf-8 -*- callback function base class receive response callback function | 2.197901 | 2 |
week_2/capitaliza_string_alternadamente.py | angelitabrg/lih_lab_python2 | 0 | 6630772 | <filename>week_2/capitaliza_string_alternadamente.py
'''
A função capitaliza letras alternadamente:
'''
def fazAlgo(string):
pos = 0
string1 = ""
string = string.lower()
stringMa = string.upper()
while pos < len(string):
if pos % 2 == 0:
string1 = string1 + stringMa[pos]
else:
string1 = string1 + string[pos]
pos = pos + 1
return string1
# print(fazAlgo("paralelepipedo")) | <filename>week_2/capitaliza_string_alternadamente.py
'''
A função capitaliza letras alternadamente:
'''
def fazAlgo(string):
pos = 0
string1 = ""
string = string.lower()
stringMa = string.upper()
while pos < len(string):
if pos % 2 == 0:
string1 = string1 + stringMa[pos]
else:
string1 = string1 + string[pos]
pos = pos + 1
return string1
# print(fazAlgo("paralelepipedo")) | pt | 0.830692 | A função capitaliza letras alternadamente: # print(fazAlgo("paralelepipedo")) | 4.171821 | 4 |
tools/c7n_azure/tests/test_policy.py | anthonybgale/cloud-custodian | 2 | 6630773 | # Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure.mgmt.storage.models import StorageAccount
from azure_common import BaseTest, DEFAULT_SUBSCRIPTION_ID, arm_template, cassette_name
from c7n_azure.constants import FUNCTION_EVENT_TRIGGER_MODE, FUNCTION_TIME_TRIGGER_MODE, \
CONTAINER_EVENT_TRIGGER_MODE, CONTAINER_TIME_TRIGGER_MODE
from c7n_azure.policy import AzureEventGridMode, AzureFunctionMode, AzureModeCommon
from mock import mock, patch, Mock
class AzurePolicyModeTest(BaseTest):
def setUp(self):
super(AzurePolicyModeTest, self).setUp()
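    # The schema validation tests below only check that the policy definitions
    # load and validate; sign_out_patch lets them run without Azure credentials.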
def test_azure_function_event_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite'],
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian',
'location': 'eastus',
'resourceGroupName': 'test'},
'storageAccount': {
'name': 'testschemaname'
},
'appInsights': {
'name': 'testschemaname'
}
}}
})
self.assertTrue(p)
def test_azure_function_periodic_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_TIME_TRIGGER_MODE,
                 'schedule': '0 */5 * * * *',
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian',
'location': 'eastus',
'resourceGroupName': 'test'},
'storageAccount': {
'name': 'testschemaname'
},
'appInsights': {
'name': 'testschemaname'
}
}}
})
self.assertTrue(p)
def test_container_event_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-event-mode',
'resource': 'azure.vm',
'mode':
{'type': CONTAINER_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
self.assertTrue(p)
def test_container_periodic_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-periodic-mode',
'resource': 'azure.vm',
'mode':
{'type': CONTAINER_TIME_TRIGGER_MODE,
                 'schedule': '*/5 * * * *'}
})
self.assertTrue(p)
def test_init_azure_function_mode_with_service_plan(self):
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite'],
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian',
'location': 'eastus',
'resourceGroupName': 'test'}
}}
})
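        # With an explicit service plan, App Insights defaults to the plan's
        # name, location and resource group; the storage account name is generated.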
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertEqual(function_mode.policy_name, p.data['name'])
self.assertTrue(params.storage_account['name'].startswith('custodian'))
self.assertEqual(params.app_insights['name'], 'test-cloud-custodian')
self.assertEqual(params.service_plan['name'], "test-cloud-custodian")
self.assertEqual(params.service_plan['location'], "eastus")
self.assertEqual(params.app_insights['location'], "eastus")
self.assertEqual(params.storage_account['location'], "eastus")
self.assertEqual(params.storage_account['resource_group_name'], 'test')
self.assertEqual(params.app_insights['resource_group_name'], 'test')
self.assertEqual(params.service_plan['resource_group_name'], "test")
self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
def test_init_azure_function_mode_no_service_plan_name(self):
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
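        # With no provision-options, everything falls back to the
        # 'cloud-custodian' defaults in eastus.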
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertEqual(function_mode.policy_name, p.data['name'])
self.assertEqual(params.service_plan['name'], "cloud-custodian")
self.assertEqual(params.service_plan['location'], "eastus")
self.assertEqual(params.service_plan['resource_group_name'], "cloud-custodian")
self.assertEqual(params.app_insights['name'], 'cloud-custodian')
self.assertEqual(params.app_insights['location'], "eastus")
self.assertEqual(params.app_insights['resource_group_name'], 'cloud-custodian')
self.assertTrue(params.storage_account['name'].startswith('custodian'))
self.assertEqual(params.storage_account['location'], "eastus")
self.assertEqual(params.storage_account['resource_group_name'], 'cloud-custodian')
self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
def test_init_azure_function_mode_invalid_policy_name(self):
p = self.load_policy({
'name': 'this-policy-name-is-going-to-be-too-long-since-the-maximum-size-is-60',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
function_mode = AzureFunctionMode(p)
with self.assertRaises(ValueError):
function_mode.get_function_app_params()
def test_init_azure_function_mode_invalid_characters_in_policy_name(self):
p = self.load_policy({
'name': 'invalid_policy_name1',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertRegexpMatches(params.function_app_name, "invalid-policy-name1-[a-zA-Z0-9]+")
def test_init_azure_function_mode_with_resource_ids(self):
ai_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
'/testrg/providers/microsoft.insights/components/testai'
sp_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
'/testrg/providers/Microsoft.Web/serverFarms/testsp'
sa_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
'/testrg/providers/Microsoft.Storage/storageAccounts/testsa'
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite'],
'provision-options': {
'servicePlan': sp_id,
'storageAccount': sa_id,
'appInsights': ai_id
}}
})
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertEqual(function_mode.policy_name, p.data['name'])
self.assertEqual(params.storage_account['id'], sa_id)
self.assertEqual(params.storage_account['name'], 'testsa')
self.assertEqual(params.storage_account['resource_group_name'], 'testrg')
self.assertEqual(params.app_insights['id'], ai_id)
self.assertEqual(params.app_insights['name'], 'testai')
self.assertEqual(params.app_insights['resource_group_name'], 'testrg')
self.assertEqual(params.service_plan['id'], sp_id)
self.assertEqual(params.service_plan['name'], "testsp")
self.assertEqual(params.service_plan['resource_group_name'], "testrg")
self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
def test_event_grid_mode_creates_advanced_filtered_subscription(self):
p = self.load_policy({
'name': 'test-azure-event',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']},
})
with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
storage_account = StorageAccount(id=1, location='westus')
event_mode = AzureEventGridMode(p)
event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
event_mode._create_event_subscription(storage_account, 'some_queue', None)
name, args, kwargs = mock_create.mock_calls[0]
# verify the advanced filter created
event_filter = args[4].advanced_filters[0]
self.assertEqual(event_filter.key, 'Data.OperationName')
self.assertEqual(event_filter.values, ['Microsoft.Compute/virtualMachines/write'])
self.assertEqual(event_filter.operator_type, 'StringIn')
def test_event_grid_mode_creates_advanced_filtered_subscription_with_multiple_events(self):
p = self.load_policy({
'name': 'test-azure-event',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events':
['VmWrite',
{
'resourceProvider': 'Microsoft.Resources/subscriptions/resourceGroups',
'event': 'write'
}]},
})
with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
storage_account = StorageAccount(id=1, location='westus')
event_mode = AzureEventGridMode(p)
event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
event_mode._create_event_subscription(storage_account, 'some_queue', None)
name, args, kwargs = mock_create.mock_calls[0]
# verify the advanced filter created
event_filter = args[4].advanced_filters[0]
self.assertEqual(event_filter.key, 'Data.OperationName')
self.assertEqual(event_filter.values,
['Microsoft.Compute/virtualMachines/write',
'Microsoft.Resources/subscriptions/resourceGroups/write'])
self.assertEqual(event_filter.operator_type, 'StringIn')
def test_extract_properties(self):
resource_id = '/subscriptions/{0}/resourceGroups/rg/providers' \
'/Microsoft.Web/serverFarms/test'.format(DEFAULT_SUBSCRIPTION_ID)
r = AzureFunctionMode.extract_properties({}, '', {})
self.assertEqual(r, {})
r = AzureFunctionMode.extract_properties({}, 'v', {'v': 'default'})
self.assertEqual(r, {'v': 'default'})
r = AzureFunctionMode.extract_properties({'v': resource_id}, 'v', {'v': 'default'})
self.assertEqual(r, {'id': resource_id, 'name': 'test', 'resource_group_name': 'rg'})
r = AzureFunctionMode.extract_properties(
{'v': {'test1': 'value1', 'testCamel': 'valueCamel'}},
'v',
{'test1': None, 'test_camel': None})
self.assertEqual(r, {'test1': 'value1', 'test_camel': 'valueCamel'})
r = AzureFunctionMode.extract_properties(
{'v': {'t1': 'v1', 'nestedValue': {'testCamel': 'valueCamel'}}},
'v',
{'t1': None, 'nested_value': {'test_camel': None}, 't2': 'v2'})
self.assertEqual(r, {'t1': 'v1', 't2': 'v2', 'nested_value': {'test_camel': 'valueCamel'}})
@arm_template('emptyrg.json')
@cassette_name('resourcegroup')
@patch('c7n_azure.resources.resourcegroup.DeleteResourceGroup._process_resource')
def test_empty_group_function_event(self, mock_delete):
p = self.load_policy({
'name': 'test-azure-resource-group',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['ResourceGroupWrite'],
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian'
}
}},
'resource': 'azure.resourcegroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value': 'test_emptyrg'},
{'type': 'empty-group'}],
'actions': [
{'type': 'delete'}]})
event = AzurePolicyModeTest.get_sample_event()
resources = p.push(event, None)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'test_emptyrg')
self.assertTrue(mock_delete.called)
@arm_template('emptyrg.json')
@cassette_name('resourcegroup')
@patch('c7n_azure.resources.resourcegroup.DeleteResourceGroup._process_resource')
def test_empty_group_container_event(self, mock_delete):
p = self.load_policy({
'name': 'test-azure-resource-group',
'mode':
{'type': CONTAINER_EVENT_TRIGGER_MODE,
'events': ['ResourceGroupWrite']},
'resource': 'azure.resourcegroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value': 'test_emptyrg'},
{'type': 'empty-group'}],
'actions': [
{'type': 'delete'}]})
event = AzurePolicyModeTest.get_sample_event()
resources = p.push(event, None)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'test_emptyrg')
self.assertTrue(mock_delete.called)
@arm_template('emptyrg.json')
def test_empty_group_container_scheduled(self):
p = self.load_policy({
'name': 'test-azure-resource-group',
'mode':
{'type': CONTAINER_TIME_TRIGGER_MODE,
'schedule': '* * * * *'},
'resource': 'azure.resourcegroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value': 'test_emptyrg'},
{'type': 'empty-group'}]})
resources = p.push(None, None)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'test_emptyrg')
def test_extract_resource_id(self):
rg_id = "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_emptyrg"
nsg_id = rg_id + '/providers/Microsoft.Network/networkSecurityGroups/test-nsg'
sr_id = nsg_id + '/securityRules/test-rule'
resource_type = ''
policy = Mock()
policy.resource_manager.resource_type.resource_type = resource_type
event = {'subject': rg_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Resources/subscriptions/resourceGroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), rg_id)
event = {'subject': nsg_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Resources/subscriptions/resourceGroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), rg_id)
event = {'subject': nsg_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Network/networksecuritygroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), nsg_id)
event = {'subject': sr_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Network/networksecuritygroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), nsg_id)
@staticmethod
def get_sample_event():
return {"subject": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/"
"resourceGroups/test_emptyrg",
"eventType": "Microsoft.Resources.ResourceWriteSuccess",
"eventTime": "2019-07-16T18:30:43.3595255Z",
"id": "619d2674-b396-4356-9619-6c5a52fe4e88",
"data": {
"correlationId": "7dd5a476-e052-40e2-99e4-bb9852dc1f86",
"resourceProvider": "Microsoft.Resources",
"resourceUri": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/"
"resourceGroups/test_emptyrg",
"operationName": "Microsoft.Resources/subscriptions/resourceGroups/write",
"status": "Succeeded"
},
"topic": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e"}
| # Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure.mgmt.storage.models import StorageAccount
from azure_common import BaseTest, DEFAULT_SUBSCRIPTION_ID, arm_template, cassette_name
from c7n_azure.constants import FUNCTION_EVENT_TRIGGER_MODE, FUNCTION_TIME_TRIGGER_MODE, \
CONTAINER_EVENT_TRIGGER_MODE, CONTAINER_TIME_TRIGGER_MODE
from c7n_azure.policy import AzureEventGridMode, AzureFunctionMode, AzureModeCommon
from mock import mock, patch, Mock
class AzurePolicyModeTest(BaseTest):
def setUp(self):
super(AzurePolicyModeTest, self).setUp()
def test_azure_function_event_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite'],
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian',
'location': 'eastus',
'resourceGroupName': 'test'},
'storageAccount': {
'name': 'testschemaname'
},
'appInsights': {
'name': 'testschemaname'
}
}}
})
self.assertTrue(p)
def test_azure_function_periodic_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_TIME_TRIGGER_MODE,
'schedule': '0 */5 * * * *',
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian',
'location': 'eastus',
'resourceGroupName': 'test'},
'storageAccount': {
'name': 'testschemaname'
},
'appInsights': {
'name': 'testschemaname'
}
}}
})
self.assertTrue(p)
def test_container_event_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-event-mode',
'resource': 'azure.vm',
'mode':
{'type': CONTAINER_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
self.assertTrue(p)
def test_container_periodic_mode_schema_validation(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-periodic-mode',
'resource': 'azure.vm',
'mode':
{'type': CONTAINER_TIME_TRIGGER_MODE,
'schedule': '*/5 * * * *'}
})
self.assertTrue(p)
def test_init_azure_function_mode_with_service_plan(self):
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite'],
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian',
'location': 'eastus',
'resourceGroupName': 'test'}
}}
})
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertEqual(function_mode.policy_name, p.data['name'])
self.assertTrue(params.storage_account['name'].startswith('custodian'))
self.assertEqual(params.app_insights['name'], 'test-cloud-custodian')
self.assertEqual(params.service_plan['name'], "test-cloud-custodian")
self.assertEqual(params.service_plan['location'], "eastus")
self.assertEqual(params.app_insights['location'], "eastus")
self.assertEqual(params.storage_account['location'], "eastus")
self.assertEqual(params.storage_account['resource_group_name'], 'test')
self.assertEqual(params.app_insights['resource_group_name'], 'test')
self.assertEqual(params.service_plan['resource_group_name'], "test")
self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
def test_init_azure_function_mode_no_service_plan_name(self):
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertEqual(function_mode.policy_name, p.data['name'])
self.assertEqual(params.service_plan['name'], "cloud-custodian")
self.assertEqual(params.service_plan['location'], "eastus")
self.assertEqual(params.service_plan['resource_group_name'], "cloud-custodian")
self.assertEqual(params.app_insights['name'], 'cloud-custodian')
self.assertEqual(params.app_insights['location'], "eastus")
self.assertEqual(params.app_insights['resource_group_name'], 'cloud-custodian')
self.assertTrue(params.storage_account['name'].startswith('custodian'))
self.assertEqual(params.storage_account['location'], "eastus")
self.assertEqual(params.storage_account['resource_group_name'], 'cloud-custodian')
self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
def test_init_azure_function_mode_invalid_policy_name(self):
p = self.load_policy({
'name': 'this-policy-name-is-going-to-be-too-long-since-the-maximum-size-is-60',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
function_mode = AzureFunctionMode(p)
with self.assertRaises(ValueError):
function_mode.get_function_app_params()
def test_init_azure_function_mode_invalid_characters_in_policy_name(self):
p = self.load_policy({
'name': 'invalid_policy_name1',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']}
})
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertRegexpMatches(params.function_app_name, "invalid-policy-name1-[a-zA-Z0-9]+")
def test_init_azure_function_mode_with_resource_ids(self):
ai_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
'/testrg/providers/microsoft.insights/components/testai'
sp_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
'/testrg/providers/Microsoft.Web/serverFarms/testsp'
sa_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
'/testrg/providers/Microsoft.Storage/storageAccounts/testsa'
p = self.load_policy({
'name': 'test-azure-serverless-mode',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite'],
'provision-options': {
'servicePlan': sp_id,
'storageAccount': sa_id,
'appInsights': ai_id
}}
})
function_mode = AzureFunctionMode(p)
params = function_mode.get_function_app_params()
self.assertEqual(function_mode.policy_name, p.data['name'])
self.assertEqual(params.storage_account['id'], sa_id)
self.assertEqual(params.storage_account['name'], 'testsa')
self.assertEqual(params.storage_account['resource_group_name'], 'testrg')
self.assertEqual(params.app_insights['id'], ai_id)
self.assertEqual(params.app_insights['name'], 'testai')
self.assertEqual(params.app_insights['resource_group_name'], 'testrg')
self.assertEqual(params.service_plan['id'], sp_id)
self.assertEqual(params.service_plan['name'], "testsp")
self.assertEqual(params.service_plan['resource_group_name'], "testrg")
self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
def test_event_grid_mode_creates_advanced_filtered_subscription(self):
p = self.load_policy({
'name': 'test-azure-event',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['VmWrite']},
})
with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
storage_account = StorageAccount(id=1, location='westus')
event_mode = AzureEventGridMode(p)
event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
event_mode._create_event_subscription(storage_account, 'some_queue', None)
name, args, kwargs = mock_create.mock_calls[0]
# verify the advanced filter created
event_filter = args[4].advanced_filters[0]
self.assertEqual(event_filter.key, 'Data.OperationName')
self.assertEqual(event_filter.values, ['Microsoft.Compute/virtualMachines/write'])
self.assertEqual(event_filter.operator_type, 'StringIn')
def test_event_grid_mode_creates_advanced_filtered_subscription_with_multiple_events(self):
p = self.load_policy({
'name': 'test-azure-event',
'resource': 'azure.vm',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events':
['VmWrite',
{
'resourceProvider': 'Microsoft.Resources/subscriptions/resourceGroups',
'event': 'write'
}]},
})
with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
storage_account = StorageAccount(id=1, location='westus')
event_mode = AzureEventGridMode(p)
event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
event_mode._create_event_subscription(storage_account, 'some_queue', None)
name, args, kwargs = mock_create.mock_calls[0]
# verify the advanced filter created
event_filter = args[4].advanced_filters[0]
self.assertEqual(event_filter.key, 'Data.OperationName')
self.assertEqual(event_filter.values,
['Microsoft.Compute/virtualMachines/write',
'Microsoft.Resources/subscriptions/resourceGroups/write'])
self.assertEqual(event_filter.operator_type, 'StringIn')
def test_extract_properties(self):
resource_id = '/subscriptions/{0}/resourceGroups/rg/providers' \
'/Microsoft.Web/serverFarms/test'.format(DEFAULT_SUBSCRIPTION_ID)
r = AzureFunctionMode.extract_properties({}, '', {})
self.assertEqual(r, {})
r = AzureFunctionMode.extract_properties({}, 'v', {'v': 'default'})
self.assertEqual(r, {'v': 'default'})
r = AzureFunctionMode.extract_properties({'v': resource_id}, 'v', {'v': 'default'})
self.assertEqual(r, {'id': resource_id, 'name': 'test', 'resource_group_name': 'rg'})
r = AzureFunctionMode.extract_properties(
{'v': {'test1': 'value1', 'testCamel': 'valueCamel'}},
'v',
{'test1': None, 'test_camel': None})
self.assertEqual(r, {'test1': 'value1', 'test_camel': 'valueCamel'})
r = AzureFunctionMode.extract_properties(
{'v': {'t1': 'v1', 'nestedValue': {'testCamel': 'valueCamel'}}},
'v',
{'t1': None, 'nested_value': {'test_camel': None}, 't2': 'v2'})
self.assertEqual(r, {'t1': 'v1', 't2': 'v2', 'nested_value': {'test_camel': 'valueCamel'}})
@arm_template('emptyrg.json')
@cassette_name('resourcegroup')
@patch('c7n_azure.resources.resourcegroup.DeleteResourceGroup._process_resource')
def test_empty_group_function_event(self, mock_delete):
p = self.load_policy({
'name': 'test-azure-resource-group',
'mode':
{'type': FUNCTION_EVENT_TRIGGER_MODE,
'events': ['ResourceGroupWrite'],
'provision-options': {
'servicePlan': {
'name': 'test-cloud-custodian'
}
}},
'resource': 'azure.resourcegroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value': 'test_emptyrg'},
{'type': 'empty-group'}],
'actions': [
{'type': 'delete'}]})
event = AzurePolicyModeTest.get_sample_event()
resources = p.push(event, None)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'test_emptyrg')
self.assertTrue(mock_delete.called)
@arm_template('emptyrg.json')
@cassette_name('resourcegroup')
@patch('c7n_azure.resources.resourcegroup.DeleteResourceGroup._process_resource')
def test_empty_group_container_event(self, mock_delete):
p = self.load_policy({
'name': 'test-azure-resource-group',
'mode':
{'type': CONTAINER_EVENT_TRIGGER_MODE,
'events': ['ResourceGroupWrite']},
'resource': 'azure.resourcegroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value': 'test_emptyrg'},
{'type': 'empty-group'}],
'actions': [
{'type': 'delete'}]})
event = AzurePolicyModeTest.get_sample_event()
resources = p.push(event, None)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'test_emptyrg')
self.assertTrue(mock_delete.called)
@arm_template('emptyrg.json')
def test_empty_group_container_scheduled(self):
p = self.load_policy({
'name': 'test-azure-resource-group',
'mode':
{'type': CONTAINER_TIME_TRIGGER_MODE,
'schedule': '* * * * *'},
'resource': 'azure.resourcegroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value': 'test_emptyrg'},
{'type': 'empty-group'}]})
resources = p.push(None, None)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'test_emptyrg')
def test_extract_resource_id(self):
rg_id = "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_emptyrg"
nsg_id = rg_id + '/providers/Microsoft.Network/networkSecurityGroups/test-nsg'
sr_id = nsg_id + '/securityRules/test-rule'
resource_type = ''
policy = Mock()
policy.resource_manager.resource_type.resource_type = resource_type
event = {'subject': rg_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Resources/subscriptions/resourceGroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), rg_id)
event = {'subject': nsg_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Resources/subscriptions/resourceGroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), rg_id)
event = {'subject': nsg_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Network/networksecuritygroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), nsg_id)
event = {'subject': sr_id}
policy.resource_manager.resource_type.resource_type =\
'Microsoft.Network/networksecuritygroups'
self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), nsg_id)
@staticmethod
def get_sample_event():
return {"subject": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/"
"resourceGroups/test_emptyrg",
"eventType": "Microsoft.Resources.ResourceWriteSuccess",
"eventTime": "2019-07-16T18:30:43.3595255Z",
"id": "619d2674-b396-4356-9619-6c5a52fe4e88",
"data": {
"correlationId": "7dd5a476-e052-40e2-99e4-bb9852dc1f86",
"resourceProvider": "Microsoft.Resources",
"resourceUri": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/"
"resourceGroups/test_emptyrg",
"operationName": "Microsoft.Resources/subscriptions/resourceGroups/write",
"status": "Succeeded"
},
"topic": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e"}
| en | 0.858767 | # Copyright 2015-2018 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # verify the advanced filter created # verify the advanced filter created | 1.57895 | 2 |
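The event-grid tests above rely on how an Event Grid subject string is cut back to the scope of the policy's resource type. The following standalone sketch is not the c7n_azure implementation; the helper name and logic are assumptions that only reproduce the behaviour asserted in test_extract_resource_id.

def truncate_subject_to_type(subject, resource_type):
    # Locate the type's last path segment (e.g. 'resourceGroups') in the subject,
    # comparing case-insensitively because ARM ids mix casing.
    last_segment = resource_type.split('/')[-1].lower()
    parts = subject.split('/')
    idx = [p.lower() for p in parts].index(last_segment)
    # Keep everything up to and including the resource name that follows the type segment.
    return '/'.join(parts[:idx + 2])


if __name__ == '__main__':
    rg = "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_emptyrg"
    nsg = rg + "/providers/Microsoft.Network/networkSecurityGroups/test-nsg"
    rule = nsg + "/securityRules/test-rule"
    assert truncate_subject_to_type(nsg, 'Microsoft.Resources/subscriptions/resourceGroups') == rg
    assert truncate_subject_to_type(rule, 'Microsoft.Network/networksecuritygroups') == nsg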
openml/extensions/extension_interface.py | Rong-Inspur/openml-python | 0 | 6630774 | # License: BSD 3-Clause
from abc import ABC, abstractmethod
from collections import OrderedDict # noqa: F401
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
import numpy as np
import scipy.sparse
# Avoid import cycles: https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
from openml.flows import OpenMLFlow
from openml.tasks.task import OpenMLTask
from openml.runs.trace import OpenMLRunTrace, OpenMLTraceIteration # noqa F401
class Extension(ABC):
"""Defines the interface to connect machine learning libraries to OpenML-Python.
See ``openml.extension.sklearn.extension`` for an implementation to bootstrap from.
"""
################################################################################################
# General setup
@classmethod
@abstractmethod
def can_handle_flow(cls, flow: 'OpenMLFlow') -> bool:
"""Check whether a given flow can be handled by this extension.
This is typically done by parsing the ``external_version`` field.
Parameters
----------
flow : OpenMLFlow
Returns
-------
bool
"""
@classmethod
@abstractmethod
def can_handle_model(cls, model: Any) -> bool:
"""Check whether a model flow can be handled by this extension.
This is typically done by checking the type of the model, or the package it belongs to.
Parameters
----------
model : Any
Returns
-------
bool
"""
################################################################################################
# Abstract methods for flow serialization and de-serialization
@abstractmethod
def flow_to_model(self, flow: 'OpenMLFlow',
initialize_with_defaults: bool = False,
strict_version: bool = True) -> Any:
"""Instantiate a model from the flow representation.
Parameters
----------
flow : OpenMLFlow
initialize_with_defaults : bool, optional (default=False)
If this flag is set, the hyperparameter values of flows will be
ignored and a flow with its defaults is returned.
strict_version : bool, default=True
Whether to fail if version requirements are not fulfilled.
Returns
-------
Any
"""
@abstractmethod
def model_to_flow(self, model: Any) -> 'OpenMLFlow':
"""Transform a model to a flow for uploading it to OpenML.
Parameters
----------
model : Any
Returns
-------
OpenMLFlow
"""
@abstractmethod
def get_version_information(self) -> List[str]:
"""List versions of libraries required by the flow.
Returns
-------
List
"""
@abstractmethod
def create_setup_string(self, model: Any) -> str:
"""Create a string which can be used to reinstantiate the given model.
Parameters
----------
model : Any
Returns
-------
str
"""
################################################################################################
# Abstract methods for performing runs with extension modules
@abstractmethod
def is_estimator(self, model: Any) -> bool:
"""Check whether the given model is an estimator for the given extension.
This function is only required for backwards compatibility and will be removed in the
near future.
Parameters
----------
model : Any
Returns
-------
bool
"""
@abstractmethod
def seed_model(self, model: Any, seed: Optional[int]) -> Any:
"""Set the seed of all the unseeded components of a model and return the seeded model.
Required so that all seed information can be uploaded to OpenML for reproducible results.
Parameters
----------
model : Any
The model to be seeded
seed : int
Returns
-------
model
"""
@abstractmethod
def _run_model_on_fold(
self,
model: Any,
task: 'OpenMLTask',
X_train: Union[np.ndarray, scipy.sparse.spmatrix],
rep_no: int,
fold_no: int,
y_train: Optional[np.ndarray] = None,
X_test: Optional[Union[np.ndarray, scipy.sparse.spmatrix]] = None,
) -> Tuple[np.ndarray, np.ndarray, 'OrderedDict[str, float]', Optional['OpenMLRunTrace']]:
"""Run a model on a repeat,fold,subsample triplet of the task and return prediction information.
Returns the data that is necessary to construct the OpenML Run object. Is used by
:func:`openml.runs.run_flow_on_task`.
Parameters
----------
model : Any
The UNTRAINED model to run. The model instance will be copied and not altered.
task : OpenMLTask
The task to run the model on.
X_train : array-like
Training data for the given repetition and fold.
rep_no : int
The repeat of the experiment (0-based; in case of 1 time CV, always 0)
fold_no : int
The fold nr of the experiment (0-based; in case of holdout, always 0)
y_train : Optional[np.ndarray] (default=None)
Target attributes for supervised tasks. In case of classification, these are integer
indices to the potential classes specified by dataset.
X_test : Optional, array-like (default=None)
Test attributes to test for generalization in supervised tasks.
Returns
-------
predictions : np.ndarray
Model predictions.
probabilities : Optional, np.ndarray
Predicted probabilities (only applicable for supervised classification tasks).
user_defined_measures : OrderedDict[str, float]
User defined measures that were generated on this fold
trace : Optional, OpenMLRunTrace
Hyperparameter optimization trace (only applicable for supervised tasks with
hyperparameter optimization).
"""
@abstractmethod
def obtain_parameter_values(
self,
flow: 'OpenMLFlow',
model: Any = None,
) -> List[Dict[str, Any]]:
"""Extracts all parameter settings required for the flow from the model.
If no explicit model is provided, the parameters will be extracted from `flow.model`
instead.
Parameters
----------
flow : OpenMLFlow
OpenMLFlow object (containing flow ids, i.e., it has to be downloaded from the server)
model: Any, optional (default=None)
The model from which to obtain the parameter values. Must match the flow signature.
If None, use the model specified in ``OpenMLFlow.model``.
Returns
-------
list
A list of dicts, where each dict has the following entries:
- ``oml:name`` : str: The OpenML parameter name
- ``oml:value`` : mixed: A representation of the parameter value
- ``oml:component`` : int: flow id to which the parameter belongs
"""
################################################################################################
# Abstract methods for hyperparameter optimization
@abstractmethod
def instantiate_model_from_hpo_class(
self,
model: Any,
trace_iteration: 'OpenMLTraceIteration',
) -> Any:
"""Instantiate a base model which can be searched over by the hyperparameter optimization
model.
Parameters
----------
model : Any
A hyperparameter optimization model which defines the model to be instantiated.
trace_iteration : OpenMLTraceIteration
Describing the hyperparameter settings to instantiate.
Returns
-------
Any
"""
# TODO a trace belongs to a run and therefore a flow -> simplify this part of the interface!
| # License: BSD 3-Clause
from abc import ABC, abstractmethod
from collections import OrderedDict # noqa: F401
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
import numpy as np
import scipy.sparse
# Avoid import cycles: https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
from openml.flows import OpenMLFlow
from openml.tasks.task import OpenMLTask
from openml.runs.trace import OpenMLRunTrace, OpenMLTraceIteration # noqa F401
class Extension(ABC):
"""Defines the interface to connect machine learning libraries to OpenML-Python.
See ``openml.extension.sklearn.extension`` for an implementation to bootstrap from.
"""
################################################################################################
# General setup
@classmethod
@abstractmethod
def can_handle_flow(cls, flow: 'OpenMLFlow') -> bool:
"""Check whether a given flow can be handled by this extension.
This is typically done by parsing the ``external_version`` field.
Parameters
----------
flow : OpenMLFlow
Returns
-------
bool
"""
@classmethod
@abstractmethod
def can_handle_model(cls, model: Any) -> bool:
"""Check whether a model flow can be handled by this extension.
This is typically done by checking the type of the model, or the package it belongs to.
Parameters
----------
model : Any
Returns
-------
bool
"""
################################################################################################
# Abstract methods for flow serialization and de-serialization
@abstractmethod
def flow_to_model(self, flow: 'OpenMLFlow',
initialize_with_defaults: bool = False,
strict_version: bool = True) -> Any:
"""Instantiate a model from the flow representation.
Parameters
----------
flow : OpenMLFlow
initialize_with_defaults : bool, optional (default=False)
If this flag is set, the hyperparameter values of flows will be
ignored and a flow with its defaults is returned.
strict_version : bool, default=True
Whether to fail if version requirements are not fulfilled.
Returns
-------
Any
"""
@abstractmethod
def model_to_flow(self, model: Any) -> 'OpenMLFlow':
"""Transform a model to a flow for uploading it to OpenML.
Parameters
----------
model : Any
Returns
-------
OpenMLFlow
"""
@abstractmethod
def get_version_information(self) -> List[str]:
"""List versions of libraries required by the flow.
Returns
-------
List
"""
@abstractmethod
def create_setup_string(self, model: Any) -> str:
"""Create a string which can be used to reinstantiate the given model.
Parameters
----------
model : Any
Returns
-------
str
"""
################################################################################################
# Abstract methods for performing runs with extension modules
@abstractmethod
def is_estimator(self, model: Any) -> bool:
"""Check whether the given model is an estimator for the given extension.
This function is only required for backwards compatibility and will be removed in the
near future.
Parameters
----------
model : Any
Returns
-------
bool
"""
@abstractmethod
def seed_model(self, model: Any, seed: Optional[int]) -> Any:
"""Set the seed of all the unseeded components of a model and return the seeded model.
Required so that all seed information can be uploaded to OpenML for reproducible results.
Parameters
----------
model : Any
The model to be seeded
seed : int
Returns
-------
model
"""
@abstractmethod
def _run_model_on_fold(
self,
model: Any,
task: 'OpenMLTask',
X_train: Union[np.ndarray, scipy.sparse.spmatrix],
rep_no: int,
fold_no: int,
y_train: Optional[np.ndarray] = None,
X_test: Optional[Union[np.ndarray, scipy.sparse.spmatrix]] = None,
) -> Tuple[np.ndarray, np.ndarray, 'OrderedDict[str, float]', Optional['OpenMLRunTrace']]:
"""Run a model on a repeat,fold,subsample triplet of the task and return prediction information.
Returns the data that is necessary to construct the OpenML Run object. Is used by
:func:`openml.runs.run_flow_on_task`.
Parameters
----------
model : Any
The UNTRAINED model to run. The model instance will be copied and not altered.
task : OpenMLTask
The task to run the model on.
X_train : array-like
Training data for the given repetition and fold.
rep_no : int
The repeat of the experiment (0-based; in case of 1 time CV, always 0)
fold_no : int
The fold nr of the experiment (0-based; in case of holdout, always 0)
y_train : Optional[np.ndarray] (default=None)
Target attributes for supervised tasks. In case of classification, these are integer
indices to the potential classes specified by dataset.
X_test : Optional, array-like (default=None)
Test attributes to test for generalization in supervised tasks.
Returns
-------
predictions : np.ndarray
Model predictions.
probabilities : Optional, np.ndarray
Predicted probabilities (only applicable for supervised classification tasks).
user_defined_measures : OrderedDict[str, float]
User defined measures that were generated on this fold
trace : Optional, OpenMLRunTrace
Hyperparameter optimization trace (only applicable for supervised tasks with
hyperparameter optimization).
"""
@abstractmethod
def obtain_parameter_values(
self,
flow: 'OpenMLFlow',
model: Any = None,
) -> List[Dict[str, Any]]:
"""Extracts all parameter settings required for the flow from the model.
If no explicit model is provided, the parameters will be extracted from `flow.model`
instead.
Parameters
----------
flow : OpenMLFlow
OpenMLFlow object (containing flow ids, i.e., it has to be downloaded from the server)
model: Any, optional (default=None)
The model from which to obtain the parameter values. Must match the flow signature.
If None, use the model specified in ``OpenMLFlow.model``.
Returns
-------
list
A list of dicts, where each dict has the following entries:
- ``oml:name`` : str: The OpenML parameter name
- ``oml:value`` : mixed: A representation of the parameter value
- ``oml:component`` : int: flow id to which the parameter belongs
"""
################################################################################################
# Abstract methods for hyperparameter optimization
@abstractmethod
def instantiate_model_from_hpo_class(
self,
model: Any,
trace_iteration: 'OpenMLTraceIteration',
) -> Any:
"""Instantiate a base model which can be searched over by the hyperparameter optimization
model.
Parameters
----------
model : Any
A hyperparameter optimization model which defines the model to be instantiated.
trace_iteration : OpenMLTraceIteration
Describing the hyperparameter settings to instantiate.
Returns
-------
Any
"""
# TODO a trace belongs to a run and therefore a flow -> simplify this part of the interface!
| en | 0.616455 | # License: BSD 3-Clause # noqa: F401 # Avoid import cycles: https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles # noqa F401 Defines the interface to connect machine learning libraries to OpenML-Python. See ``openml.extension.sklearn.extension`` for an implementation to bootstrap from. ################################################################################################ # General setup Check whether a given flow can be handled by this extension. This is typically done by parsing the ``external_version`` field. Parameters ---------- flow : OpenMLFlow Returns ------- bool Check whether a model flow can be handled by this extension. This is typically done by checking the type of the model, or the package it belongs to. Parameters ---------- model : Any Returns ------- bool ################################################################################################ # Abstract methods for flow serialization and de-serialization Instantiate a model from the flow representation. Parameters ---------- flow : OpenMLFlow initialize_with_defaults : bool, optional (default=False) If this flag is set, the hyperparameter values of flows will be ignored and a flow with its defaults is returned. strict_version : bool, default=True Whether to fail if version requirements are not fulfilled. Returns ------- Any Transform a model to a flow for uploading it to OpenML. Parameters ---------- model : Any Returns ------- OpenMLFlow List versions of libraries required by the flow. Returns ------- List Create a string which can be used to reinstantiate the given model. Parameters ---------- model : Any Returns ------- str ################################################################################################ # Abstract methods for performing runs with extension modules Check whether the given model is an estimator for the given extension. This function is only required for backwards compatibility and will be removed in the near future. Parameters ---------- model : Any Returns ------- bool Set the seed of all the unseeded components of a model and return the seeded model. Required so that all seed information can be uploaded to OpenML for reproducible results. Parameters ---------- model : Any The model to be seeded seed : int Returns ------- model Run a model on a repeat,fold,subsample triplet of the task and return prediction information. Returns the data that is necessary to construct the OpenML Run object. Is used by :func:`openml.runs.run_flow_on_task`. Parameters ---------- model : Any The UNTRAINED model to run. The model instance will be copied and not altered. task : OpenMLTask The task to run the model on. X_train : array-like Training data for the given repetition and fold. rep_no : int The repeat of the experiment (0-based; in case of 1 time CV, always 0) fold_no : int The fold nr of the experiment (0-based; in case of holdout, always 0) y_train : Optional[np.ndarray] (default=None) Target attributes for supervised tasks. In case of classification, these are integer indices to the potential classes specified by dataset. X_test : Optional, array-like (default=None) Test attributes to test for generalization in supervised tasks. Returns ------- predictions : np.ndarray Model predictions. probabilities : Optional, np.ndarray Predicted probabilities (only applicable for supervised classification tasks). 
user_defined_measures : OrderedDict[str, float] User defined measures that were generated on this fold trace : Optional, OpenMLRunTrace Hyperparameter optimization trace (only applicable for supervised tasks with hyperparameter optimization). Extracts all parameter settings required for the flow from the model. If no explicit model is provided, the parameters will be extracted from `flow.model` instead. Parameters ---------- flow : OpenMLFlow OpenMLFlow object (containing flow ids, i.e., it has to be downloaded from the server) model: Any, optional (default=None) The model from which to obtain the parameter values. Must match the flow signature. If None, use the model specified in ``OpenMLFlow.model``. Returns ------- list A list of dicts, where each dict has the following entries: - ``oml:name`` : str: The OpenML parameter name - ``oml:value`` : mixed: A representation of the parameter value - ``oml:component`` : int: flow id to which the parameter belongs ################################################################################################ # Abstract methods for hyperparameter optimization Instantiate a base model which can be searched over by the hyperparameter optimization model. Parameters ---------- model : Any A hyperparameter optimization model which defines the model to be instantiated. trace_iteration : OpenMLTraceIteration Describing the hyperparameter settings to instantiate. Returns ------- Any # TODO a trace belongs to a run and therefore a flow -> simplify this part of the interface! | 2.326919 | 2 |
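Because the Extension docstrings above describe the contract only abstractly, here is a minimal skeleton of a connector for an imaginary library called mylib. Every method body below is a placeholder assumption rather than working serialization logic, and the import assumes the package exposes Extension and register_extension from openml.extensions, as it does for the bundled scikit-learn extension.

from openml.extensions import Extension, register_extension


class MyLibExtension(Extension):
    """Illustrative stub connecting the imaginary mylib library to OpenML-Python."""

    @classmethod
    def can_handle_flow(cls, flow):
        # A real extension parses flow.external_version; 'mylib==' is a made-up tag.
        return flow.external_version.startswith('mylib==')

    @classmethod
    def can_handle_model(cls, model):
        return type(model).__module__.startswith('mylib')

    def flow_to_model(self, flow, initialize_with_defaults=False, strict_version=True):
        raise NotImplementedError("build a mylib estimator from flow.parameters here")

    def model_to_flow(self, model):
        raise NotImplementedError("serialize the estimator into an OpenMLFlow here")

    def get_version_information(self):
        return ['mylib==0.1']

    def create_setup_string(self, model):
        return repr(model)

    def is_estimator(self, model):
        return hasattr(model, 'fit')

    def seed_model(self, model, seed):
        return model  # a real extension pushes `seed` into every random component

    def _run_model_on_fold(self, model, task, X_train, rep_no, fold_no,
                           y_train=None, X_test=None):
        raise NotImplementedError("fit, predict and collect user-defined measures here")

    def obtain_parameter_values(self, flow, model=None):
        return []

    def instantiate_model_from_hpo_class(self, model, trace_iteration):
        return model


# register_extension(MyLibExtension)  # registering makes OpenML-Python discover the extension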
1-mouth01/day07/exe03.py | gary-gggggg/gary | 4 | 6630775 | """Multiplication table"""
list1=[1,2,3,4,5,6,7,8,9]
list2=[1,2,3,4,5,6,7,8,9]
for l1 in list1:
for l2 in list2:
print(l1*l2, end="\t")
print() | """乘法表"""
list1=[1,2,3,4,5,6,7,8,9]
list2=[1,2,3,4,5,6,7,8,9]
for l1 in list1:
for l2 in list2:
print(l1*l2, end="\t")
print() | none | 1 | 3.871634 | 4 |
|
pyxcli/mirroring/tests/test_cg_recovery_manager.py | wegotthekeys/pyxcli | 19 | 6630776 |
##############################################################################
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from pyxcli.mirroring.tests.test_base_recovery_manager \
import TestBaseRecoveryManager
from pyxcli.mirroring.cg_recovery_manager \
import CGRecoveryManager
class TestCGRecoveryManager(TestBaseRecoveryManager):
__test__ = True
def setUpRecoveryManager(self):
self.recovery_manager = CGRecoveryManager(True, self.xcli_client_mock)
def set_mirror_list(self, cvolumes, ccgs):
self.xcli_client_mock.cmd.mirror_list.return_value = ccgs
def set_main_mirror(self, vol1, cg1):
self.xcli_mirror = vol1
self.master = 'cg1'
self.slave = 'cg2'
def test_create_mirror(self):
mirror = self.recovery_manager.create_mirror('cg1', 'target_xiv',
'sync', 'cg2', rpo=30,
remote_rpo=30,
schedule='never',
activate_mirror='no')
self.assertEqual(mirror.local_peer_name, 'cg1')
with self.assertRaises(TypeError):
self.recovery_manager.create_mirror('cg1', 'target_xiv', 'sync',
'cg2', rpo=30,
create_slave='yes',
remote_rpo=30,
schedule='never',
activate_mirror='no')
| ##############################################################################
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from pyxcli.mirroring.tests.test_base_recovery_manager \
import TestBaseRecoveryManager
from pyxcli.mirroring.cg_recovery_manager \
import CGRecoveryManager
class TestCGRecoveryManager(TestBaseRecoveryManager):
__test__ = True
def setUpRecoveryManager(self):
self.recovery_manager = CGRecoveryManager(True, self.xcli_client_mock)
def set_mirror_list(self, cvolumes, ccgs):
self.xcli_client_mock.cmd.mirror_list.return_value = ccgs
def set_main_mirror(self, vol1, cg1):
self.xcli_mirror = vol1
self.master = 'cg1'
self.slave = 'cg2'
def test_create_mirror(self):
mirror = self.recovery_manager.create_mirror('cg1', 'target_xiv',
'sync', 'cg2', rpo=30,
remote_rpo=30,
schedule='never',
activate_mirror='no')
self.assertEqual(mirror.local_peer_name, 'cg1')
with self.assertRaises(TypeError):
self.recovery_manager.create_mirror('cg1', 'target_xiv', 'sync',
'cg2', rpo=30,
create_slave='yes',
remote_rpo=30,
schedule='never',
activate_mirror='no') | en | 0.599643 | ############################################################################## # Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## | 1.747457 | 2 |
tests/test_basic.py | shawwn/defvar | 0 | 6630777 | import unittest
import defvar
class TestCase(unittest.TestCase):
def test_basic(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
| import unittest
import defvar
class TestCase(unittest.TestCase):
def test_basic(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.362869 | 2 |
|
arc/materials.py | m-schleier/ARC-Alkali-Rydberg-Calculator | 53 | 6630778 |
import numpy as np
import os
from .alkali_atom_functions import DPATH
class OpticalMaterial(object):
"""
Abstract class implementing calculation of basic properties for optical
materials.
"""
#: Human-friendly name of material
name = ""
#: List of .csv files listing refractive index measurements
#: first column in these files is wavelength (in mu m), the second
#: refractive index
sources = []
# This array is loaded automatically based on sources list
sourcesN = []
#: Any notes about measured values
sourcesComment = []
#: Array of max and minimal wavelegth pairs [lambdaMin, lambdaMax]
#: for each of the sources. Automatically loaded from sources list
sourcesRange = []
def __init__(self):
for s in self.sources:
self.sourcesN.append(
np.loadtxt(os.path.join(DPATH, "refractive_index_data", s),
skiprows=1, delimiter=',', unpack=True)
)
self.sourcesRange.append([self.sourcesN[-1][0].min(),
self.sourcesN[-1][0].max()])
def getN(self, *args, **kwargs):
"""
Refractive index of material
"""
return "To-do: refractive index"
def getRho(self):
return "To-do: density"
def getElectricConductance(self):
return "To-do: electric condctance"
def getThermalConductance(self):
return "To-do: thermal conductance"
class Air(OpticalMaterial):
"""
Air as an optical material at normal conditions
"""
name = "Air (dry, normal conditions)"
sources = ["Mathar-1.3.csv",
"Mathar-2.8.csv",
"Mathar-4.35.csv",
"Mathar-7.5.csv"]
sourcesComment = ['vacuum', 'vacuum', 'vacuum', 'vacuum']
def getN(self, vacuumWavelength=None, *args, **kwargs):
"""
Assumes temperature: 15 °C, pressure: 101325 Pa
"""
if vacuumWavelength is not None:
x = vacuumWavelength
else:
raise ValueError("wavelength not specified for refractive index")
if (x > 0.23) and (x < 1.690):
return 1 + 0.05792105 / (238.0185 - x**(-2)) \
+ 0.00167917 / (57.362 - x**(-2))
else:
for i, rangeN in enumerate(self.sourcesRange):
if (x > rangeN[0]) and (x < rangeN[1]):
return np.interp(x, self.sourcesN[i][0],
self.sourcesN[i][1])
raise ValueError("No refrative index data available for requested"
" wavelength %.3f mum" % x)
class Sapphire(OpticalMaterial):
"""
Sapphire as optical material.
"""
name = "Sapphire"
# data from: https://refractiveindex.info
sources = ["Querry-o.csv", "Querry-e.csv"]
sourcesN = []
sourcesComment = ["o", "e"]
def getN(self, vacuumWavelength=None,
airWavelength=None,
axis='ordinary', *args, **kwargs):
"""
"""
if vacuumWavelength is not None:
air = Air()
x = vacuumWavelength / air.getN(vacuumWavelength=vacuumWavelength)
elif airWavelength is not None:
x = airWavelength
else:
raise ValueError("wavelength not specified for refractive index")
if (axis == 'ordinary') or (axis == 'o'):
# electric field polarisation perpendicular to crystal axis
if (x > 0.2) and (x < 5.0):
return (1 + 1.4313493 / (1 - (0.0726631 / x)**2)
+ 0.65054713 / (1 - (0.1193242 / x)**2)
+ 5.3414021 / (1 - (18.028251 / x)**2))**.5
else:
for i, rangeN in enumerate(self.sourcesRange):
if (x > rangeN[0]) and (x < rangeN[1]) \
and (self.sourcesComment[i] == "o"):
return np.interp(x, self.sourcesN[i][0],
self.sourcesN[i][1])
raise ValueError("No refrative index data available for "
"requested wavelength %.3f mum" % x)
elif (axis == 'extraordinary') or (axis == 'e'):
# electric field polarisation along crystal axis
if (x > 0.2) or (x < 5.0):
return (1 + 1.5039759 / (1 - (0.0740288 / x)**2)
+ 0.55069141 / (1 - (0.1216529 / x)**2)
+ 6.5927379 / (1 - (20.072248 / x)**2))**.5
else:
for i, rangeN in enumerate(self.sourcesRange):
if (x > rangeN[0]) and (x < rangeN[1]) \
and (self.sourcesComment[i] == "e"):
return np.interp(x, self.sourcesN[i][0],
self.sourcesN[i][1])
raise ValueError("No refrative index data available for "
"requested wavelength %.3f mum" % x)
else:
raise ValueError("Uknown axis")
| import numpy as np
import os
from .alkali_atom_functions import DPATH
class OpticalMaterial(object):
"""
Abstract class implementing calculation of basic properties for optical
materials.
"""
#: Human-friendly name of material
name = ""
#: List of .csv files listing refractive index measurements
#: first column in these files is wavelength (in mu m), the second
#: refractive index
sources = []
# This array is loaded automatically based on sources list
sourcesN = []
#: Any notes about measured values
sourcesComment = []
#: Array of max and minimal wavelegth pairs [lambdaMin, lambdaMax]
#: for each of the sources. Automatically loaded from sources list
sourcesRange = []
def __init__(self):
for s in self.sources:
self.sourcesN.append(
np.loadtxt(os.path.join(DPATH, "refractive_index_data", s),
skiprows=1, delimiter=',', unpack=True)
)
self.sourcesRange.append([self.sourcesN[-1][0].min(),
self.sourcesN[-1][0].max()])
def getN(self, *args, **kwargs):
"""
Refractive index of material
"""
return "To-do: refractive index"
def getRho(self):
return "To-do: density"
def getElectricConductance(self):
return "To-do: electric condctance"
def getThermalConductance(self):
return "To-do: thermal conductance"
class Air(OpticalMaterial):
"""
Air as an optical material at normal conditions
"""
name = "Air (dry, normal conditions)"
sources = ["Mathar-1.3.csv",
"Mathar-2.8.csv",
"Mathar-4.35.csv",
"Mathar-7.5.csv"]
sourcesComment = ['vacuum', 'vacuum', 'vacuum', 'vacuum']
def getN(self, vacuumWavelength=None, *args, **kwargs):
"""
Assumes temperature: 15 °C, pressure: 101325 Pa
"""
if vacuumWavelength is not None:
x = vacuumWavelength
else:
raise ValueError("wavelength not specified for refractive index")
if (x > 0.23) and (x < 1.690):
return 1 + 0.05792105 / (238.0185 - x**(-2)) \
+ 0.00167917 / (57.362 - x**(-2))
else:
for i, rangeN in enumerate(self.sourcesRange):
if (x > rangeN[0]) and (x < rangeN[1]):
return np.interp(x, self.sourcesN[i][0],
self.sourcesN[i][1])
raise ValueError("No refrative index data available for requested"
" wavelength %.3f mum" % x)
class Sapphire(OpticalMaterial):
"""
Sapphire as optical material.
"""
name = "Sapphire"
# data from: https://refractiveindex.info
sources = ["Querry-o.csv", "Querry-e.csv"]
sourcesN = []
sourcesComment = ["o", "e"]
def getN(self, vacuumWavelength=None,
airWavelength=None,
axis='ordinary', *args, **kwargs):
"""
"""
if vacuumWavelength is not None:
air = Air()
x = vacuumWavelength / air.getN(vacuumWavelength=vacuumWavelength)
elif airWavelength is not None:
x = airWavelength
else:
raise ValueError("wavelength not specified for refractive index")
if (axis == 'ordinary') or (axis == 'o'):
# electric field polarisation perpendicular to crystal axis
if (x > 0.2) and (x < 5.0):
return (1 + 1.4313493 / (1 - (0.0726631 / x)**2)
+ 0.65054713 / (1 - (0.1193242 / x)**2)
+ 5.3414021 / (1 - (18.028251 / x)**2))**.5
else:
for i, rangeN in enumerate(self.sourcesRange):
if (x > rangeN[0]) and (x < rangeN[1]) \
and (self.sourcesComment[i] == "o"):
return np.interp(x, self.sourcesN[i][0],
self.sourcesN[i][1])
raise ValueError("No refrative index data available for "
"requested wavelength %.3f mum" % x)
elif (axis == 'extraordinary') or (axis == 'e'):
# electric field polarisation along crystal axis
if (x > 0.2) or (x < 5.0):
return (1 + 1.5039759 / (1 - (0.0740288 / x)**2)
+ 0.55069141 / (1 - (0.1216529 / x)**2)
+ 6.5927379 / (1 - (20.072248 / x)**2))**.5
else:
for i, rangeN in enumerate(self.sourcesRange):
if (x > rangeN[0]) and (x < rangeN[1]) \
and (self.sourcesComment[i] == "e"):
return np.interp(x, self.sourcesN[i][0],
self.sourcesN[i][1])
raise ValueError("No refrative index data available for "
"requested wavelength %.3f mum" % x)
else:
raise ValueError("Uknown axis") | en | 0.787387 | Abstract class implementing calculation of basic properties for optical materials. #: Human-friendly name of material #: List of .csv files listing refractive index measurements #: first column in these files is wavelength (in mu m), the second #: refractive index # This array is loaded automatically based on sources list #: Any notes about measured values #: Array of max and minimal wavelegth pairs [lambdaMin, lambdaMax] #: for each of the sources. Automatically loaded from sources list Refractive index of material Air as an optical material at normal conditions Assumes temperature: 15 °C, pressure: 101325 Pa Sapphire as optical material. # data from: https://refractiveindex.info # electric field polarisation perpendicular to cristal axis # electric field polarisation along cristal axis | 3.117328 | 3 |
TestInterface/uploadFile.py | luoyefeiwu/learn_python | 0 | 6630779 | # coding=utf-8
# Save
import requests
import json
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Content-Type': 'multipart/form-data'
}
data = [
{
"projectDeclare": {
"id": "123"
}
}
]
cookies = {}
with open("cookies.txt", 'r') as file:
for line in file.read().split(';'):
name, value = line.strip().split('=', 1)
cookies[name] = value
files = {'file': open('导入.txt', 'rb')}
response = requests.post('http://localhost:8081/ydyl/upload/uploadFile', files=files,
cookies=cookies)
print(response.text)
| # coding=utf-8
# Save
import requests
import json
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Content-Type': 'multipart/form-data'
}
data = [
{
"projectDeclare": {
"id": "123"
}
}
]
cookies = {}
with open("cookies.txt", 'r') as file:
for line in file.read().split(';'):
name, value = line.strip().split('=', 1)
cookies[name] = value
files = {'file': open('导入.txt', 'rb')}
response = requests.post('http://localhost:8081/ydyl/upload/uploadFile', files=files,
                         cookies=cookies)
print(response.text)
 | zh | 0.313082 | # coding=utf-8 # Save | 2.349675 | 2 |
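# --- editor's illustration (not part of the dataset row above) ---------------
# A slightly more defensive version of the upload script above; the URL, cookie
# file and upload file are taken from that script and are assumptions of its
# local setup, not verified endpoints.
import requests

def upload_file(url='http://localhost:8081/ydyl/upload/uploadFile',
                cookie_path='cookies.txt', file_path='导入.txt'):
    cookies = {}
    with open(cookie_path, 'r') as fh:
        for item in fh.read().split(';'):
            name, value = item.strip().split('=', 1)
            cookies[name] = value
    with open(file_path, 'rb') as fh:
        resp = requests.post(url, files={'file': fh}, cookies=cookies)
    resp.raise_for_status()  # fail loudly on HTTP errors
    return resp.text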
filter_plugins/clean_config.py | aegiacometti/netconf-backup-gogs | 3 | 6630780 | <reponame>aegiacometti/netconf-backup-gogs
#
# Simple config cleaning filter: strips lines matching the given patterns
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2 import TemplateError
import re
def clean_config(l, *argv):
for element in argv:
if type(element) is list:
for value in element:
l = clean_config(l, value)
else:
regex = re.sub('\.\.\.', '.*?\\n', element)
l = re.sub(regex, '', l)
return l
class FilterModule(object):
def filters(self):
return {
'clean_config': clean_config
}
| #
# Simple config cleaning filter: strips lines matching the given patterns
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2 import TemplateError
import re
def clean_config(l, *argv):
for element in argv:
if type(element) is list:
for value in element:
l = clean_config(l, value)
else:
regex = re.sub('\.\.\.', '.*?\\n', element)
l = re.sub(regex, '', l)
return l
class FilterModule(object):
def filters(self):
return {
'clean_config': clean_config
} | en | 0.581569 | # # Simple list append filter # | 2.649754 | 3 |
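# --- editor's illustration (not part of the dataset row above) ---------------
# Usage sketch for the clean_config filter defined above: '...' in a pattern is
# rewritten to '.*?\n', so each pattern removes one matching line from the
# configuration text. The sample config and pattern below are invented.
sample = "hostname r1\nntp clock-period 17179738\nline vty 0 4\n"
print(clean_config(sample, ['ntp clock-period ...']))
# -> "hostname r1\nline vty 0 4\n"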
examples/pandas_example.py | mdkhaledben/n-beats | 1 | 6630781 | <reponame>mdkhaledben/n-beats<gh_stars>1-10
import numpy as np
import pandas as pd
from nbeats_keras.model import NBeatsNet as NBeatsKeras
# This is an example linked to this issue: https://github.com/philipperemy/n-beats/issues/60.
# Here the target variable is no longer part of the inputs.
# NOTE: it is also possible to solve this problem with exogenous variables.
# See example/exo_example.py.
def main():
num_rows = 100
num_columns = 4
timesteps = 20
d = pd.DataFrame(data=np.random.uniform(size=(num_rows, num_columns)), columns=['A', 'B', 'C', 'D'])
print(d.head())
# Use <A, B, C> to predict D.
predictors = d[['A', 'B', 'C']]
targets = d['D']
# backcast length is timesteps.
# forecast length is 1.
predictors = np.array([predictors[i:i + timesteps] for i in range(num_rows - timesteps)])
targets = np.array([targets[i:i + 1] for i in range(num_rows - timesteps)])[:, :, None]
# noinspection PyArgumentEqualDefault
model_keras = NBeatsKeras(
input_dim=num_columns - 1,
output_dim=1,
forecast_length=1,
nb_blocks_per_stack=1,
backcast_length=timesteps
)
# plot_model(model_keras, 'pandas.png', show_shapes=True, show_dtype=True)
model_keras.compile(loss='mae', optimizer='adam')
model_keras.fit(predictors, targets, validation_split=0.2)
num_predictions = len(predictors)
predictions = model_keras.predict(predictors)
np.testing.assert_equal(predictions.shape, (num_predictions, 1, 1))
d['P'] = [np.nan] * (num_rows - num_predictions) + list(model_keras.predict(predictors).squeeze(axis=(1, 2)))
print(d)
if __name__ == '__main__':
main()
| import numpy as np
import pandas as pd
from nbeats_keras.model import NBeatsNet as NBeatsKeras
# This is an example linked to this issue: https://github.com/philipperemy/n-beats/issues/60.
# Here the target variable is no longer part of the inputs.
# NOTE: it is also possible to solve this problem with exogenous variables.
# See example/exo_example.py.
def main():
num_rows = 100
num_columns = 4
timesteps = 20
d = pd.DataFrame(data=np.random.uniform(size=(num_rows, num_columns)), columns=['A', 'B', 'C', 'D'])
print(d.head())
# Use <A, B, C> to predict D.
predictors = d[['A', 'B', 'C']]
targets = d['D']
# backcast length is timesteps.
# forecast length is 1.
predictors = np.array([predictors[i:i + timesteps] for i in range(num_rows - timesteps)])
targets = np.array([targets[i:i + 1] for i in range(num_rows - timesteps)])[:, :, None]
# noinspection PyArgumentEqualDefault
model_keras = NBeatsKeras(
input_dim=num_columns - 1,
output_dim=1,
forecast_length=1,
nb_blocks_per_stack=1,
backcast_length=timesteps
)
# plot_model(model_keras, 'pandas.png', show_shapes=True, show_dtype=True)
model_keras.compile(loss='mae', optimizer='adam')
model_keras.fit(predictors, targets, validation_split=0.2)
num_predictions = len(predictors)
predictions = model_keras.predict(predictors)
np.testing.assert_equal(predictions.shape, (num_predictions, 1, 1))
d['P'] = [np.nan] * (num_rows - num_predictions) + list(model_keras.predict(predictors).squeeze(axis=(1, 2)))
print(d)
if __name__ == '__main__':
main() | en | 0.681738 | # This is an example linked to this issue: https://github.com/philipperemy/n-beats/issues/60. # Here the target variable is no longer part of the inputs. # NOTE: it is also possible to solve this problem with exogenous variables. # See example/exo_example.py. # Use <A, B, C> to predict D. # backcast length is timesteps. # forecast length is 1. # noinspection PyArgumentEqualDefault # plot_model(model_keras, 'pandas.png', show_shapes=True, show_dtype=True) | 2.990135 | 3 |
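# --- editor's illustration (not part of the dataset row above) ---------------
# The windowing used above, shown on dummy data: each sample is `timesteps`
# consecutive predictor rows paired with one target value, which yields arrays
# shaped (samples, timesteps, features) and (samples, 1, 1).
import numpy as np
rows, timesteps, features = 10, 3, 2
x = np.random.rand(rows, features)
y = np.random.rand(rows)
xw = np.array([x[i:i + timesteps] for i in range(rows - timesteps)])
yw = np.array([y[i:i + 1] for i in range(rows - timesteps)])[:, :, None]
print(xw.shape, yw.shape)  # (7, 3, 2) (7, 1, 1)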
Choice_PriceBot/choice_price_commands.py | Samuellyworld/CHOICE_TinyMan_Wrapper | 20 | 6630782 | <filename>Choice_PriceBot/choice_price_commands.py
# Imports
import json, time
from datetime import datetime
from algosdk.v2client import algod
from tinyman.v1.client import TinymanClient
import discord
from discord.ext import commands
# Get bot tokens
f = open('./keys.json',)
keys = json.load(f)
f.close()
# Create a discord client
discord_commander = commands.Bot(command_prefix="!")
# Get Algo Client / Using purestake; supplement your own API key for the algod_token
algod_address = 'https://mainnet-algorand.api.purestake.io/ps2'
algod_token = keys['algod_token']
headers = {'X-API-Key': algod_token}
algod_client = algod.AlgodClient(algod_token, algod_address, headers)
# Get TMan Client / 350338509 is the app ID for all TinymanClient implementations
# Get Assets and Pools - ALGO, CHOICE, USDC
tinyman = TinymanClient(algod_client, 350338509)
ALGO = tinyman.fetch_asset(0)
CHOICE = tinyman.fetch_asset(297995609)
LION = tinyman.fetch_asset(372666897)
USDC = tinyman.fetch_asset(31566704)
ALGO_USDC = tinyman.fetch_pool(ALGO, USDC)
CHOICE_ALGO = tinyman.fetch_pool(CHOICE, ALGO)
LION_USDC = tinyman.fetch_pool(LION, USDC)
# Retrieve price of choice
def get_prices():
quote_ALGO_USDC = ALGO_USDC.fetch_fixed_input_swap_quote(ALGO(1_000_000), slippage=0)
algo_price = float(quote_ALGO_USDC.amount_out_with_slippage.amount) / float(10**quote_ALGO_USDC.amount_out_with_slippage.asset.decimals)
algo_price = round(algo_price, 4)
quote_CHOICE_ALGO = CHOICE_ALGO.fetch_fixed_input_swap_quote(CHOICE(100), slippage=0)
choice_out = float(quote_CHOICE_ALGO.amount_out_with_slippage.amount) / float(10**quote_CHOICE_ALGO.amount_out_with_slippage.asset.decimals)
choice_price = round(algo_price * choice_out, 4)
quote_LION_USDC = LION_USDC.fetch_fixed_input_swap_quote(LION(10_000), slippage=0)
lion_price = float(quote_LION_USDC.amount_out_with_slippage.amount) / float(10**quote_LION_USDC.amount_out_with_slippage.asset.decimals)
lion_price = round(lion_price, 4)
return algo_price, choice_price, lion_price
# Command to show the price immediately
@discord_commander.command()
async def algo_price(ctx):
sender = str(ctx.author).split("#")[0]
algo_price, choice_price, lion_price = get_prices()
await ctx.send(
f'Hello There, {sender}\n' +
f'The current price of Algo is **${algo_price}** :rocket:'
)
@discord_commander.command()
async def choice_price(ctx):
sender = str(ctx.author).split("#")[0]
algo_price, choice_price, lion_price = get_prices()
await ctx.send(
f'Hello There, {sender}\n' +
f'The current price of Choice Coin is **${choice_price}** :rocket:'
)
@discord_commander.command()
async def lion_price(ctx):
sender = str(ctx.author).split("#")[0]
algo_price, choice_price, lion_price = get_prices()
await ctx.send(
f'Hello There, {sender}\n' +
f'The current price of Lion Coin is **${lion_price}** :rocket:'
)
@discord_commander.command()
async def tell_sawamy_he_sucks(ctx):
await ctx.send(
f'SAWAMY, you suck! :rocket:'
)
# Run the client and commander
discord_commander.run(keys['bot_token'])
| <filename>Choice_PriceBot/choice_price_commands.py
# Imports
import json, time
from datetime import datetime
from algosdk.v2client import algod
from tinyman.v1.client import TinymanClient
import discord
from discord.ext import commands
# Get bot tokens
f = open('./keys.json',)
keys = json.load(f)
f.close()
# Create a discord client
discord_commander = commands.Bot(command_prefix="!")
# Get Algo Client / Using purestake; supplement your own API key for the algod_token
algod_address = 'https://mainnet-algorand.api.purestake.io/ps2'
algod_token = keys['algod_token']
headers = {'X-API-Key': algod_token}
algod_client = algod.AlgodClient(algod_token, algod_address, headers)
# Get TMan Client / 350338509 is the app ID for all TinymanClient implementations
# Get Assets and Pools - ALGO, CHOICE, USDC
tinyman = TinymanClient(algod_client, 350338509)
ALGO = tinyman.fetch_asset(0)
CHOICE = tinyman.fetch_asset(297995609)
LION = tinyman.fetch_asset(372666897)
USDC = tinyman.fetch_asset(31566704)
ALGO_USDC = tinyman.fetch_pool(ALGO, USDC)
CHOICE_ALGO = tinyman.fetch_pool(CHOICE, ALGO)
LION_USDC = tinyman.fetch_pool(LION, USDC)
# Retrieve price of choice
def get_prices():
quote_ALGO_USDC = ALGO_USDC.fetch_fixed_input_swap_quote(ALGO(1_000_000), slippage=0)
algo_price = float(quote_ALGO_USDC.amount_out_with_slippage.amount) / float(10**quote_ALGO_USDC.amount_out_with_slippage.asset.decimals)
algo_price = round(algo_price, 4)
quote_CHOICE_ALGO = CHOICE_ALGO.fetch_fixed_input_swap_quote(CHOICE(100), slippage=0)
choice_out = float(quote_CHOICE_ALGO.amount_out_with_slippage.amount) / float(10**quote_CHOICE_ALGO.amount_out_with_slippage.asset.decimals)
choice_price = round(algo_price * choice_out, 4)
quote_LION_USDC = LION_USDC.fetch_fixed_input_swap_quote(LION(10_000), slippage=0)
lion_price = float(quote_LION_USDC.amount_out_with_slippage.amount) / float(10**quote_LION_USDC.amount_out_with_slippage.asset.decimals)
lion_price = round(lion_price, 4)
return algo_price, choice_price, lion_price
# Command to show the price immediately
@discord_commander.command()
async def algo_price(ctx):
sender = str(ctx.author).split("#")[0]
algo_price, choice_price, lion_price = get_prices()
await ctx.send(
f'Hello There, {sender}\n' +
f'The current price of Algo is **${algo_price}** :rocket:'
)
@discord_commander.command()
async def choice_price(ctx):
sender = str(ctx.author).split("#")[0]
algo_price, choice_price, lion_price = get_prices()
await ctx.send(
f'Hello There, {sender}\n' +
f'The current price of Choice Coin is **${choice_price}** :rocket:'
)
@discord_commander.command()
async def lion_price(ctx):
sender = str(ctx.author).split("#")[0]
algo_price, choice_price, lion_price = get_prices()
await ctx.send(
f'Hello There, {sender}\n' +
f'The current price of Lion Coin is **${lion_price}** :rocket:'
)
@discord_commander.command()
async def tell_sawamy_he_sucks(ctx):
await ctx.send(
f'SAWAMY, you suck! :rocket:'
)
# Run the client and commander
discord_commander.run(keys['bot_token'])
| en | 0.717566 | # Imports # Get bot tokens # Create a discord client # Get Algo Client / Using purestake; supplement your own API key for the algod_token # Get TMan Client / 350338509 is the app ID for all TinymanClient implementations # Get Assets and Pools - ALGO, CHOICE, USDC # Retrieve price of choice # Command to show the price immediately # Run the client and commander | 2.812242 | 3 |
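# --- editor's illustration (not part of the dataset row above) ---------------
# The price arithmetic inside get_prices() above, on made-up numbers: a
# fixed-input swap quote reports an integer amount in the output asset's base
# units, so dividing by 10**decimals gives the human-readable price.
amount_out = 1_234_567      # hypothetical micro-USDC received for 1 ALGO
usdc_decimals = 6
algo_price = round(amount_out / 10 ** usdc_decimals, 4)
print(algo_price)           # 1.2346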
Lib/lib2to3/fixes/fix_tuple_params.py | sireliah/polish-python | 1 | 6630783 | <reponame>sireliah/polish-python<filename>Lib/lib2to3/fixes/fix_tuple_params.py
"""Fixer dla function definitions przy tuple parameters.
def func(((a, b), c), d):
...
->
def func(x, d):
((a, b), c) = x
...
It will also support lambdas:
lambda (x, y): x + y -> lambda t: t[0] + t[1]
# The parens are a syntax error w Python 3
lambda (x): x + y -> lambda x: x + y
"""
# Author: <NAME>
# Local imports
z .. zaimportuj pytree
z ..pgen2 zaimportuj token
z .. zaimportuj fixer_base
z ..fixer_util zaimportuj Assign, Name, Newline, Number, Subscript, syms
def is_docstring(stmt):
zwróć isinstance(stmt, pytree.Node) oraz \
stmt.children[0].type == token.STRING
klasa FixTupleParams(fixer_base.BaseFix):
run_order = 4 #use a lower order since lambda jest part of other
#patterns
BM_compatible = Prawda
PATTERN = """
funcdef< 'def' any parameters< '(' args=any ')' >
['->' any] ':' suite=any+ >
|
lambda=
lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
':' body=any
>
"""
def transform(self, node, results):
jeżeli "lambda" w results:
zwróć self.transform_lambda(node, results)
new_lines = []
suite = results["suite"]
args = results["args"]
# This crap jest so "def foo(...): x = 5; y = 7" jest handled correctly.
# TODO(cwinter): suite-cleanup
jeżeli suite[0].children[1].type == token.INDENT:
start = 2
indent = suite[0].children[1].value
end = Newline()
inaczej:
start = 0
indent = "; "
end = pytree.Leaf(token.INDENT, "")
# We need access to self dla new_name(), oraz making this a method
# doesn't feel right. Closing over self oraz new_lines makes the
# code below cleaner.
def handle_tuple(tuple_arg, add_prefix=Nieprawda):
n = Name(self.new_name())
arg = tuple_arg.clone()
arg.prefix = ""
stmt = Assign(arg, n.clone())
jeżeli add_prefix:
n.prefix = " "
tuple_arg.replace(n)
new_lines.append(pytree.Node(syms.simple_stmt,
[stmt, end.clone()]))
jeżeli args.type == syms.tfpdef:
handle_tuple(args)
albo_inaczej args.type == syms.typedargslist:
dla i, arg w enumerate(args.children):
jeżeli arg.type == syms.tfpdef:
# Without add_prefix, the emitted code jest correct,
# just ugly.
handle_tuple(arg, add_prefix=(i > 0))
jeżeli nie new_lines:
zwróć
# This isn't strictly necessary, but it plays nicely przy other fixers.
# TODO(cwinter) get rid of this when children becomes a smart list
dla line w new_lines:
line.parent = suite[0]
# TODO(cwinter) suite-cleanup
after = start
jeżeli start == 0:
new_lines[0].prefix = " "
albo_inaczej is_docstring(suite[0].children[start]):
new_lines[0].prefix = indent
after = start + 1
dla line w new_lines:
line.parent = suite[0]
suite[0].children[after:after] = new_lines
dla i w range(after+1, after+len(new_lines)+1):
suite[0].children[i].prefix = indent
suite[0].changed()
def transform_lambda(self, node, results):
args = results["args"]
body = results["body"]
inner = simplify_args(results["inner"])
# Replace lambda ((((x)))): x przy lambda x: x
jeżeli inner.type == token.NAME:
inner = inner.clone()
inner.prefix = " "
args.replace(inner)
zwróć
params = find_params(args)
to_index = map_to_index(params)
tup_name = self.new_name(tuple_name(params))
new_param = Name(tup_name, prefix=" ")
args.replace(new_param.clone())
dla n w body.post_order():
jeżeli n.type == token.NAME oraz n.value w to_index:
subscripts = [c.clone() dla c w to_index[n.value]]
new = pytree.Node(syms.power,
[new_param.clone()] + subscripts)
new.prefix = n.prefix
n.replace(new)
### Helper functions dla transform_lambda()
def simplify_args(node):
jeżeli node.type w (syms.vfplist, token.NAME):
zwróć node
albo_inaczej node.type == syms.vfpdef:
# These look like vfpdef< '(' x ')' > where x jest NAME
# albo another vfpdef instance (leading to recursion).
dopóki node.type == syms.vfpdef:
node = node.children[1]
zwróć node
podnieś RuntimeError("Received unexpected node %s" % node)
def find_params(node):
jeżeli node.type == syms.vfpdef:
zwróć find_params(node.children[1])
albo_inaczej node.type == token.NAME:
zwróć node.value
zwróć [find_params(c) dla c w node.children jeżeli c.type != token.COMMA]
def map_to_index(param_list, prefix=[], d=Nic):
jeżeli d jest Nic:
d = {}
dla i, obj w enumerate(param_list):
trailer = [Subscript(Number(str(i)))]
jeżeli isinstance(obj, list):
map_to_index(obj, trailer, d=d)
inaczej:
d[obj] = prefix + trailer
zwróć d
def tuple_name(param_list):
l = []
dla obj w param_list:
jeżeli isinstance(obj, list):
l.append(tuple_name(obj))
inaczej:
l.append(obj)
zwróć "_".join(l)
| """Fixer dla function definitions przy tuple parameters.
def func(((a, b), c), d):
...
->
def func(x, d):
((a, b), c) = x
...
It will also support lambdas:
lambda (x, y): x + y -> lambda t: t[0] + t[1]
# The parens are a syntax error w Python 3
lambda (x): x + y -> lambda x: x + y
"""
# Author: <NAME>
# Local imports
z .. zaimportuj pytree
z ..pgen2 zaimportuj token
z .. zaimportuj fixer_base
z ..fixer_util zaimportuj Assign, Name, Newline, Number, Subscript, syms
def is_docstring(stmt):
zwróć isinstance(stmt, pytree.Node) oraz \
stmt.children[0].type == token.STRING
klasa FixTupleParams(fixer_base.BaseFix):
run_order = 4 #use a lower order since lambda jest part of other
#patterns
BM_compatible = Prawda
PATTERN = """
funcdef< 'def' any parameters< '(' args=any ')' >
['->' any] ':' suite=any+ >
|
lambda=
lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
':' body=any
>
"""
def transform(self, node, results):
jeżeli "lambda" w results:
zwróć self.transform_lambda(node, results)
new_lines = []
suite = results["suite"]
args = results["args"]
# This crap jest so "def foo(...): x = 5; y = 7" jest handled correctly.
# TODO(cwinter): suite-cleanup
jeżeli suite[0].children[1].type == token.INDENT:
start = 2
indent = suite[0].children[1].value
end = Newline()
inaczej:
start = 0
indent = "; "
end = pytree.Leaf(token.INDENT, "")
# We need access to self dla new_name(), oraz making this a method
# doesn't feel right. Closing over self oraz new_lines makes the
# code below cleaner.
def handle_tuple(tuple_arg, add_prefix=Nieprawda):
n = Name(self.new_name())
arg = tuple_arg.clone()
arg.prefix = ""
stmt = Assign(arg, n.clone())
jeżeli add_prefix:
n.prefix = " "
tuple_arg.replace(n)
new_lines.append(pytree.Node(syms.simple_stmt,
[stmt, end.clone()]))
jeżeli args.type == syms.tfpdef:
handle_tuple(args)
albo_inaczej args.type == syms.typedargslist:
dla i, arg w enumerate(args.children):
jeżeli arg.type == syms.tfpdef:
# Without add_prefix, the emitted code jest correct,
# just ugly.
handle_tuple(arg, add_prefix=(i > 0))
jeżeli nie new_lines:
zwróć
# This isn't strictly necessary, but it plays nicely przy other fixers.
# TODO(cwinter) get rid of this when children becomes a smart list
dla line w new_lines:
line.parent = suite[0]
# TODO(cwinter) suite-cleanup
after = start
jeżeli start == 0:
new_lines[0].prefix = " "
albo_inaczej is_docstring(suite[0].children[start]):
new_lines[0].prefix = indent
after = start + 1
dla line w new_lines:
line.parent = suite[0]
suite[0].children[after:after] = new_lines
dla i w range(after+1, after+len(new_lines)+1):
suite[0].children[i].prefix = indent
suite[0].changed()
def transform_lambda(self, node, results):
args = results["args"]
body = results["body"]
inner = simplify_args(results["inner"])
# Replace lambda ((((x)))): x przy lambda x: x
jeżeli inner.type == token.NAME:
inner = inner.clone()
inner.prefix = " "
args.replace(inner)
zwróć
params = find_params(args)
to_index = map_to_index(params)
tup_name = self.new_name(tuple_name(params))
new_param = Name(tup_name, prefix=" ")
args.replace(new_param.clone())
dla n w body.post_order():
jeżeli n.type == token.NAME oraz n.value w to_index:
subscripts = [c.clone() dla c w to_index[n.value]]
new = pytree.Node(syms.power,
[new_param.clone()] + subscripts)
new.prefix = n.prefix
n.replace(new)
### Helper functions dla transform_lambda()
def simplify_args(node):
jeżeli node.type w (syms.vfplist, token.NAME):
zwróć node
albo_inaczej node.type == syms.vfpdef:
# These look like vfpdef< '(' x ')' > where x jest NAME
# albo another vfpdef instance (leading to recursion).
dopóki node.type == syms.vfpdef:
node = node.children[1]
zwróć node
podnieś RuntimeError("Received unexpected node %s" % node)
def find_params(node):
jeżeli node.type == syms.vfpdef:
zwróć find_params(node.children[1])
albo_inaczej node.type == token.NAME:
zwróć node.value
zwróć [find_params(c) dla c w node.children jeżeli c.type != token.COMMA]
def map_to_index(param_list, prefix=[], d=Nic):
jeżeli d jest Nic:
d = {}
dla i, obj w enumerate(param_list):
trailer = [Subscript(Number(str(i)))]
jeżeli isinstance(obj, list):
map_to_index(obj, trailer, d=d)
inaczej:
d[obj] = prefix + trailer
zwróć d
def tuple_name(param_list):
l = []
dla obj w param_list:
jeżeli isinstance(obj, list):
l.append(tuple_name(obj))
inaczej:
l.append(obj)
zwróć "_".join(l) | en | 0.516455 | Fixer dla function definitions przy tuple parameters. def func(((a, b), c), d): ... -> def func(x, d): ((a, b), c) = x ... It will also support lambdas: lambda (x, y): x + y -> lambda t: t[0] + t[1] # The parens are a syntax error w Python 3 lambda (x): x + y -> lambda x: x + y # Author: <NAME> # Local imports #use a lower order since lambda jest part of other #patterns funcdef< 'def' any parameters< '(' args=any ')' > ['->' any] ':' suite=any+ > | lambda= lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > ':' body=any > # This crap jest so "def foo(...): x = 5; y = 7" jest handled correctly. # TODO(cwinter): suite-cleanup # We need access to self dla new_name(), oraz making this a method # doesn't feel right. Closing over self oraz new_lines makes the # code below cleaner. # Without add_prefix, the emitted code jest correct, # just ugly. # This isn't strictly necessary, but it plays nicely przy other fixers. # TODO(cwinter) get rid of this when children becomes a smart list # TODO(cwinter) suite-cleanup # Replace lambda ((((x)))): x przy lambda x: x ### Helper functions dla transform_lambda() # These look like vfpdef< '(' x ')' > where x jest NAME # albo another vfpdef instance (leading to recursion). | 2.757215 | 3 |
python2/luis_sdk/luis_action.py | ParadoxBusinessGroup/Cognitive-LUIS-Python | 100 | 6630784 | '''
Copyright (c) Microsoft. All rights reserved.
Licensed under the MIT license.
Microsoft Cognitive Services (formerly Project Oxford): https://www.microsoft.com/cognitive-services
Microsoft Cognitive Services (formerly Project Oxford) GitHub:
https://github.com/Microsoft/ProjectOxford-ClientSDK
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License:
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from .luis_parameter import LUISParameter
class LUISAction(object):
'''
LUIS Action Class.
Describes the LUIS Action structure.
'''
def __init__(self, action):
'''
A constructor for the LUISAction class.
:param action: A dictionary containing the action data.
'''
self._name = action[u'name']
self._triggered = action[u'triggered']
self._parameters = []
for parameter in action[u'parameters']:
self._parameters.append(LUISParameter(parameter))
def get_name(self):
'''
A getter for the action's name.
        :return: Action's name.
'''
return self._name
def get_triggered(self):
'''
A getter for the action's triggered flag.
        :return: A boolean that expresses whether the action was triggered or not.
'''
return self._triggered
def get_parameters(self):
'''
A getter for the action's parameters.
        :return: A list of parameters.
'''
return self._parameters
| '''
Copyright (c) Microsoft. All rights reserved.
Licensed under the MIT license.
Microsoft Cognitive Services (formerly Project Oxford): https://www.microsoft.com/cognitive-services
Microsoft Cognitive Services (formerly Project Oxford) GitHub:
https://github.com/Microsoft/ProjectOxford-ClientSDK
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License:
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from .luis_parameter import LUISParameter
class LUISAction(object):
'''
LUIS Action Class.
Describes the LUIS Action structure.
'''
def __init__(self, action):
'''
A constructor for the LUISAction class.
:param action: A dictionary containing the action data.
'''
self._name = action[u'name']
self._triggered = action[u'triggered']
self._parameters = []
for parameter in action[u'parameters']:
self._parameters.append(LUISParameter(parameter))
def get_name(self):
'''
A getter for the action's name.
        :return: Action's name.
'''
return self._name
def get_triggered(self):
'''
A getter for the action's triggered flag.
        :return: A boolean that expresses whether the action was triggered or not.
'''
return self._triggered
def get_parameters(self):
'''
A getter for the action's parameters.
        :return: A list of parameters.
'''
return self._parameters
| en | 0.761097 | Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license. Microsoft Cognitive Services (formerly Project Oxford): https://www.microsoft.com/cognitive-services Microsoft Cognitive Services (formerly Project Oxford) GitHub: https://github.com/Microsoft/ProjectOxford-ClientSDK Copyright (c) Microsoft Corporation All rights reserved. MIT License: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LUIS Action Class. Describes the LUIS Action structure. A constructor for the LUISAction class. :param action: A dictionary containing the action data. A getter for the action's name. :return: Actions's name. A getter for the action's triggered flag. :return: A boolean that expresses whether the action was trigerred or not. A getter for the action's parameters. :return: A list of parameter. | 1.745115 | 2 |
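# --- editor's illustration (not part of the dataset row above) ---------------
# Minimal sketch of constructing LUISAction by hand; the dictionary shape
# mirrors what the constructor above reads and the values are invented.
sample_action = {
    u'name': u'BookFlight',
    u'triggered': True,
    u'parameters': [],
}
action = LUISAction(sample_action)
print(action.get_name(), action.get_triggered(), action.get_parameters())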
diofant/sets/__init__.py | project-kotinos/diofant___diofant | 1 | 6630785 | <filename>diofant/sets/__init__.py
"""
Package for set theory.
"""
from ..core.singleton import S
from .contains import Contains
from .fancysets import ImageSet, Range
from .sets import (Complement, EmptySet, FiniteSet, Intersection, Interval,
ProductSet, Set, SymmetricDifference, Union, imageset)
Naturals = S.Naturals
Naturals0 = S.Naturals0
Integers = S.Integers
Rationals = S.Rationals
Reals = S.Reals
del S
| <filename>diofant/sets/__init__.py
"""
Package for set theory.
"""
from ..core.singleton import S
from .contains import Contains
from .fancysets import ImageSet, Range
from .sets import (Complement, EmptySet, FiniteSet, Intersection, Interval,
ProductSet, Set, SymmetricDifference, Union, imageset)
Naturals = S.Naturals
Naturals0 = S.Naturals0
Integers = S.Integers
Rationals = S.Rationals
Reals = S.Reals
del S
| en | 0.906911 | Package for set theory. | 1.700583 | 2 |
experiments.py | umich-dbgroup/duoquest | 4 | 6630786 | <reponame>umich-dbgroup/duoquest<filename>experiments.py
import argparse
import configparser
import json
from duoquest.database import Database
from duoquest.external.eval import build_foreign_key_map_from_json
from duoquest.files import results_path
from duoquest.verifier import DuoquestVerifier
from duoquest.nlq_client import NLQClient
from duoquest.schema import Schema
from duoquest.server import DuoquestServer
from duoquest.vars import *
def load_schemas(schemas_path):
schemas = {}
schema_file = json.load(open(schemas_path))
kmaps = build_foreign_key_map_from_json(schema_file)
for schema_info in schema_file:
schemas[schema_info['db_id']] = Schema(schema_info)
return schemas, kmaps
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dataset', choices=DATASETS)
parser.add_argument('mode', choices=MODES)
parser.add_argument('tsq_level', choices=TSQ_LEVELS)
parser.add_argument('--config_path', default='docker_cfg.ini')
parser.add_argument('--tsq_rows', type=int, default=1)
parser.add_argument('--timeout', default=DEFAULT_TIMEOUT, type=int,
help='Timeout if search does not terminate')
parser.add_argument('--tid', default=None, type=int, help='debug task id')
parser.add_argument('--start_tid', default=None, type=int,
help='start task id')
# Disable pieces of the system
parser.add_argument('--disable_clauses', action='store_true')
parser.add_argument('--disable_semantics', action='store_true')
parser.add_argument('--disable_column', action='store_true')
parser.add_argument('--disable_literals', action='store_true')
# Debugging options
parser.add_argument('--compare', choices=TSQ_LEVELS,
help='Compare results against this level')
parser.add_argument('--debug', action='store_true',
help='Debugging output')
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.config_path)
# Load dataset
data = None
db_path = None
schemas_path = None
if args.dataset == 'spider':
data = json.load(open(config['spider'][f'{args.mode}_path']))
db_path = config['spider'][f'{args.mode}_db_path']
schemas_path = config['spider'][f'{args.mode}_tables_path']
elif args.dataset == 'wikisql':
# TODO
pass
out_base = results_path(config, args.dataset, args.mode, args.tsq_level,
args.tsq_rows, args.timeout, args.disable_clauses,
args.disable_semantics, args.disable_column, args.disable_literals)
verifier = DuoquestVerifier(debug=args.debug,
no_fk_select=True,
no_fk_where=True,
no_fk_having=True,
no_fk_group_by=True,
agg_projected=True,
disable_set_ops=True,
disable_subquery=True,
literals_given=True,
disable_clauses=args.disable_clauses,
disable_semantics=args.disable_semantics,
disable_column=args.disable_column,
disable_literals=args.disable_literals)
server = DuoquestServer(int(config['duoquest']['port']),
config['duoquest']['authkey'].encode('utf-8'), verifier, out_base)
schemas, kmaps = load_schemas(schemas_path)
db = Database(db_path, args.dataset)
nlqc = NLQClient(config['nlq']['host'], int(config['nlq']['port']),
config['nlq']['authkey'].encode('utf-8'), args.dataset, args.mode)
server.run_experiments(schemas, db, nlqc, data, args.tsq_level,
args.tsq_rows, tid=args.tid, compare=args.compare,
start_tid=args.start_tid, timeout=args.timeout)
if __name__ == '__main__':
main()
| import argparse
import configparser
import json
from duoquest.database import Database
from duoquest.external.eval import build_foreign_key_map_from_json
from duoquest.files import results_path
from duoquest.verifier import DuoquestVerifier
from duoquest.nlq_client import NLQClient
from duoquest.schema import Schema
from duoquest.server import DuoquestServer
from duoquest.vars import *
def load_schemas(schemas_path):
schemas = {}
schema_file = json.load(open(schemas_path))
kmaps = build_foreign_key_map_from_json(schema_file)
for schema_info in schema_file:
schemas[schema_info['db_id']] = Schema(schema_info)
return schemas, kmaps
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dataset', choices=DATASETS)
parser.add_argument('mode', choices=MODES)
parser.add_argument('tsq_level', choices=TSQ_LEVELS)
parser.add_argument('--config_path', default='docker_cfg.ini')
parser.add_argument('--tsq_rows', type=int, default=1)
parser.add_argument('--timeout', default=DEFAULT_TIMEOUT, type=int,
help='Timeout if search does not terminate')
parser.add_argument('--tid', default=None, type=int, help='debug task id')
parser.add_argument('--start_tid', default=None, type=int,
help='start task id')
# Disable pieces of the system
parser.add_argument('--disable_clauses', action='store_true')
parser.add_argument('--disable_semantics', action='store_true')
parser.add_argument('--disable_column', action='store_true')
parser.add_argument('--disable_literals', action='store_true')
# Debugging options
parser.add_argument('--compare', choices=TSQ_LEVELS,
help='Compare results against this level')
parser.add_argument('--debug', action='store_true',
help='Debugging output')
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.config_path)
# Load dataset
data = None
db_path = None
schemas_path = None
if args.dataset == 'spider':
data = json.load(open(config['spider'][f'{args.mode}_path']))
db_path = config['spider'][f'{args.mode}_db_path']
schemas_path = config['spider'][f'{args.mode}_tables_path']
elif args.dataset == 'wikisql':
# TODO
pass
out_base = results_path(config, args.dataset, args.mode, args.tsq_level,
args.tsq_rows, args.timeout, args.disable_clauses,
args.disable_semantics, args.disable_column, args.disable_literals)
verifier = DuoquestVerifier(debug=args.debug,
no_fk_select=True,
no_fk_where=True,
no_fk_having=True,
no_fk_group_by=True,
agg_projected=True,
disable_set_ops=True,
disable_subquery=True,
literals_given=True,
disable_clauses=args.disable_clauses,
disable_semantics=args.disable_semantics,
disable_column=args.disable_column,
disable_literals=args.disable_literals)
server = DuoquestServer(int(config['duoquest']['port']),
config['duoquest']['authkey'].encode('utf-8'), verifier, out_base)
schemas, kmaps = load_schemas(schemas_path)
db = Database(db_path, args.dataset)
nlqc = NLQClient(config['nlq']['host'], int(config['nlq']['port']),
config['nlq']['authkey'].encode('utf-8'), args.dataset, args.mode)
server.run_experiments(schemas, db, nlqc, data, args.tsq_level,
args.tsq_rows, tid=args.tid, compare=args.compare,
start_tid=args.start_tid, timeout=args.timeout)
if __name__ == '__main__':
main() | en | 0.481945 | # Disable pieces of the system # Debugging options # Load dataset # TODO | 2.131908 | 2 |
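# --- editor's illustration (not part of the dataset row above) ---------------
# Example invocation implied by the argparse setup above; the positional values
# must come from DATASETS, MODES and TSQ_LEVELS, and the config path shown is
# just the script's default.
#
#   python experiments.py <dataset> <mode> <tsq_level> \
#       --config_path docker_cfg.ini --tsq_rows 1 --timeout 300 --debug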
advance/libFiles.py | shashidev091/learnToRockPython | 1 | 6630787 | <filename>advance/libFiles.py
from time import ctime
from pathlib import Path
Path(r"C:\Program Files\Microsoft")
Path("/usr/local/bin")
Path()
Path("ecommerce/__init__.py")
Path.home()
"""
- important methods
"""
path = Path("ecommerce/__init__.py")
path.exists()
path.is_file()
path.is_dir()
print(path.name)
print(path.stem)
print(path.parent)
print(path.suffix)
path = path.with_name("file.txt")
print(path)
# with_suffix() returns a new path with the file extension replaced
path = path.with_suffix(".txt")
"""
- more required methods to remember are
"""
# path.exists()
# path.mkdir()
# path.rmdir()
# path.rename("rename_to")
#
out = path.iterdir()
print(out)
path = Path("ecommerce")
something = [p for p in path.iterdir()]
print(something)
"""
- glob()
- it gives an iterator to search recursively
"""
py_files = [p for p in path.glob("*.py")]
print(py_files)
# this search recursively
py_files = [p for p in path.glob("**/*.py")]
print(py_files)
# to be able to search more recursively we should use rglob()
py_files = [p for p in path.rglob("*.py")]
print(py_files)
"""
- important methods to work with files
- path.exists() => checks whether the file exists or not
- path.unlink() => deletes the file
- path.rename("newName.extension") => used to rename the file
- path.stat() => gives the information about the file
"""
print(ctime(path.stat().st_ctime))
"""
- we also have
=> path.read_bytes() --> returns the content of the file as binaries
=> path.read_text() --> returns the content of the file as text
"""
# with open("__init__.py", "r") as file
# file_name mode
# then you have to close it too
# and all of this is handled by read_text()
"""
- Same as reading the files we have writing too
- path.write_text()
- path.write_bytes()
- we can do all kinds of tasks with these files, but for copying it's not that good
"""
| <filename>advance/libFiles.py
from time import ctime
from pathlib import Path
Path(r"C:\Program Files\Microsoft")
Path("/usr/local/bin")
Path()
Path("ecommerce/__init__.py")
Path.home()
"""
- important methods
"""
path = Path("ecommerce/__init__.py")
path.exists()
path.is_file()
path.is_dir()
print(path.name)
print(path.stem)
print(path.parent)
print(path.suffix)
path = path.with_name("file.txt")
print(path)
# with_suffix() returns a new path with the file extension replaced
path = path.with_suffix(".txt")
"""
- more required methods remember to are
"""
# path.exists()
# path.mkdir()
# path.rmdir()
# path.rename("rename_to")
#
out = path.iterdir()
print(out)
path = Path("ecommerce")
something = [p for p in path.iterdir()]
print(something)
"""
- glob()
- it gives an iterator to search recursively
"""
py_files = [p for p in path.glob("*.py")]
print(py_files)
# this search recursively
py_files = [p for p in path.glob("**/*.py")]
print(py_files)
# to be able to search more recursively we should use rglob()
py_files = [p for p in path.rglob("*.py")]
print(py_files)
"""
- important methods to work with files
- path.exists() => checks whether the file exists or not
- path.unlink() => deletes the file
- path.rename("newName.extension") => used to rename the file
- path.stat() => gives the information about the file
"""
print(ctime(path.stat().st_ctime))
"""
- we also have
=> path.read_bytes() --> returns the content of the file as binaries
=> path.read_text() --> returns the content of the file as text
"""
# with open("__init__.py", "r") as file
# file_name mode
# then you have to close it too
# and this all tasks is handled by the read_text()
"""
- Same as reading the files we have writing too
- path.write_text()
- path.write_bytes()
- we can do all kinds of tasks with these files, but for copying it's not that good
"""
| en | 0.862653 | - important methods # it will give the suffix of the given extension of a file - more required methods remember to are # path.exists() # path.mkdir() # path.rmdir() # path.rename("rename_to") # - glob() - it gives an iterator to search recursively # this search recursively # to be able to search more recursively we should use rglob() - important methods to work with files - path.exists() => checks weather the file exists or not - path.unlink() => deletes the file - path.rename("newName.extension") => used to rename the file - path.stat() => gives the information about the file - we also have => path.read_bytes() --> returns the content of the file as binaries => path.read_text() --> returns the content of the file as text # with open("__init__.py", "r") as file # file_name mode # then you have to close it too # and this all tasks is handled by the read_text() - Same as reading the files we have writing too - path.write_text() - path.write_bytes() - we can do all kinds of tasks with this files but however for copying its not that good | 3.320843 | 3 |
awx/main/tests/functional/test_ldap.py | acidburn0zzz/awx | 1 | 6630788 | <gh_stars>1-10
import ldap
import ldif
import pytest
import os
from mockldap import MockLdap
from awx.api.versioning import reverse
@pytest.fixture
def ldap_generator():
def fn(fname, host='localhost'):
fh = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), fname), 'rb')
ctrl = ldif.LDIFRecordList(fh)
ctrl.parse()
directory = dict(ctrl.all_records)
mockldap = MockLdap(directory)
mockldap.start()
mockldap['ldap://{}/'.format(host)]
conn = ldap.initialize('ldap://{}/'.format(host))
return conn
#mockldap.stop()
return fn
@pytest.fixture
def ldap_settings_generator():
def fn(prefix='', dc='ansible', host='ldap.ansible.com'):
prefix = '_{}'.format(prefix) if prefix else ''
data = {
'AUTH_LDAP_SERVER_URI': 'ldap://{}'.format(host),
'AUTH_LDAP_BIND_DN': 'cn=eng_user1,ou=people,dc={},dc=com'.format(dc),
'AUTH_LDAP_BIND_PASSWORD': 'password',
"AUTH_LDAP_USER_SEARCH": [
"ou=people,dc={},dc=com".format(dc),
"SCOPE_SUBTREE",
"(cn=%(user)s)"
],
"AUTH_LDAP_TEAM_MAP": {
"LDAP Sales": {
"organization": "LDAP Organization",
"users": "cn=sales,ou=groups,dc={},dc=com".format(dc),
"remove": True
},
"LDAP IT": {
"organization": "LDAP Organization",
"users": "cn=it,ou=groups,dc={},dc=com".format(dc),
"remove": True
},
"LDAP Engineering": {
"organization": "LDAP Organization",
"users": "cn=engineering,ou=groups,dc={},dc=com".format(dc),
"remove": True
}
},
"AUTH_LDAP_REQUIRE_GROUP": None,
"AUTH_LDAP_USER_ATTR_MAP": {
"first_name": "givenName",
"last_name": "sn",
"email": "mail"
},
"AUTH_LDAP_GROUP_SEARCH": [
"dc={},dc=com".format(dc),
"SCOPE_SUBTREE",
"(objectClass=groupOfNames)"
],
"AUTH_LDAP_USER_FLAGS_BY_GROUP": {
"is_superuser": "cn=superusers,ou=groups,dc={},dc=com".format(dc)
},
"AUTH_LDAP_ORGANIZATION_MAP": {
"LDAP Organization": {
"admins": "cn=engineering_admins,ou=groups,dc={},dc=com".format(dc),
"remove_admins": False,
"users": [
"cn=engineering,ou=groups,dc={},dc=com".format(dc),
"cn=sales,ou=groups,dc={},dc=com".format(dc),
"cn=it,ou=groups,dc={},dc=com".format(dc)
],
"remove_users": False
}
},
}
if prefix:
data_new = dict()
for k,v in data.iteritems():
k_new = k.replace('AUTH_LDAP', 'AUTH_LDAP{}'.format(prefix))
data_new[k_new] = v
else:
data_new = data
return data_new
return fn
# Note: mockldap isn't fully featured. Fancy queries aren't fully baked.
# However, objects returned are solid so they should flow through django ldap middleware nicely.
@pytest.mark.django_db
def test_login(ldap_generator, patch, post, admin, ldap_settings_generator):
auth_url = reverse('api:auth_token_view')
ldap_settings_url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'ldap'})
# Generate mock ldap servers and init with ldap data
ldap_generator("../data/ldap_example.ldif", "ldap.example.com")
ldap_generator("../data/ldap_redhat.ldif", "ldap.redhat.com")
ldap_generator("../data/ldap_ansible.ldif", "ldap.ansible.com")
ldap_settings_example = ldap_settings_generator(dc='example')
ldap_settings_ansible = ldap_settings_generator(prefix='1', dc='ansible')
ldap_settings_redhat = ldap_settings_generator(prefix='2', dc='redhat')
# eng_user1 exists in ansible and redhat but not example
patch(ldap_settings_url, user=admin, data=ldap_settings_example, expect=200)
post(auth_url, data={'username': 'eng_user1', 'password': 'password'}, expect=400)
patch(ldap_settings_url, user=admin, data=ldap_settings_ansible, expect=200)
patch(ldap_settings_url, user=admin, data=ldap_settings_redhat, expect=200)
post(auth_url, data={'username': 'eng_user1', 'password': 'password'}, expect=200)
| import ldap
import ldif
import pytest
import os
from mockldap import MockLdap
from awx.api.versioning import reverse
@pytest.fixture
def ldap_generator():
def fn(fname, host='localhost'):
fh = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), fname), 'rb')
ctrl = ldif.LDIFRecordList(fh)
ctrl.parse()
directory = dict(ctrl.all_records)
mockldap = MockLdap(directory)
mockldap.start()
mockldap['ldap://{}/'.format(host)]
conn = ldap.initialize('ldap://{}/'.format(host))
return conn
#mockldap.stop()
return fn
@pytest.fixture
def ldap_settings_generator():
def fn(prefix='', dc='ansible', host='ldap.ansible.com'):
prefix = '_{}'.format(prefix) if prefix else ''
data = {
'AUTH_LDAP_SERVER_URI': 'ldap://{}'.format(host),
'AUTH_LDAP_BIND_DN': 'cn=eng_user1,ou=people,dc={},dc=com'.format(dc),
'AUTH_LDAP_BIND_PASSWORD': 'password',
"AUTH_LDAP_USER_SEARCH": [
"ou=people,dc={},dc=com".format(dc),
"SCOPE_SUBTREE",
"(cn=%(user)s)"
],
"AUTH_LDAP_TEAM_MAP": {
"LDAP Sales": {
"organization": "LDAP Organization",
"users": "cn=sales,ou=groups,dc={},dc=com".format(dc),
"remove": True
},
"LDAP IT": {
"organization": "LDAP Organization",
"users": "cn=it,ou=groups,dc={},dc=com".format(dc),
"remove": True
},
"LDAP Engineering": {
"organization": "LDAP Organization",
"users": "cn=engineering,ou=groups,dc={},dc=com".format(dc),
"remove": True
}
},
"AUTH_LDAP_REQUIRE_GROUP": None,
"AUTH_LDAP_USER_ATTR_MAP": {
"first_name": "givenName",
"last_name": "sn",
"email": "mail"
},
"AUTH_LDAP_GROUP_SEARCH": [
"dc={},dc=com".format(dc),
"SCOPE_SUBTREE",
"(objectClass=groupOfNames)"
],
"AUTH_LDAP_USER_FLAGS_BY_GROUP": {
"is_superuser": "cn=superusers,ou=groups,dc={},dc=com".format(dc)
},
"AUTH_LDAP_ORGANIZATION_MAP": {
"LDAP Organization": {
"admins": "cn=engineering_admins,ou=groups,dc={},dc=com".format(dc),
"remove_admins": False,
"users": [
"cn=engineering,ou=groups,dc={},dc=com".format(dc),
"cn=sales,ou=groups,dc={},dc=com".format(dc),
"cn=it,ou=groups,dc={},dc=com".format(dc)
],
"remove_users": False
}
},
}
if prefix:
data_new = dict()
for k,v in data.iteritems():
k_new = k.replace('AUTH_LDAP', 'AUTH_LDAP{}'.format(prefix))
data_new[k_new] = v
else:
data_new = data
return data_new
return fn
# Note: mockldap isn't fully featured. Fancy queries aren't fully baked.
# However, objects returned are solid so they should flow through django ldap middleware nicely.
@pytest.mark.django_db
def test_login(ldap_generator, patch, post, admin, ldap_settings_generator):
auth_url = reverse('api:auth_token_view')
ldap_settings_url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'ldap'})
# Generate mock ldap servers and init with ldap data
ldap_generator("../data/ldap_example.ldif", "ldap.example.com")
ldap_generator("../data/ldap_redhat.ldif", "ldap.redhat.com")
ldap_generator("../data/ldap_ansible.ldif", "ldap.ansible.com")
ldap_settings_example = ldap_settings_generator(dc='example')
ldap_settings_ansible = ldap_settings_generator(prefix='1', dc='ansible')
ldap_settings_redhat = ldap_settings_generator(prefix='2', dc='redhat')
# eng_user1 exists in ansible and redhat but not example
patch(ldap_settings_url, user=admin, data=ldap_settings_example, expect=200)
post(auth_url, data={'username': 'eng_user1', 'password': 'password'}, expect=400)
patch(ldap_settings_url, user=admin, data=ldap_settings_ansible, expect=200)
patch(ldap_settings_url, user=admin, data=ldap_settings_redhat, expect=200)
post(auth_url, data={'username': 'eng_user1', 'password': 'password'}, expect=200) | en | 0.913988 | #mockldap.stop() # Note: mockldap isn't fully featured. Fancy queries aren't fully baked. # However, objects returned are solid so they should flow through django ldap middleware nicely. # Generate mock ldap servers and init with ldap data # eng_user1 exists in ansible and redhat but not example | 2.268782 | 2 |
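# --- editor's illustration (not part of the dataset row above) ---------------
# Sketch of how the two fixtures above compose inside a test body; the fixture
# names are the ones defined in this file, the dc/host values are invented.
#
#   conn = ldap_generator("../data/ldap_example.ldif", "ldap.example.com")
#   settings = ldap_settings_generator(prefix='1', dc='example',
#                                      host='ldap.example.com')
#   # keys are now AUTH_LDAP_1_* and point at dc=example,dc=com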
topics/topic_2/3_next_and_previous.py | VladBaryliuk/my_trainings | 0 | 6630789 | <reponame>VladBaryliuk/my_trainings<gh_stars>0
def next_and_previous(number):
print("The next number for the number", number, "is", number + 1, end="")
print(".")
print("The previous number for the number", number, "is", number - 1, end="")
print(".")
next_and_previous(int(input())) | def next_and_previous(number):
print("The next number for the number", number, "is", number + 1, end="")
print(".")
print("The previous number for the number", number, "is", number - 1, end="")
print(".")
next_and_previous(int(input())) | none | 1 | 4.00892 | 4 |
|
tourist/models/attrib.py | TomGoBravo/tourist-with-flask | 3 | 6630790 | <gh_stars>1-10
from typing import Dict, Optional, List
from datetime import date
import attr
import json
from shapely.geometry.geo import shape
from shapely.geometry.geo import mapping
import geojson
def is_date_or_none(self, _, value):
if value is None:
return
date.fromisoformat(value)
def date_handler(obj):
if isinstance(obj, (date, )):
return obj.isoformat()
elif hasattr(obj, "__geo_interface__"):
# There may be an extra round trip from geo to mapping but this works
shp = shape(obj)
fixed = geojson.utils.map_coords(lambda c: round(c, 6), mapping(shp))
return fixed
else:
return None
def dump_filter(attribute: attr.Attribute, value):
if attribute.name in ('skipped', ):
return False
if attribute.name in ('markdown', 'status_comment', 'status_date', 'geonames_id', 'region', 'point') and not value:
return False
return True
@attr.s(auto_attribs=True, slots=True)
class Entity:
type: str = attr.ib(validator=attr.validators.in_(['place', 'club', 'pool']))
name: str
short_name: str
parent_short_name: str
markdown: str = ''
status_comment: str = ''
status_date: str = attr.ib(default=None, validator=is_date_or_none)
geonames_id: int = 0
region: Dict = attr.ib(factory=dict)
point: Dict = attr.ib(factory=dict)
skipped: List[str] = attr.ib(factory=list)
def dump_as_jsons(self) -> str:
return json.dumps(attr.asdict(self, filter=dump_filter), default=date_handler)
@staticmethod
def load_from_jsons(jsons: str) -> 'Entity':
d = json.loads(jsons)
skipped = []
for name in list(d.keys()):
if name not in attr.fields_dict(Entity):
skipped.append(name)
del d[name]
d['skipped'] = skipped
if d.get('region'):
d['region'] = shape(d['region'])
if d.get('point'):
d['point'] = shape(d['point'])
return Entity(**d)
| from typing import Dict, Optional, List
from datetime import date
import attr
import json
from shapely.geometry.geo import shape
from shapely.geometry.geo import mapping
import geojson
def is_date_or_none(self, _, value):
if value is None:
return
date.fromisoformat(value)
def date_handler(obj):
if isinstance(obj, (date, )):
return obj.isoformat()
elif hasattr(obj, "__geo_interface__"):
# There may be an extra round trip from geo to mapping but this works
shp = shape(obj)
fixed = geojson.utils.map_coords(lambda c: round(c, 6), mapping(shp))
return fixed
else:
return None
def dump_filter(attribute: attr.Attribute, value):
if attribute.name in ('skipped', ):
return False
if attribute.name in ('markdown', 'status_comment', 'status_date', 'geonames_id', 'region', 'point') and not value:
return False
return True
@attr.s(auto_attribs=True, slots=True)
class Entity:
type: str = attr.ib(validator=attr.validators.in_(['place', 'club', 'pool']))
name: str
short_name: str
parent_short_name: str
markdown: str = ''
status_comment: str = ''
status_date: str = attr.ib(default=None, validator=is_date_or_none)
geonames_id: int = 0
region: Dict = attr.ib(factory=dict)
point: Dict = attr.ib(factory=dict)
skipped: List[str] = attr.ib(factory=list)
def dump_as_jsons(self) -> str:
return json.dumps(attr.asdict(self, filter=dump_filter), default=date_handler)
@staticmethod
def load_from_jsons(jsons: str) -> 'Entity':
d = json.loads(jsons)
skipped = []
for name in list(d.keys()):
if name not in attr.fields_dict(Entity):
skipped.append(name)
del d[name]
d['skipped'] = skipped
if d.get('region'):
d['region'] = shape(d['region'])
if d.get('point'):
d['point'] = shape(d['point'])
return Entity(**d) | en | 0.908434 | # There may be an extra round trip from geo to mapping but this works | 2.305676 | 2 |
parfile.py | gdesvignes/python-tools | 0 | 6630791 | <filename>parfile.py<gh_stars>0
## parfile.py taken from Presto by <NAME> (NRAO)
## https://github.com/scottransom/presto
## http://www.cv.nrao.edu/~sransom
from types import StringType, FloatType
import math
import sys
import psr_utils as pu
import os
import numpy as np
try:
from slalib import sla_ecleq, sla_eqecl, sla_eqgal
slalib = True
except ImportError:
slalib = False
#
# The following are the parameters that are accepted in a
# par file when trying to determine a pulsar ephemeris.
#
# PEPOCH Epoch of period/frequency parameters and position (MJD)
# F0 Pulsar rotation frequency (s^-1)
# F Alternative for F0
# F1 Pulsar rotation frequency derivative (s^-2)
# F2 Pulsar rotation frequency second derivative
# P0 Pulsar period (s).
# P Alternative for P0
# P1 Pulsar period derivative (10^-15).
# DM Dispersion measure (pc cm^-3)
# A1 Projected pulsar semi-major axis of 1st orbit
# E Eccentricity of 1st orbit
# T0 Epoch of periastron passage of 1st orbit (MJD)
# TASC Epoch of ascending node passage (MJD)
# PB Period of 1st orbit (days)
# OM Longitude of periastron passage, 1st orbit (deg)
# EPS1 First Laplace parameter [eccentricity times sin(omega)]
# EPS2 Second Laplace parameter [eccentricity times cos(omega)]
# EPS1DOT Time derivative of EPS1
# EPS2DOT Time derivative of EPS2
# OMDOT Rate of periastron advance (deg/yr)
# PBDOT Rate of change of orbital period (10^-12)
# XDOT Rate of change of projected semi-major axis (-12)
# EDOT Rate of change of eccentricity (-12)
#
# The following are _not_ currently implemented:
# F3, F4, F5,... Higher order frequency derivative terms
# OM2DOT Second time derivative of angle of periastron (rad/s^2)
# X2DOT Second time derivative of projected semi-major axis (1/s)
#
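#
# Editor's note (illustrative, not from the original file): a TEMPO-style par
# file is plain text with "KEY value [fit-flag] [error]" lines, for example:
#
#    PSRJ     J0000+0000
#    RAJ      00:00:00.0
#    DECJ     00:00:00.0
#    F0       1.0  1
#    PEPOCH   55000
#    DM       10.0
#
# which this module reads with:  par = Parfile('pulsar.par')
#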
float_keys = ["F", "F0", "F1", "F2", "F3", "F4", "F5", "F6",
"P", "P0", "P1", "P2", "P3", "P4", "P5", "P6", "PX",
"PEPOCH", "POSEPOCH", "DM", "DM1", "DM2", "START", "FINISH", "NTOA",
"TRES", "TZRMJD", "TZRFRQ", "TZRSITE", "NITS",
"A1", "XDOT", "E", "ECC", "EDOT", "T0", "PB", "PBDOT", "OM", "OMDOT",
"EPS1", "EPS2", "EPS1DOT", "EPS2DOT", "TASC", "LAMBDA", "BETA",
"RA_RAD", "DEC_RAD", "GAMMA", "SINI", "M2", "MTOT",
"FB0", "FB1", "FB2", "ELAT", "ELONG", "LAMBDA", "BETA", "PMRA", "PMDEC", "PMELONG", "PMELAT", "KOM", "KIN"]
str_keys = ["FILE", "PSR", "PSRJ", "RAJ", "DECJ", "EPHEM", "CLK", "BINARY"]
par_keys = ["PSRJ", "RAJ", "DECJ", "F0", "F1", "PEPOCH", "POSEPOCH", "DM", "DM1", "DM2", "BINARY", "PB", "T0", "A1", "OM", "ECC"]
class Parfile:
def __init__(self, parfile=None):
setattr(self,'PSRJ', "")
setattr(self,'RAJ', "00:00:00.0")
setattr(self,'DECJ', "00:00:00.0")
setattr(self,'F0',0.0)
setattr(self,'F1',0.0)
#setattr(self,'OM',0.0)
#setattr(self,'ECC',0.0)
#setattr(self,'PB',0.0)
#setattr(self,'A1',0.0)
#setattr(self,'T0',0.0)
setattr(self,'PEPOCH',0.0)
setattr(self,'POSEPOCH',0.0)
setattr(self,'DM',0.0)
setattr(self,'DM1',0.0)
setattr(self,'DM2',0.0)
setattr(self,'EPHEM','')
setattr(self,'BINARY',"BT")
        self.use_eclip = False # Use ecliptic coordinates
        self.use_ell = False # Use elliptic coordinates
self.parfile = parfile
if self.parfile:
self.read(self.parfile)
def __str__(self):
out = ""
for k, v in self.__dict__.items():
if k[:2]!="__":
if type(self.__dict__[k]) is StringType:
out += "%10s = '%s'\n" % (k, v)
else:
out += "%10s = %-20.15g\n" % (k, v)
return out
def read(self, parfilenm):
self.FILE = parfilenm
#print parfilenm
pf = open(parfilenm)
for line in pf.readlines():
# Convert any 'D-' or 'D+' to 'E-' or 'E+'
line = line.replace("D-", "E-")
line = line.replace("D+", "E+")
try:
splitline = line.split()
key = splitline[0]
if key in str_keys:
setattr(self, key, splitline[1])
elif key in float_keys:
try:
setattr(self, key, float(splitline[1]))
except ValueError:
pass
if len(splitline)==3: # Some parfiles don't have flags, but do have errors
if splitline[2] not in ['0', '1']:
setattr(self, key+'_ERR', float(splitline[2]))
if len(splitline)==4:
setattr(self, key+'_ERR', float(splitline[3]))
except:
if line.strip():
print ' ', line
# Read PSR name
if hasattr(self, 'PSR'):
setattr(self, 'PSR', self.PSR)
if hasattr(self, 'PSRJ'):
setattr(self, 'PSRJ', self.PSRJ)
# Deal with Ecliptic coords
if (hasattr(self, 'BETA') and hasattr(self, 'LAMBDA')):
self.use_eclip = True
setattr(self, 'ELAT', self.BETA)
setattr(self, 'ELONG', self.LAMBDA)
if (slalib and hasattr(self, 'ELAT') and hasattr(self, 'ELONG')):
self.use_eclip = True
if hasattr(self, 'POSEPOCH'):
epoch = self.POSEPOCH
else:
epoch = self.PEPOCH
ra_rad, dec_rad = sla_ecleq(self.ELONG*pu.DEGTORAD,
self.ELAT*pu.DEGTORAD, epoch)
rstr = pu.coord_to_string(*pu.rad_to_hms(ra_rad))
dstr = pu.coord_to_string(*pu.rad_to_dms(dec_rad))
setattr(self, 'RAJ', rstr)
setattr(self, 'DECJ', dstr)
if (slalib and hasattr(self, 'ELAT') and hasattr(self, 'ELONG')):
setattr(self, 'PMELONG', self.PMELONG)
setattr(self, 'PMELAT', self.PMELAT)
if hasattr(self, 'RAJ'):
setattr(self, 'RA_RAD', pu.ra_to_rad(self.RAJ))
if hasattr(self, 'DECJ'):
setattr(self, 'DEC_RAD', pu.dec_to_rad(self.DECJ))
# Compute the Galactic coords
if (slalib and hasattr(self, 'RA_RAD') and hasattr(self, 'DEC_RAD')):
l, b = sla_eqgal(self.RA_RAD, self.DEC_RAD)
setattr(self, 'GLONG', l*pu.RADTODEG)
setattr(self, 'GLAT', b*pu.RADTODEG)
# Compute the Ecliptic coords
if (slalib and hasattr(self, 'RA_RAD') and hasattr(self, 'DEC_RAD')):
if hasattr(self, 'POSEPOCH'):
epoch = self.POSEPOCH
else:
epoch = self.PEPOCH
elon, elat = sla_eqecl(self.RA_RAD, self.DEC_RAD, epoch)
setattr(self, 'ELONG', elon*pu.RADTODEG)
setattr(self, 'ELAT', elat*pu.RADTODEG)
if hasattr(self, 'P'):
setattr(self, 'P0', self.P)
if hasattr(self, 'P0'):
setattr(self, 'F0', 1.0/self.P0)
if hasattr(self, 'F0'):
setattr(self, 'P0', 1.0/self.F0)
if hasattr(self, 'F1'):
setattr(self, 'P1', -self.F1/(self.F0*self.F0))
if hasattr(self, 'FB0'):
setattr(self, 'PB', (1.0/self.FB0)/86400.0)
if hasattr(self, 'P0_ERR'):
if hasattr(self, 'P1_ERR'):
f, ferr, fd, fderr = pu.pferrs(self.P0, self.P0_ERR,
self.P1, self.P1_ERR)
setattr(self, 'F0_ERR', ferr)
setattr(self, 'F1', fd)
setattr(self, 'F1_ERR', fderr)
else:
f, fd, = pu.p_to_f(self.P0, self.P1)
setattr(self, 'F0_ERR', self.P0_ERR/(self.P0*self.P0))
setattr(self, 'F1', fd)
if hasattr(self, 'F0_ERR'):
if hasattr(self, 'F1_ERR'):
p, perr, pd, pderr = pu.pferrs(self.F0, self.F0_ERR,
self.F1, self.F1_ERR)
setattr(self, 'P0_ERR', perr)
setattr(self, 'P1', pd)
setattr(self, 'P1_ERR', pderr)
else:
p, pd, = pu.p_to_f(self.F0, self.F1)
setattr(self, 'P0_ERR', self.F0_ERR/(self.F0*self.F0))
setattr(self, 'P1', pd)
if hasattr(self, 'DM'):
setattr(self, 'DM', self.DM)
if hasattr(self, 'DM1'):
setattr(self, 'DM1', self.DM1)
if hasattr(self, 'DM2'):
setattr(self, 'DM2', self.DM2)
if hasattr(self, 'EPS1') and hasattr(self, 'EPS2'):
self.use_ell = True
ecc = math.sqrt(self.EPS1 * self.EPS1 + self.EPS2 * self.EPS2)
omega = math.atan2(self.EPS1, self.EPS2)
if self.EPS1==0.0 and self.EPS2==0.0:
ecc_err = pow(pow(self.EPS1_ERR,2)+pow(self.EPS2_ERR,2), .5)
else:
                ecc_err = pow(pow(self.EPS1*self.EPS1_ERR,2)+pow(self.EPS2*self.EPS2_ERR,2),0.5)/ecc
setattr(self, 'ECC', ecc)
setattr(self, 'ECC_ERR', ecc_err)
setattr(self, 'OM', omega)
if hasattr(self, 'PB') and hasattr(self, 'A1') and not hasattr(self, 'ECC'):
setattr(self, 'ECC', 0.0)
if hasattr(self, 'BINARY'):
setattr(self, 'BINARY', self.BINARY)
if hasattr(self, 'KIN'):
setattr(self, 'SINI', math.sin(self.KIN * np.pi/180.))
setattr(self, 'SINI_ERR', math.sin(self.KIN * np.pi/180.)- math.sin(self.KIN * np.pi/180. - self.KIN_ERR * np.pi/180.))
pf.close()
def write(self, parfilenm):
# def write(self, parfilenm, p2f, param):
out = ""
for k in par_keys:
if hasattr(self, k):
v = self.__dict__[k]
if type(self.__dict__[k]) is StringType:
out += "%s %27s\n" % (k, v)
else:
out += "%-12s%20.15g\n" % (k, v)
#print out
pfo = open(parfilenm,'w')
pfo.write(out)
pfo.close()
def set_param(self, param, value):
setattr(self, param, value)
def make_polycos(self, mjd, tcode='f', polycofile='polyco.dat', fctr=1400., tobs=48, verbose=False, span=1):
if self.PSRJ:
psrname = self.PSRJ
elif self.PSR:
psrname = self.PSR
else:
print 'No pulsar name in Parfile. Abort'
sys.exit(-1)
cmd = "tempo -z -f %s -Z PSR=%s -Z SITE=%c -Z OUT=%s -Z START=%f -Z FREQ=%.1lf -Z TOBS=%dH -Z SPAN=%dH" % (self.parfile, psrname, tcode, polycofile, mjd-0.5, fctr, tobs, span)
if verbose:
print "Debug: ", cmd
os.system(cmd)
return psrname, polycofile
if __name__ == '__main__':
a = Parfile(sys.argv[1])
print a
| <filename>parfile.py<gh_stars>0
## parfile.py taken from Presto by <NAME> (NRAO)
## https://github.com/scottransom/presto
## http://www.cv.nrao.edu/~sransom
from types import StringType, FloatType
import math
import sys
import psr_utils as pu
import os
import numpy as np
try:
from slalib import sla_ecleq, sla_eqecl, sla_eqgal
slalib = True
except ImportError:
slalib = False
#
# The following are the parameters that are accepted in a
# par file when trying to determine a pulsar ephemeris.
#
# PEPOCH Epoch of period/frequency parameters and position (MJD)
# F0 Pulsar rotation frequency (s-2)
# F Alternative for F0
# F1 Pulsar rotation frequency derivative (s^-2)
# F2 Pulsar rotation frequency second derivative
# P0 Pulsar period (s).
# P Alternative for P0
# P1 Pulsar period derivative (10^-15).
# DM Dispersion measure (pc cm^-3)
# A1 Projected pulsar semi-major axis of 1st orbit
# E Eccentricity of 1st orbit
# T0 Epoch of periastron passage of 1st orbit (MJD)
# TASC Epoch of ascending node passage (MJD)
# PB Period of 1st orbit (days)
# OM Longitude of periastron passage, 1st orbit (deg)
# EPS1 First Laplace parameter [eccentricity times sin(omega)]
# EPS2 Second Laplace parameter [eccentricity times cos(omega)]
# EPS1DOT Time derivative of EPS1
# EPS2DOT Time derivative of EPS2
# OMDOT Rate of periastron advance (deg/yr)
# PBDOT Rate of change of orbital period (10^-12)
# XDOT Rate of change of projected semi-major axis (10^-12)
# EDOT Rate of change of eccentricity (10^-12)
#
# The following are _not_ currently implemented:
# F3, F4, F5,... Higher order frequency derivative terms
# OM2DOT Second time derivative of angle of periastron (rad/s^2)
# X2DOT Second time derivative of projected semi-major axis (1/s)
#
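# A minimal par file of the kind read()/write() below handle might look like
# (hypothetical values; each line is "KEY  value [fit-flag] [error]"):
#
#   PSRJ      J0000+0000
#   RAJ       12:34:56.7       1
#   DECJ      -01:23:45.6      1
#   F0        123.456789       1   1e-10
#   F1        -1.0e-15         1
#   PEPOCH    55000.0
#   DM        42.0
#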
float_keys = ["F", "F0", "F1", "F2", "F3", "F4", "F5", "F6",
"P", "P0", "P1", "P2", "P3", "P4", "P5", "P6", "PX",
"PEPOCH", "POSEPOCH", "DM", "DM1", "DM2", "START", "FINISH", "NTOA",
"TRES", "TZRMJD", "TZRFRQ", "TZRSITE", "NITS",
"A1", "XDOT", "E", "ECC", "EDOT", "T0", "PB", "PBDOT", "OM", "OMDOT",
"EPS1", "EPS2", "EPS1DOT", "EPS2DOT", "TASC", "LAMBDA", "BETA",
"RA_RAD", "DEC_RAD", "GAMMA", "SINI", "M2", "MTOT",
"FB0", "FB1", "FB2", "ELAT", "ELONG", "LAMBDA", "BETA", "PMRA", "PMDEC", "PMELONG", "PMELAT", "KOM", "KIN"]
str_keys = ["FILE", "PSR", "PSRJ", "RAJ", "DECJ", "EPHEM", "CLK", "BINARY"]
par_keys = ["PSRJ", "RAJ", "DECJ", "F0", "F1", "PEPOCH", "POSEPOCH", "DM", "DM1", "DM2", "BINARY", "PB", "T0", "A1", "OM", "ECC"]
class Parfile:
def __init__(self, parfile=None):
setattr(self,'PSRJ', "")
setattr(self,'RAJ', "00:00:00.0")
setattr(self,'DECJ', "00:00:00.0")
setattr(self,'F0',0.0)
setattr(self,'F1',0.0)
#setattr(self,'OM',0.0)
#setattr(self,'ECC',0.0)
#setattr(self,'PB',0.0)
#setattr(self,'A1',0.0)
#setattr(self,'T0',0.0)
setattr(self,'PEPOCH',0.0)
setattr(self,'POSEPOCH',0.0)
setattr(self,'DM',0.0)
setattr(self,'DM1',0.0)
setattr(self,'DM2',0.0)
setattr(self,'EPHEM','')
setattr(self,'BINARY',"BT")
        self.use_eclip = False  # Use ecliptic coordinates (set in read() when LAMBDA/BETA or ELONG/ELAT are present)
        self.use_ell = False  # Use ELL1 Laplace parameters (set in read() when EPS1/EPS2 are present)
self.parfile = parfile
if self.parfile:
self.read(self.parfile)
def __str__(self):
out = ""
for k, v in self.__dict__.items():
if k[:2]!="__":
if type(self.__dict__[k]) is StringType:
out += "%10s = '%s'\n" % (k, v)
else:
out += "%10s = %-20.15g\n" % (k, v)
return out
def read(self, parfilenm):
self.FILE = parfilenm
#print parfilenm
pf = open(parfilenm)
for line in pf.readlines():
# Convert any 'D-' or 'D+' to 'E-' or 'E+'
line = line.replace("D-", "E-")
line = line.replace("D+", "E+")
try:
splitline = line.split()
key = splitline[0]
if key in str_keys:
setattr(self, key, splitline[1])
elif key in float_keys:
try:
setattr(self, key, float(splitline[1]))
except ValueError:
pass
if len(splitline)==3: # Some parfiles don't have flags, but do have errors
if splitline[2] not in ['0', '1']:
setattr(self, key+'_ERR', float(splitline[2]))
if len(splitline)==4:
setattr(self, key+'_ERR', float(splitline[3]))
except:
if line.strip():
print ' ', line
# Read PSR name
if hasattr(self, 'PSR'):
setattr(self, 'PSR', self.PSR)
if hasattr(self, 'PSRJ'):
setattr(self, 'PSRJ', self.PSRJ)
# Deal with Ecliptic coords
if (hasattr(self, 'BETA') and hasattr(self, 'LAMBDA')):
self.use_eclip = True
setattr(self, 'ELAT', self.BETA)
setattr(self, 'ELONG', self.LAMBDA)
if (slalib and hasattr(self, 'ELAT') and hasattr(self, 'ELONG')):
self.use_eclip = True
if hasattr(self, 'POSEPOCH'):
epoch = self.POSEPOCH
else:
epoch = self.PEPOCH
ra_rad, dec_rad = sla_ecleq(self.ELONG*pu.DEGTORAD,
self.ELAT*pu.DEGTORAD, epoch)
rstr = pu.coord_to_string(*pu.rad_to_hms(ra_rad))
dstr = pu.coord_to_string(*pu.rad_to_dms(dec_rad))
setattr(self, 'RAJ', rstr)
setattr(self, 'DECJ', dstr)
if (slalib and hasattr(self, 'ELAT') and hasattr(self, 'ELONG')):
setattr(self, 'PMELONG', self.PMELONG)
setattr(self, 'PMELAT', self.PMELAT)
if hasattr(self, 'RAJ'):
setattr(self, 'RA_RAD', pu.ra_to_rad(self.RAJ))
if hasattr(self, 'DECJ'):
setattr(self, 'DEC_RAD', pu.dec_to_rad(self.DECJ))
# Compute the Galactic coords
if (slalib and hasattr(self, 'RA_RAD') and hasattr(self, 'DEC_RAD')):
l, b = sla_eqgal(self.RA_RAD, self.DEC_RAD)
setattr(self, 'GLONG', l*pu.RADTODEG)
setattr(self, 'GLAT', b*pu.RADTODEG)
# Compute the Ecliptic coords
if (slalib and hasattr(self, 'RA_RAD') and hasattr(self, 'DEC_RAD')):
if hasattr(self, 'POSEPOCH'):
epoch = self.POSEPOCH
else:
epoch = self.PEPOCH
elon, elat = sla_eqecl(self.RA_RAD, self.DEC_RAD, epoch)
setattr(self, 'ELONG', elon*pu.RADTODEG)
setattr(self, 'ELAT', elat*pu.RADTODEG)
if hasattr(self, 'P'):
setattr(self, 'P0', self.P)
if hasattr(self, 'P0'):
setattr(self, 'F0', 1.0/self.P0)
if hasattr(self, 'F0'):
setattr(self, 'P0', 1.0/self.F0)
if hasattr(self, 'F1'):
setattr(self, 'P1', -self.F1/(self.F0*self.F0))
if hasattr(self, 'FB0'):
setattr(self, 'PB', (1.0/self.FB0)/86400.0)
if hasattr(self, 'P0_ERR'):
if hasattr(self, 'P1_ERR'):
f, ferr, fd, fderr = pu.pferrs(self.P0, self.P0_ERR,
self.P1, self.P1_ERR)
setattr(self, 'F0_ERR', ferr)
setattr(self, 'F1', fd)
setattr(self, 'F1_ERR', fderr)
else:
f, fd, = pu.p_to_f(self.P0, self.P1)
setattr(self, 'F0_ERR', self.P0_ERR/(self.P0*self.P0))
setattr(self, 'F1', fd)
if hasattr(self, 'F0_ERR'):
if hasattr(self, 'F1_ERR'):
p, perr, pd, pderr = pu.pferrs(self.F0, self.F0_ERR,
self.F1, self.F1_ERR)
setattr(self, 'P0_ERR', perr)
setattr(self, 'P1', pd)
setattr(self, 'P1_ERR', pderr)
else:
p, pd, = pu.p_to_f(self.F0, self.F1)
setattr(self, 'P0_ERR', self.F0_ERR/(self.F0*self.F0))
setattr(self, 'P1', pd)
if hasattr(self, 'DM'):
setattr(self, 'DM', self.DM)
if hasattr(self, 'DM1'):
setattr(self, 'DM1', self.DM1)
if hasattr(self, 'DM2'):
setattr(self, 'DM2', self.DM2)
if hasattr(self, 'EPS1') and hasattr(self, 'EPS2'):
self.use_ell = True
ecc = math.sqrt(self.EPS1 * self.EPS1 + self.EPS2 * self.EPS2)
omega = math.atan2(self.EPS1, self.EPS2)
if self.EPS1==0.0 and self.EPS2==0.0:
ecc_err = pow(pow(self.EPS1_ERR,2)+pow(self.EPS2_ERR,2), .5)
else:
                ecc_err = pow(pow(self.EPS1*self.EPS1_ERR,2)+pow(self.EPS2*self.EPS2_ERR,2),0.5)/ecc
setattr(self, 'ECC', ecc)
setattr(self, 'ECC_ERR', ecc_err)
setattr(self, 'OM', omega)
if hasattr(self, 'PB') and hasattr(self, 'A1') and not hasattr(self, 'ECC'):
setattr(self, 'ECC', 0.0)
if hasattr(self, 'BINARY'):
setattr(self, 'BINARY', self.BINARY)
if hasattr(self, 'KIN'):
setattr(self, 'SINI', math.sin(self.KIN * np.pi/180.))
setattr(self, 'SINI_ERR', math.sin(self.KIN * np.pi/180.)- math.sin(self.KIN * np.pi/180. - self.KIN_ERR * np.pi/180.))
pf.close()
def write(self, parfilenm):
# def write(self, parfilenm, p2f, param):
out = ""
for k in par_keys:
if hasattr(self, k):
v = self.__dict__[k]
if type(self.__dict__[k]) is StringType:
out += "%s %27s\n" % (k, v)
else:
out += "%-12s%20.15g\n" % (k, v)
#print out
pfo = open(parfilenm,'w')
pfo.write(out)
pfo.close()
def set_param(self, param, value):
setattr(self, param, value)
def make_polycos(self, mjd, tcode='f', polycofile='polyco.dat', fctr=1400., tobs=48, verbose=False, span=1):
if self.PSRJ:
psrname = self.PSRJ
elif self.PSR:
psrname = self.PSR
else:
print 'No pulsar name in Parfile. Abort'
sys.exit(-1)
cmd = "tempo -z -f %s -Z PSR=%s -Z SITE=%c -Z OUT=%s -Z START=%f -Z FREQ=%.1lf -Z TOBS=%dH -Z SPAN=%dH" % (self.parfile, psrname, tcode, polycofile, mjd-0.5, fctr, tobs, span)
if verbose:
print "Debug: ", cmd
os.system(cmd)
return psrname, polycofile
if __name__ == '__main__':
a = Parfile(sys.argv[1])
print a
| en | 0.707142 | ## parfile.py taken from Presto by <NAME> (NRAO) ## https://github.com/scottransom/presto ## http://www.cv.nrao.edu/~sransom # # The following are the parameters that are accepted in a # par file when trying to determine a pulsar ephemeris. # # PEPOCH Epoch of period/frequency parameters and position (MJD) # F0 Pulsar rotation frequency (s-2) # F Alternative for F0 # F1 Pulsar rotation frequency derivative (s^-2) # F2 Pulsar rotation frequency second derivative # P0 Pulsar period (s). # P Alternative for P0 # P1 Pulsar period derivative (10^-15). # DM Dispersion measure (pc cm^-3) # A1 Projected pulsar semi-major axis of 1st orbit # E Eccentricity of 1st orbit # T0 Epoch of periastron passage of 1st orbit (MJD) # TASC Epoch of ascending node passage (MJD) # PB Period of 1st orbit (days) # OM Longitude of periastron passage, 2st orbit (deg) # EPS1 First Laplace parameter [eccentricity times sin(omega)] # EPS2 Second Laplace parameter [eccentricity times cos(omega)] # EPS1DOT Time derivative of EPS1 # EPS2DOT Time derivative of EPS2 # OMDOT Rate of periastron advance (deg/yr) # PBDOT Rate of change of orbital period (10^-12) # XDOT Rate of change of projected semi-major axis (-12) # EDOT Rate of change of eccentricity (-12) # # The following are _not_ currently implemented: # F3, F4, F5,... Higher order frequency derivative terms # OM2DOT Second time derivative of angle of periastron (rad/s^2) # X2DOT Second time derivative of projected semi-major axis (1/s) # #setattr(self,'OM',0.0) #setattr(self,'ECC',0.0) #setattr(self,'PB',0.0) #setattr(self,'A1',0.0) #setattr(self,'T0',0.0) # Use ecliptic coordinates # Use elliptic coordinates #print parfilenm # Convert any 'D-' or 'D+' to 'E-' or 'E+' # Some parfiles don't have flags, but do have errors # Read PSR name # Deal with Ecliptic coords # Compute the Galactic coords # Compute the Ecliptic coords # def write(self, parfilenm, p2f, param): #print out | 2.26485 | 2 |
vedastr_cstr/vedastr/models/bodies/feature_extractors/encoders/backbones/__init__.py | bsm8734/formula-image-latex-recognition | 13 | 6630792 | <reponame>bsm8734/formula-image-latex-recognition
from .builder import build_backbone # noqa 401
from .general_backbone import GBackbone # noqa 401
from .resnet import GResNet, ResNet # noqa 401
| from .builder import build_backbone # noqa 401
from .general_backbone import GBackbone # noqa 401
from .resnet import GResNet, ResNet # noqa 401 | uz | 0.195003 | # noqa 401 # noqa 401 # noqa 401 | 0.98733 | 1 |
3-storage.py | CityOfZion/python-smart-contract-workshop | 82 | 6630793 | <reponame>CityOfZion/python-smart-contract-workshop
"""
This example shows how to write, read and manipulate a value in storage.
It is also a good example of using neo-python's `debugstorage`, which
allows you to test `Put` operations with `build .. test` commands.
Debugstorage is enabled by default, you can disable it with
`sc debugstorage off` and, more importantly, reset it with
`sc debugstorage reset`.
Test & Build:
neo> sc build_run 3-storage.py True False False 07 05
Invoke this multiple times to see an increasing value in storage. Reset with:
neo> sc debugstorage reset
"""
from boa.interop.Neo.Runtime import Log, Notify
from boa.interop.Neo.Storage import Get, Put, GetContext
def Main():
context = GetContext()
# This is the storage key we use in this example
item_key = 'test-storage-key'
# Try to get a value for this key from storage
item_value = Get(context, item_key)
msg = ["Value read from storage:", item_value]
Notify(msg)
if len(item_value) == 0:
Notify("Storage key not yet set. Setting to 1")
item_value = 1
else:
Notify("Storage key already set. Incrementing by 1")
item_value += 1
# Store the new value
Put(context, item_key, item_value)
msg = ["New value written into storage:", item_value]
Notify(msg)
return item_value
| """
This example shows how to write, read and manipulate a value in storage.
It is also a good example of using neo-python's `debugstorage`, which
allows you to test `Put` operations with `build .. test` commands.
Debugstorage is enabled by default, you can disable it with
`sc debugstorage off` and, more importantly, reset it with
`sc debugstorage reset`.
Test & Build:
neo> sc build_run 3-storage.py True False False 07 05
Invoke this multiple times to see an increasing value in storage. Reset with:
neo> sc debugstorage reset
"""
from boa.interop.Neo.Runtime import Log, Notify
from boa.interop.Neo.Storage import Get, Put, GetContext
def Main():
context = GetContext()
# This is the storage key we use in this example
item_key = 'test-storage-key'
# Try to get a value for this key from storage
item_value = Get(context, item_key)
msg = ["Value read from storage:", item_value]
Notify(msg)
if len(item_value) == 0:
Notify("Storage key not yet set. Setting to 1")
item_value = 1
else:
Notify("Storage key already set. Incrementing by 1")
item_value += 1
# Store the new value
Put(context, item_key, item_value)
msg = ["New value written into storage:", item_value]
Notify(msg)
return item_value | en | 0.763665 | This example shows how to write, read and manipulate value in storage. It is also a good example of using neo-python's `debugstorage`, which allows you to test `Put` operations with `build .. test` commands. Debugstorage is enabled by default, you can disable it with `sc debugstorage off` and, more importantly, reset it with `sc debugstorage reset`. Test & Build: neo> sc build_run 3-storage.py True False False 07 05 Invoke this multiple times to see an increasing value in storage. Reset with: neo> sc debugstorage reset # This is the storage key we use in this example # Try to get a value for this key from storage # Store the new value | 3.147688 | 3 |
ls_astropy_es.py | olgaborchevkina/ls_astropy_es | 0 | 6630794 | # -*- coding: utf-8 -*-
"""
Plot graph according to the DAT file
@author: <NAME>
"""
import csv
import glob
import os
import matplotlib.pyplot as plt
import scipy.signal as signal
import numpy as np
import math
from astropy.timeseries import LombScargle
def read_raw_file_data(filepath):
'''
Read data in list
'''
raw_data = list()
# Get raw data
with open(filepath, 'r') as dest_f:
raw_data = dest_f.readlines()
return raw_data
def process_file(data, out_filepath, window, step):
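    """Split `data` (a list of raw lines) into sliding windows of `window`
    lines, advancing by `step` lines, and write each window to its own .dat
    file whose name is derived from `out_filepath`, the cursor position,
    the window size and the step."""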
line_cursor = 0
while (line_cursor < (len(data) - window)):
with open(out_filepath + '_c' + "{:08d}".format(line_cursor) + '_w' + str(window) + '_s' + str(step) + ".dat", 'w') as dest_f:
for i in range(window):
dest_f.write(data[line_cursor + i])
line_cursor += step
def read_file_data(filepath):
'''
Read data in [[val,time],[val, time]] format
'''
raw_data = None
data = list()
# Get raw data
with open(filepath, 'r') as dest_f:
data_iter = csv.reader(dest_f,delimiter="\t")
raw_data = [raw_data for raw_data in data_iter]
# Convert data to list. If data is absent set it to None
for raw_val in raw_data:
amp = 0
time = 0
try:
amp = float(raw_val[0])
except:
amp = None
finally:
time = float(raw_val[1])
data.append([amp, time])
return data
def save_to_ascii_file(data_list, out_filepath, header=[]):
'''
Save data in format [[],[]] into DAT file
- CSV
    - with \t delimiter
- \n line endings
'''
write_list = []
for data in data_list:
output_str = ""
for val in data:
output_str += str(val) + "\t"
output_str = output_str[:-1]
output_str += "\n"
write_list.append(output_str)
with open(out_filepath,"w") as f:
f.writelines(write_list)
def plot_graph(data, out_filepath, to_display=False, save_to_disk=True):
'''
    Plot the graph and return its data
    Params
        data - input data as a list of [value, time] pairs
        out_filepath - output file path to create
to_display - if set to true then graph will be shown on the display
save_to_disk - if set to true then graph will be saved on the disk
Return
List of lists of graph values in form [freq, period, pgram_value, time_value]
'''
output_data = list()
x = list()
y = list()
# Get first time value as constant time value for all window
time_value = data[0][1]
for val_pair in data:
if val_pair[0] != None:
x.append(val_pair[1])
y.append(val_pair[0])
# Calculate Lomb-Scargle periodogram Astropy
astropy_pgram = LombScargle(x, y, normalization='psd')
astropy_freq, astropy_power = astropy_pgram.autopower()
astropy_false_alarm_probability = astropy_pgram.false_alarm_probability(astropy_power.max(), method='baluev')
# Create figure with 2 subplots
fig = plt.figure()
source_ax = fig.add_subplot(211)
astropy_pgram_ax = fig.add_subplot(212)
#Now make a plot of the input data:
source_ax.plot(x, y, 'b+')
# astropy periodogram
astropy_pgram_ax.plot(astropy_freq, astropy_power,'g')
astropy_pgram_ax.text(0.95, 0.95, "FAP(first_peak) = {:.4f}%".format(astropy_false_alarm_probability),
verticalalignment='top', horizontalalignment='right',
transform=astropy_pgram_ax.transAxes,
color='green', fontsize=15)
if to_display:
plt.show()
if save_to_disk:
plt.savefig(out_filepath)
# Generate output
for idx, freq in enumerate(astropy_freq):
period = 1 / freq
output_data.append([freq, period, astropy_power[idx], time_value])
plt.cla()
plt.clf()
plt.close(fig)
return output_data
def process_windowed_files(path, output_file_path):
files = glob.glob(path + "*.dat")
for filepath in files:
# Reject old merged files
if "!" in filepath:
continue
# Reject old windowed files
if "windowed" in filepath:
continue
print("Process >> " + filepath)
read_data = read_file_data(filepath)
out_dat_filepath = path + os.path.basename(filepath) + "_windowed" + ".dat"
out_png_filepath = path + os.path.basename(filepath) + "_windowed" + ".png"
output_data = plot_graph(read_data,
out_png_filepath)
print("Saved PNG to >> " + out_png_filepath)
save_to_ascii_file(output_data, out_dat_filepath)
print("Saved DAT to >> " + out_dat_filepath)
    try:
        os.remove(output_file_path)
    except Exception:
        # Ignore errors if the merged file does not exist yet
        pass
windowed_files = glob.glob(path + "*_windowed.dat")
for windowed_file in windowed_files:
with open(windowed_file, 'r') as windowed_f:
data = windowed_f.read()
with open(output_file_path, 'a') as merged_file:
merged_file.write(data)
def main():
print("Script is started")
files = glob.glob("./input/*.dat") # Change path here or write filepath
OUTPUT_PATH = "./output/" # Change output here
WINDOW = 648 # Change window value here
STEP = 24 # Change step value here
FREQ_START = 0.08 # Change freq start here
FREQ_END = 1.0 # Change freq end here
FREQ_NUM = 500 # Change freq num here
for filepath in files:
print("Process >> " + filepath)
read_lines = read_raw_file_data(filepath)
out_dat_filepath = OUTPUT_PATH + os.path.basename(filepath)
process_file(read_lines, out_dat_filepath, WINDOW, STEP)
process_windowed_files(OUTPUT_PATH, f'{OUTPUT_PATH}!{os.path.basename(filepath)}_merged_file.dat')
print(f"<{filepath}> succesful processed by the script")
print("Script is finished")
if __name__ == "__main__":
main() | # -*- coding: utf-8 -*-
"""
Plot graph according to the DAT file
@author: <NAME>
"""
import csv
import glob
import os
import matplotlib.pyplot as plt
import scipy.signal as signal
import numpy as np
import math
from astropy.timeseries import LombScargle
def read_raw_file_data(filepath):
'''
Read data in list
'''
raw_data = list()
# Get raw data
with open(filepath, 'r') as dest_f:
raw_data = dest_f.readlines()
return raw_data
def process_file(data, out_filepath, window, step):
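    """Split `data` (a list of raw lines) into sliding windows of `window`
    lines, advancing by `step` lines, and write each window to its own .dat
    file whose name is derived from `out_filepath`, the cursor position,
    the window size and the step."""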
line_cursor = 0
while (line_cursor < (len(data) - window)):
with open(out_filepath + '_c' + "{:08d}".format(line_cursor) + '_w' + str(window) + '_s' + str(step) + ".dat", 'w') as dest_f:
for i in range(window):
dest_f.write(data[line_cursor + i])
line_cursor += step
def read_file_data(filepath):
'''
Read data in [[val,time],[val, time]] format
'''
raw_data = None
data = list()
# Get raw data
with open(filepath, 'r') as dest_f:
data_iter = csv.reader(dest_f,delimiter="\t")
raw_data = [raw_data for raw_data in data_iter]
# Convert data to list. If data is absent set it to None
for raw_val in raw_data:
amp = 0
time = 0
try:
amp = float(raw_val[0])
except:
amp = None
finally:
time = float(raw_val[1])
data.append([amp, time])
return data
def save_to_ascii_file(data_list, out_filepath, header=[]):
'''
Save data in format [[],[]] into DAT file
- CSV
    - with \t delimiter
- \n line endings
'''
write_list = []
for data in data_list:
output_str = ""
for val in data:
output_str += str(val) + "\t"
output_str = output_str[:-1]
output_str += "\n"
write_list.append(output_str)
with open(out_filepath,"w") as f:
f.writelines(write_list)
def plot_graph(data, out_filepath, to_display=False, save_to_disk=True):
'''
    Plot the graph and return its data
    Params
        data - input data as a list of [value, time] pairs
        out_filepath - output file path to create
to_display - if set to true then graph will be shown on the display
save_to_disk - if set to true then graph will be saved on the disk
Return
List of lists of graph values in form [freq, period, pgram_value, time_value]
'''
output_data = list()
x = list()
y = list()
# Get first time value as constant time value for all window
time_value = data[0][1]
for val_pair in data:
if val_pair[0] != None:
x.append(val_pair[1])
y.append(val_pair[0])
# Calculate Lomb-Scargle periodogram Astropy
astropy_pgram = LombScargle(x, y, normalization='psd')
astropy_freq, astropy_power = astropy_pgram.autopower()
astropy_false_alarm_probability = astropy_pgram.false_alarm_probability(astropy_power.max(), method='baluev')
# Create figure with 2 subplots
fig = plt.figure()
source_ax = fig.add_subplot(211)
astropy_pgram_ax = fig.add_subplot(212)
#Now make a plot of the input data:
source_ax.plot(x, y, 'b+')
# astropy periodogram
astropy_pgram_ax.plot(astropy_freq, astropy_power,'g')
astropy_pgram_ax.text(0.95, 0.95, "FAP(first_peak) = {:.4f}%".format(astropy_false_alarm_probability),
verticalalignment='top', horizontalalignment='right',
transform=astropy_pgram_ax.transAxes,
color='green', fontsize=15)
if to_display:
plt.show()
if save_to_disk:
plt.savefig(out_filepath)
# Generate output
for idx, freq in enumerate(astropy_freq):
period = 1 / freq
output_data.append([freq, period, astropy_power[idx], time_value])
plt.cla()
plt.clf()
plt.close(fig)
return output_data
def process_windowed_files(path, output_file_path):
files = glob.glob(path + "*.dat")
for filepath in files:
# Reject old merged files
if "!" in filepath:
continue
# Reject old windowed files
if "windowed" in filepath:
continue
print("Process >> " + filepath)
read_data = read_file_data(filepath)
out_dat_filepath = path + os.path.basename(filepath) + "_windowed" + ".dat"
out_png_filepath = path + os.path.basename(filepath) + "_windowed" + ".png"
output_data = plot_graph(read_data,
out_png_filepath)
print("Saved PNG to >> " + out_png_filepath)
save_to_ascii_file(output_data, out_dat_filepath)
print("Saved DAT to >> " + out_dat_filepath)
    try:
        os.remove(output_file_path)
    except Exception:
        # Ignore errors if the merged file does not exist yet
        pass
windowed_files = glob.glob(path + "*_windowed.dat")
for windowed_file in windowed_files:
with open(windowed_file, 'r') as windowed_f:
data = windowed_f.read()
with open(output_file_path, 'a') as merged_file:
merged_file.write(data)
def main():
print("Script is started")
files = glob.glob("./input/*.dat") # Change path here or write filepath
OUTPUT_PATH = "./output/" # Change output here
WINDOW = 648 # Change window value here
STEP = 24 # Change step value here
FREQ_START = 0.08 # Change freq start here
FREQ_END = 1.0 # Change freq end here
FREQ_NUM = 500 # Change freq num here
for filepath in files:
print("Process >> " + filepath)
read_lines = read_raw_file_data(filepath)
out_dat_filepath = OUTPUT_PATH + os.path.basename(filepath)
process_file(read_lines, out_dat_filepath, WINDOW, STEP)
process_windowed_files(OUTPUT_PATH, f'{OUTPUT_PATH}!{os.path.basename(filepath)}_merged_file.dat')
print(f"<{filepath}> succesful processed by the script")
print("Script is finished")
if __name__ == "__main__":
main() | en | 0.708057 | # -*- coding: utf-8 -*- Plot graph according to the DAT file @author: <NAME> Read data in list # Get raw data Read data in [[val,time],[val, time]] format # Get raw data # Convert data to list. If data is absent set it to None Save data in format [[],[]] into DAT file - CSV - with \t delimeter - \n line endings Plot grapth and return its data Params data - input data in list of lists with pair value and time out_filepath - out file name path for create to_display - if set to true then graph will be shown on the display save_to_disk - if set to true then graph will be saved on the disk Return List of lists of graph values in form [freq, period, pgram_value, time_value] # Get first time value as constant time value for all window # Calculate Lomb-Scargle periodogram Astropy # Create figure with 2 subplots #Now make a plot of the input data: # astropy periodogram # Generate output # Reject old merged files # Reject old windowed files # Change path here or write filepath # Change output here # Change window value here # Change step value here # Change freq start here # Change freq end here # Change freq num here | 2.711303 | 3 |
pypy/module/test_lib_pypy/test_cPickle.py | pypyjs/pypy | 34 | 6630795 | from __future__ import absolute_import
import py
from lib_pypy import cPickle
def test_stack_underflow():
py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string")
| from __future__ import absolute_import
import py
from lib_pypy import cPickle
def test_stack_underflow():
py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string")
| none | 1 | 1.631378 | 2 |
|
rmepy/logger.py | 233a344a455/RobomasterEPlib | 3 | 6630796 | <filename>rmepy/logger.py
import traceback
class Logger():
def __init__(self, name):
if name.__class__.__name__ == 'str':
self.name = name
else:
self.name = name.__class__.__name__
self.level = 'DEBUG'
def info(self, msg):
"""Print infos.
Args:
            msg: (str) the info message to print
Returns:
None
"""
if self._log_level <= 1:
print("\033[0;36m" + "[Info]%s: %s" % (self.name, msg) + "\033[0m")
def warn(self, msg):
"""Print warnings.
Args:
            msg: (str) the warning message to print
Returns:
None
"""
if self._log_level <= 2:
print("\033[33m" + "[Warning]%s: %s" % (self.name, msg) + "\033[0m")
def error(self, msg):
"""Print errors.
        Print the error and offer a traceback or force execution to continue.
Args:
            msg: (str) the error message to print
Returns:
None
"""
print("=============================================")
print("\033[0;31m" + "[Error]%s: %s" % (self.name, msg) + "\033[0m")
temp = input("Force to continue? ('y' to continue / 'n' to print Traceback) ")
print("=============================================")
if temp.upper() != 'Y':
print("\n\033[0;31m" + "Traceback (most recent call last):" + "\033[0m")
for line in traceback.format_stack()[:-1]:
print("\033[31m" + line + "\033[0m")
print("\n=============================================")
exit()
def debuginfo(self, msg):
"""Print debug msg.
Args:
            msg: (str) the debug message to print
Returns:
None
"""
if self._log_level == 0:
print("\033[2m" + "[Debug]%s: %s" % (self.name, msg) + "\033[0m")
def debug(self, msg):
"""Print highlighted debug msg.
Args:
            msg: (str) the debug message to print
Returns:
None
"""
print("\033[7m" + "[Debug]%s: %s" % (self.name, msg) + "\033[0m")
@property
def level(self):
return ('DEBUG', 'INFO', 'WARNING', 'ERROR')[self._log_level]
@level.setter
def level(self, level):
try:
level = ('DEBUG', 'INFO', 'WARNING', 'ERROR').index(level)
except ValueError:
print("\033[33m" + "[Warning]Logger: Invalid log level %r." %level + "\033[0m")
else:
self._log_level = level
if __name__ == '__main__':
log = Logger('test')
log.level = 'WARNING'
log.info('test')
log.warn('test')
log.debuginfo('test')
log.debug('test')
print('aaaa')
log.error('test') | <filename>rmepy/logger.py
import traceback
class Logger():
def __init__(self, name):
if name.__class__.__name__ == 'str':
self.name = name
else:
self.name = name.__class__.__name__
self.level = 'DEBUG'
def info(self, msg):
"""Print infos.
Args:
            msg: (str) the info message to print
Returns:
None
"""
if self._log_level <= 1:
print("\033[0;36m" + "[Info]%s: %s" % (self.name, msg) + "\033[0m")
def warn(self, msg):
"""Print warnings.
Args:
            msg: (str) the warning message to print
Returns:
None
"""
if self._log_level <= 2:
print("\033[33m" + "[Warning]%s: %s" % (self.name, msg) + "\033[0m")
def error(self, msg):
"""Print errors.
        Print the error and offer a traceback or force execution to continue.
Args:
            msg: (str) the error message to print
Returns:
None
"""
print("=============================================")
print("\033[0;31m" + "[Error]%s: %s" % (self.name, msg) + "\033[0m")
temp = input("Force to continue? ('y' to continue / 'n' to print Traceback) ")
print("=============================================")
if temp.upper() != 'Y':
print("\n\033[0;31m" + "Traceback (most recent call last):" + "\033[0m")
for line in traceback.format_stack()[:-1]:
print("\033[31m" + line + "\033[0m")
print("\n=============================================")
exit()
def debuginfo(self, msg):
"""Print debug msg.
Args:
            msg: (str) the debug message to print
Returns:
None
"""
if self._log_level == 0:
print("\033[2m" + "[Debug]%s: %s" % (self.name, msg) + "\033[0m")
def debug(self, msg):
"""Print highlighted debug msg.
Args:
            msg: (str) the debug message to print
Returns:
None
"""
print("\033[7m" + "[Debug]%s: %s" % (self.name, msg) + "\033[0m")
@property
def level(self):
return ('DEBUG', 'INFO', 'WARNING', 'ERROR')[self._log_level]
@level.setter
def level(self, level):
try:
level = ('DEBUG', 'INFO', 'WARNING', 'ERROR').index(level)
except ValueError:
print("\033[33m" + "[Warning]Logger: Invalid log level %r." %level + "\033[0m")
else:
self._log_level = level
if __name__ == '__main__':
log = Logger('test')
log.level = 'WARNING'
log.info('test')
log.warn('test')
log.debuginfo('test')
log.debug('test')
print('aaaa')
log.error('test') | zh | 0.239143 | Print infos. Args: msg: (str) 输出的info信息 Returns: None Print warnings. Args: msg: (str) 输出的警告信息 Returns: None Print errors. 输出错误并提供traceback或者强制继续 Args: msg: (str) 输出的错误信息 Returns: None Print debug msg. Args: msg: (str) 输出的调试信息 Returns: None Print highlighted debug msg. Args: msg: (str) 输出的调试信息 Returns: None | 3.171183 | 3 |
hexagon/support/wax.py | redbeestudios/hexagon | 8 | 6630797 | <filename>hexagon/support/wax.py
from typing import Union, List
from InquirerPy import inquirer
from hexagon.domain.env import Env
from hexagon.domain.tool import Tool
from hexagon.domain.wax import Selection, SelectionType
from hexagon.support.hooks import HexagonHooks
def __classifier(value: Union[Tool, Env]):
if value.icon:
return f"{value.icon:2}"
symbols = {
"web": "⦾",
"shell": "ƒ",
"misc": " ",
"hexagon": "⬡",
"group": "≡",
}
r = symbols.get(value.type, "")
return f"{r:2}" if r else ""
def __choices_with_long_name(choices: List[Union[Tool, Env]], classifier=lambda x: ""):
def build_display(v: Union[Tool, Env]):
if "__separator" in v.name:
return "--------------------------------------------------------------------------------"
else:
gap = 60 if v.description else 0
return f"{classifier(v) + (v.long_name or v.name): <{gap}}{v.description or ''}"
return [{"value": each.name, "name": build_display(each)} for each in choices]
def search_by_name_or_alias(data: List[Union[Tool, Env]], arg: str):
if arg:
for k in data:
if k.name == arg or k.alias == arg:
return k.name
return None
def select_env(available_envs: List[Env], tool_envs: dict = None, selected: str = None):
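    """Resolve which environment a tool should run in.

    Returns a (env, value) tuple: (None, None) when the tool declares no envs,
    (None, tool_envs["*"]) when the tool uses the wildcard env, and otherwise
    the chosen Env (taken from `selected` or from an interactive fuzzy prompt)
    together with the tool's value for that env.
    """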
if not tool_envs:
return None, None
if "*" in tool_envs:
return None, tool_envs["*"]
(env, prompted) = (
(selected, False)
if selected
else (
inquirer.fuzzy(
message="On which environment?",
choices=__choices_with_long_name(
[e for e in available_envs if e.name in tool_envs.keys()]
),
validate=lambda x: x and "__separator" not in x,
invalid_message="Please select a valid environment",
).execute(),
True,
)
)
return (
_select_and_register_event(env, available_envs, prompted, target="env"),
tool_envs[env],
)
def select_tool(tools: List[Tool], selected: str = None):
if selected:
return _select_and_register_event(selected, tools, target="tool")
name = inquirer.fuzzy(
message="Hi, which tool would you like to use today?",
choices=__choices_with_long_name(tools, classifier=__classifier),
validate=lambda x: x and "__separator" not in x,
invalid_message="Please select a valid tool",
).execute()
return _select_and_register_event(name, tools, target="tool", prompt=True)
def _select_and_register_event(
name: str, options: List[Union[Tool, Env]], prompt=False, **kwargs
):
selected = next((e for e in options if e.name == name), None)
if selected:
selection = Selection(
selected, SelectionType.prompt if prompt else SelectionType.args, **kwargs
)
if isinstance(selected, Tool):
HexagonHooks.tool_selected.run(selection)
elif isinstance(selected, Env):
HexagonHooks.env_selected.run(selection)
return selected
| <filename>hexagon/support/wax.py
from typing import Union, List
from InquirerPy import inquirer
from hexagon.domain.env import Env
from hexagon.domain.tool import Tool
from hexagon.domain.wax import Selection, SelectionType
from hexagon.support.hooks import HexagonHooks
def __classifier(value: Union[Tool, Env]):
if value.icon:
return f"{value.icon:2}"
symbols = {
"web": "⦾",
"shell": "ƒ",
"misc": " ",
"hexagon": "⬡",
"group": "≡",
}
r = symbols.get(value.type, "")
return f"{r:2}" if r else ""
def __choices_with_long_name(choices: List[Union[Tool, Env]], classifier=lambda x: ""):
def build_display(v: Union[Tool, Env]):
if "__separator" in v.name:
return "--------------------------------------------------------------------------------"
else:
gap = 60 if v.description else 0
return f"{classifier(v) + (v.long_name or v.name): <{gap}}{v.description or ''}"
return [{"value": each.name, "name": build_display(each)} for each in choices]
def search_by_name_or_alias(data: List[Union[Tool, Env]], arg: str):
if arg:
for k in data:
if k.name == arg or k.alias == arg:
return k.name
return None
def select_env(available_envs: List[Env], tool_envs: dict = None, selected: str = None):
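    """Resolve which environment a tool should run in.

    Returns a (env, value) tuple: (None, None) when the tool declares no envs,
    (None, tool_envs["*"]) when the tool uses the wildcard env, and otherwise
    the chosen Env (taken from `selected` or from an interactive fuzzy prompt)
    together with the tool's value for that env.
    """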
if not tool_envs:
return None, None
if "*" in tool_envs:
return None, tool_envs["*"]
(env, prompted) = (
(selected, False)
if selected
else (
inquirer.fuzzy(
message="On which environment?",
choices=__choices_with_long_name(
[e for e in available_envs if e.name in tool_envs.keys()]
),
validate=lambda x: x and "__separator" not in x,
invalid_message="Please select a valid environment",
).execute(),
True,
)
)
return (
_select_and_register_event(env, available_envs, prompted, target="env"),
tool_envs[env],
)
def select_tool(tools: List[Tool], selected: str = None):
if selected:
return _select_and_register_event(selected, tools, target="tool")
name = inquirer.fuzzy(
message="Hi, which tool would you like to use today?",
choices=__choices_with_long_name(tools, classifier=__classifier),
validate=lambda x: x and "__separator" not in x,
invalid_message="Please select a valid tool",
).execute()
return _select_and_register_event(name, tools, target="tool", prompt=True)
def _select_and_register_event(
name: str, options: List[Union[Tool, Env]], prompt=False, **kwargs
):
selected = next((e for e in options if e.name == name), None)
if selected:
selection = Selection(
selected, SelectionType.prompt if prompt else SelectionType.args, **kwargs
)
if isinstance(selected, Tool):
HexagonHooks.tool_selected.run(selection)
elif isinstance(selected, Env):
HexagonHooks.env_selected.run(selection)
return selected
| none | 1 | 2.225034 | 2 |
|
zk/check.py | chadharvey/integrations-core | 2 | 6630798 | <filename>zk/check.py
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
As of zookeeper 3.4.0, the `mntr` admin command is provided for easy parsing of zookeeper stats.
This check first parses the `stat` admin command for a version number.
If the zookeeper version supports `mntr`, it is also parsed.
Duplicate information is being reported by both `mntr` and `stat` to keep backwards compatibility.
Example:
`stat` reports: zookeeper.latency.avg
`mntr` reports: zookeeper.avg.latency
If available, make use of the data reported by `mntr` not `stat`.
The duplicate `stat` reports are only kept for backward compatibility.
Besides the usual zookeeper state of `leader`, `follower`, `observer` and `standalone`,
this check will report three other states:
`down`: the check cannot connect to zookeeper
`inactive`: the zookeeper instance has lost connection to the cluster
`unknown`: an unexpected error has occurred in this check
States can be accessed through the gauge `zookeeper.instances.<state>`,
through the set `zookeeper.instances`, or through the `mode:<state>` tag.
This check parses the response from zookeeper's `stat` admin command, which looks like:
```
Zookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT
Clients:
/10.42.114.160:32634[1](queued=0,recved=12,sent=0)
/10.37.137.74:21873[1](queued=0,recved=53613,sent=0)
/10.37.137.74:21876[1](queued=0,recved=57436,sent=0)
/10.115.77.32:32990[1](queued=0,recved=16,sent=0)
/10.37.137.74:21891[1](queued=0,recved=55011,sent=0)
/10.37.137.74:21797[1](queued=0,recved=19431,sent=0)
Latency min/avg/max: -10/0/20007
Received: 101032173
Sent: 0
Outstanding: 0
Zxid: 0x1034799c7
Mode: leader
Node count: 487
```
`stat` tested with Zookeeper versions 3.0.0 to 3.4.5
The following is an example of the `mntr` command's output:
```
zk_version 3.4.5-cdh4.4.0--1, built on 09/04/2013 01:46 GMT
zk_avg_latency 0
zk_max_latency 0
zk_min_latency 0
zk_packets_received 4
zk_packets_sent 3
zk_num_alive_connections 1
zk_outstanding_requests 0
zk_server_state standalone
zk_znode_count 4
zk_watch_count 0
zk_ephemerals_count 0
zk_approximate_data_size 27
zk_open_file_descriptor_count 29
zk_max_file_descriptor_count 4096
```
`mntr` tested with ZooKeeper 3.4.5
'''
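# A hypothetical instance configuration, as read by check() below; the keys and
# defaults mirror the instance.get() calls, the values are illustrative only:
#
#   instances:
#     - host: localhost
#       port: 2181
#       timeout: 3
#       expected_mode: leader
#       report_instance_mode: true
#       tags:
#         - cluster:example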
# stdlib
from collections import defaultdict
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
from StringIO import StringIO
import re
import socket
import struct
# project
from checks import AgentCheck
class ZKConnectionFailure(Exception):
""" Raised when we are unable to connect or get the output of a command. """
pass
class ZKMetric(tuple):
"""
A Zookeeper metric.
Tuple with an optional metric type (default is 'gauge').
"""
def __new__(cls, name, value, m_type="gauge"):
return super(ZKMetric, cls).__new__(cls, [name, value, m_type])
class ZookeeperCheck(AgentCheck):
"""
ZooKeeper AgentCheck.
    Parse content from the `stat` and `mntr` (if available) commands to retrieve cluster health metrics.
"""
# example match:
# "Zookeeper version: 3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT"
version_pattern = re.compile(r'(\d+\.\d+\.\d+)')
SOURCE_TYPE_NAME = 'zookeeper'
STATUS_TYPES = [
'leader',
'follower',
'observer',
'standalone',
'down',
'inactive',
]
# `mntr` information to report as `rate`
_MNTR_RATES = set(
[
'zk_packets_received',
'zk_packets_sent',
]
)
def check(self, instance):
host = instance.get('host', 'localhost')
port = int(instance.get('port', 2181))
timeout = float(instance.get('timeout', 3.0))
expected_mode = (instance.get('expected_mode') or '').strip()
tags = instance.get('tags', [])
cx_args = (host, port, timeout)
sc_tags = ["host:{0}".format(host), "port:{0}".format(port)] + list(set(tags))
hostname = self.hostname
report_instance_mode = instance.get("report_instance_mode", True)
zk_version = None # parse_stat will parse and set version string
# Send a service check based on the `ruok` response.
# Set instance status to down if not ok.
try:
ruok_out = self._send_command('ruok', *cx_args)
except ZKConnectionFailure:
# The server should not respond at all if it's not OK.
status = AgentCheck.CRITICAL
message = 'No response from `ruok` command'
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
else:
ruok_out.seek(0)
ruok = ruok_out.readline()
            if ruok == 'imok':
                status = AgentCheck.OK
            else:
                status = AgentCheck.WARNING
            message = u'Response from the server: %s' % ruok
finally:
self.service_check(
'zookeeper.ruok', status, message=message, tags=sc_tags
)
# Read metrics from the `stat` output.
try:
stat_out = self._send_command('stat', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
# Parse the response
metrics, new_tags, mode, zk_version = self.parse_stat(stat_out)
# Write the data
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + new_tags)
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
if expected_mode:
if mode == expected_mode:
status = AgentCheck.OK
message = u"Server is in %s mode" % mode
else:
status = AgentCheck.CRITICAL
message = u"Server is in %s mode but check expects %s mode"\
% (mode, expected_mode)
self.service_check('zookeeper.mode', status, message=message,
tags=sc_tags)
# Read metrics from the `mntr` output
if zk_version and LooseVersion(zk_version) > LooseVersion("3.4.0"):
try:
mntr_out = self._send_command('mntr', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
metrics, mode = self.parse_mntr(mntr_out)
mode_tag = "mode:%s" % mode
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + [mode_tag])
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
def report_instance_mode(self, hostname, mode, tags):
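        """Report the instance mode as gauges: `zookeeper.instances` tagged with
        `mode:<mode>` plus a `zookeeper.instances.<mode>` gauge set to 1 (modes
        outside STATUS_TYPES are reported as `unknown`)."""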
gauges = defaultdict(int)
if mode not in self.STATUS_TYPES:
mode = "unknown"
tags = tags + ['mode:%s' % mode]
self.gauge('zookeeper.instances', 1, tags=tags)
gauges[mode] = 1
for k, v in gauges.iteritems():
gauge_name = 'zookeeper.instances.%s' % k
self.gauge(gauge_name, v)
def _send_command(self, command, host, port, timeout):
sock = socket.socket()
sock.settimeout(timeout)
buf = StringIO()
chunk_size = 1024
# try-finally and try-except to stay compatible with python 2.4
try:
try:
# Connect to the zk client port and send the stat command
sock.connect((host, port))
sock.sendall(command)
# Read the response into a StringIO buffer
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads = 1
max_reads = 10000
while chunk:
if num_reads > max_reads:
# Safeguard against an infinite loop
raise Exception("Read %s bytes before exceeding max reads of %s. "
% (buf.tell(), max_reads))
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads += 1
except (socket.timeout, socket.error):
raise ZKConnectionFailure()
finally:
sock.close()
return buf
def parse_stat(self, buf):
''' `buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version)
'''
metrics = []
buf.seek(0)
# Check the version line to make sure we parse the rest of the
# body correctly. Particularly, the Connections val was added in
# >= 3.4.4.
start_line = buf.readline()
match = self.version_pattern.search(start_line)
        if match is None:
            # Could not parse a version from the `stat` output; treat the instance as inactive
            return (None, None, "inactive", None)
else:
version = match.group()
has_connections_val = LooseVersion(version) > LooseVersion("3.4.4")
# Clients:
buf.readline() # skip the Clients: header
connections = 0
client_line = buf.readline().strip()
if client_line:
connections += 1
while client_line:
client_line = buf.readline().strip()
if client_line:
connections += 1
# Latency min/avg/max: -10/0/20007
_, value = buf.readline().split(':')
l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
metrics.append(ZKMetric('zookeeper.latency.min', l_min))
metrics.append(ZKMetric('zookeeper.latency.avg', l_avg))
metrics.append(ZKMetric('zookeeper.latency.max', l_max))
# Received: 101032173
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip())))
# Sent: 1324
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip())))
if has_connections_val:
# Connections: 1
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.connections', int(value.strip())))
else:
            # If the zk version doesn't explicitly give the Connections val,
# use the value we computed from the client list.
metrics.append(ZKMetric('zookeeper.connections', connections))
# Outstanding: 0
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/dd-agent/issues/1383
metrics.append(ZKMetric('zookeeper.bytes_outstanding', long(value.strip())))
metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip())))
# Zxid: 0x1034799c7
_, value = buf.readline().split(':')
# Parse as a 64 bit hex int
zxid = long(value.strip(), 16)
# convert to bytes
zxid_bytes = struct.pack('>q', zxid)
# the higher order 4 bytes is the epoch
(zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
# the lower order 4 bytes is the count
(zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])
metrics.append(ZKMetric('zookeeper.zxid.epoch', zxid_epoch))
metrics.append(ZKMetric('zookeeper.zxid.count', zxid_count))
# Mode: leader
_, value = buf.readline().split(':')
mode = value.strip().lower()
tags = [u'mode:' + mode]
# Node count: 487
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.nodes', long(value.strip())))
return metrics, tags, mode, version
def parse_mntr(self, buf):
'''
Parse `mntr` command's content.
`buf` is a readable file-like object
Returns: a tuple (metrics, mode)
if mode == 'inactive', metrics will be None
'''
buf.seek(0)
first = buf.readline() # First is version string or error
if first == 'This ZooKeeper instance is not currently serving requests':
return (None, 'inactive')
metrics = []
mode = 'inactive'
for line in buf:
try:
key, value = line.split()
if key == "zk_server_state":
mode = value.lower()
continue
metric_name = self._normalize_metric_label(key)
metric_type = "rate" if key in self._MNTR_RATES else "gauge"
metric_value = int(value)
metrics.append(ZKMetric(metric_name, metric_value, metric_type))
            except ValueError:
                # `line` may not split into exactly two fields (e.g. the version line),
                # and `key`/`value` may be unbound here, so log the raw line instead.
                self.log.warning(
                    u"Cannot parse `mntr` line: {line}".format(line=line.strip())
                )
                continue
except Exception:
self.log.exception(
u"Unexpected exception occurred while parsing `mntr` command content:\n"
u"{buf}".format(
buf=buf
)
)
return (metrics, mode)
def _normalize_metric_label(self, key):
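        """Turn an `mntr` key such as `zk_avg_latency` into a metric name
        such as `zookeeper.avg_latency`."""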
if re.match('zk', key):
key = key.replace('zk', 'zookeeper', 1)
return key.replace('_', '.', 1)
| <filename>zk/check.py
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
As of zookeeper 3.4.0, the `mntr` admin command is provided for easy parsing of zookeeper stats.
This check first parses the `stat` admin command for a version number.
If the zookeeper version supports `mntr`, it is also parsed.
Duplicate information is being reported by both `mntr` and `stat` to keep backwards compatibility.
Example:
`stat` reports: zookeeper.latency.avg
`mntr` reports: zookeeper.avg.latency
If available, make use of the data reported by `mntr` not `stat`.
The duplicate `stat` reports are only kept for backward compatibility.
Besides the usual zookeeper state of `leader`, `follower`, `observer` and `standalone`,
this check will report three other states:
`down`: the check cannot connect to zookeeper
`inactive`: the zookeeper instance has lost connection to the cluster
`unknown`: an unexpected error has occurred in this check
States can be accessed through the gauge `zookeeper.instances.<state>`,
through the set `zookeeper.instances`, or through the `mode:<state>` tag.
This check parses the response from zookeeper's `stat` admin command, which looks like:
```
Zookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT
Clients:
/10.42.114.160:32634[1](queued=0,recved=12,sent=0)
/10.37.137.74:21873[1](queued=0,recved=53613,sent=0)
/10.37.137.74:21876[1](queued=0,recved=57436,sent=0)
/10.115.77.32:32990[1](queued=0,recved=16,sent=0)
/10.37.137.74:21891[1](queued=0,recved=55011,sent=0)
/10.37.137.74:21797[1](queued=0,recved=19431,sent=0)
Latency min/avg/max: -10/0/20007
Received: 101032173
Sent: 0
Outstanding: 0
Zxid: 0x1034799c7
Mode: leader
Node count: 487
```
`stat` tested with Zookeeper versions 3.0.0 to 3.4.5
The following is an example of the `mntr` command's output:
```
zk_version 3.4.5-cdh4.4.0--1, built on 09/04/2013 01:46 GMT
zk_avg_latency 0
zk_max_latency 0
zk_min_latency 0
zk_packets_received 4
zk_packets_sent 3
zk_num_alive_connections 1
zk_outstanding_requests 0
zk_server_state standalone
zk_znode_count 4
zk_watch_count 0
zk_ephemerals_count 0
zk_approximate_data_size 27
zk_open_file_descriptor_count 29
zk_max_file_descriptor_count 4096
```
`mntr` tested with ZooKeeper 3.4.5
'''
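# A hypothetical instance configuration, as read by check() below; the keys and
# defaults mirror the instance.get() calls, the values are illustrative only:
#
#   instances:
#     - host: localhost
#       port: 2181
#       timeout: 3
#       expected_mode: leader
#       report_instance_mode: true
#       tags:
#         - cluster:example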
# stdlib
from collections import defaultdict
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
from StringIO import StringIO
import re
import socket
import struct
# project
from checks import AgentCheck
class ZKConnectionFailure(Exception):
""" Raised when we are unable to connect or get the output of a command. """
pass
class ZKMetric(tuple):
"""
A Zookeeper metric.
Tuple with an optional metric type (default is 'gauge').
"""
def __new__(cls, name, value, m_type="gauge"):
return super(ZKMetric, cls).__new__(cls, [name, value, m_type])
class ZookeeperCheck(AgentCheck):
"""
ZooKeeper AgentCheck.
    Parse content from the `stat` and `mntr` (if available) commands to retrieve cluster health metrics.
"""
# example match:
# "Zookeeper version: 3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT"
version_pattern = re.compile(r'(\d+\.\d+\.\d+)')
SOURCE_TYPE_NAME = 'zookeeper'
STATUS_TYPES = [
'leader',
'follower',
'observer',
'standalone',
'down',
'inactive',
]
# `mntr` information to report as `rate`
_MNTR_RATES = set(
[
'zk_packets_received',
'zk_packets_sent',
]
)
def check(self, instance):
host = instance.get('host', 'localhost')
port = int(instance.get('port', 2181))
timeout = float(instance.get('timeout', 3.0))
expected_mode = (instance.get('expected_mode') or '').strip()
tags = instance.get('tags', [])
cx_args = (host, port, timeout)
sc_tags = ["host:{0}".format(host), "port:{0}".format(port)] + list(set(tags))
hostname = self.hostname
report_instance_mode = instance.get("report_instance_mode", True)
zk_version = None # parse_stat will parse and set version string
# Send a service check based on the `ruok` response.
# Set instance status to down if not ok.
try:
ruok_out = self._send_command('ruok', *cx_args)
except ZKConnectionFailure:
# The server should not respond at all if it's not OK.
status = AgentCheck.CRITICAL
message = 'No response from `ruok` command'
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
else:
ruok_out.seek(0)
ruok = ruok_out.readline()
            if ruok == 'imok':
                status = AgentCheck.OK
            else:
                status = AgentCheck.WARNING
            message = u'Response from the server: %s' % ruok
finally:
self.service_check(
'zookeeper.ruok', status, message=message, tags=sc_tags
)
# Read metrics from the `stat` output.
try:
stat_out = self._send_command('stat', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
# Parse the response
metrics, new_tags, mode, zk_version = self.parse_stat(stat_out)
# Write the data
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + new_tags)
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
if expected_mode:
if mode == expected_mode:
status = AgentCheck.OK
message = u"Server is in %s mode" % mode
else:
status = AgentCheck.CRITICAL
message = u"Server is in %s mode but check expects %s mode"\
% (mode, expected_mode)
self.service_check('zookeeper.mode', status, message=message,
tags=sc_tags)
# Read metrics from the `mntr` output
if zk_version and LooseVersion(zk_version) > LooseVersion("3.4.0"):
try:
mntr_out = self._send_command('mntr', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
metrics, mode = self.parse_mntr(mntr_out)
mode_tag = "mode:%s" % mode
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + [mode_tag])
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
def report_instance_mode(self, hostname, mode, tags):
gauges = defaultdict(int)
if mode not in self.STATUS_TYPES:
mode = "unknown"
tags = tags + ['mode:%s' % mode]
self.gauge('zookeeper.instances', 1, tags=tags)
gauges[mode] = 1
for k, v in gauges.iteritems():
gauge_name = 'zookeeper.instances.%s' % k
self.gauge(gauge_name, v)
def _send_command(self, command, host, port, timeout):
sock = socket.socket()
sock.settimeout(timeout)
buf = StringIO()
chunk_size = 1024
# try-finally and try-except to stay compatible with python 2.4
try:
try:
# Connect to the zk client port and send the stat command
sock.connect((host, port))
sock.sendall(command)
# Read the response into a StringIO buffer
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads = 1
max_reads = 10000
while chunk:
if num_reads > max_reads:
# Safeguard against an infinite loop
raise Exception("Read %s bytes before exceeding max reads of %s. "
% (buf.tell(), max_reads))
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads += 1
except (socket.timeout, socket.error):
raise ZKConnectionFailure()
finally:
sock.close()
return buf
def parse_stat(self, buf):
''' `buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version)
'''
metrics = []
buf.seek(0)
# Check the version line to make sure we parse the rest of the
# body correctly. Particularly, the Connections val was added in
# >= 3.4.4.
start_line = buf.readline()
match = self.version_pattern.search(start_line)
if match is None:
return (None, None, "inactive", None)
raise Exception("Could not parse version from stat command output: %s" % start_line)
else:
version = match.group()
has_connections_val = LooseVersion(version) > LooseVersion("3.4.4")
# Clients:
buf.readline() # skip the Clients: header
connections = 0
client_line = buf.readline().strip()
if client_line:
connections += 1
while client_line:
client_line = buf.readline().strip()
if client_line:
connections += 1
# Latency min/avg/max: -10/0/20007
_, value = buf.readline().split(':')
l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
metrics.append(ZKMetric('zookeeper.latency.min', l_min))
metrics.append(ZKMetric('zookeeper.latency.avg', l_avg))
metrics.append(ZKMetric('zookeeper.latency.max', l_max))
# Received: 101032173
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip())))
# Sent: 1324
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip())))
if has_connections_val:
# Connections: 1
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.connections', int(value.strip())))
else:
# If the zk version doesnt explicitly give the Connections val,
# use the value we computed from the client list.
metrics.append(ZKMetric('zookeeper.connections', connections))
# Outstanding: 0
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/dd-agent/issues/1383
metrics.append(ZKMetric('zookeeper.bytes_outstanding', long(value.strip())))
metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip())))
# Zxid: 0x1034799c7
_, value = buf.readline().split(':')
# Parse as a 64 bit hex int
zxid = long(value.strip(), 16)
# convert to bytes
zxid_bytes = struct.pack('>q', zxid)
# the higher order 4 bytes is the epoch
(zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
# the lower order 4 bytes is the count
(zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])
metrics.append(ZKMetric('zookeeper.zxid.epoch', zxid_epoch))
metrics.append(ZKMetric('zookeeper.zxid.count', zxid_count))
# Mode: leader
_, value = buf.readline().split(':')
mode = value.strip().lower()
tags = [u'mode:' + mode]
# Node count: 487
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.nodes', long(value.strip())))
return metrics, tags, mode, version
def parse_mntr(self, buf):
'''
Parse `mntr` command's content.
`buf` is a readable file-like object
Returns: a tuple (metrics, mode)
if mode == 'inactive', metrics will be None
'''
buf.seek(0)
first = buf.readline() # First is version string or error
if first == 'This ZooKeeper instance is not currently serving requests':
return (None, 'inactive')
metrics = []
mode = 'inactive'
for line in buf:
try:
key, value = line.split()
if key == "zk_server_state":
mode = value.lower()
continue
metric_name = self._normalize_metric_label(key)
metric_type = "rate" if key in self._MNTR_RATES else "gauge"
metric_value = int(value)
metrics.append(ZKMetric(metric_name, metric_value, metric_type))
except ValueError:
self.log.warning(
u"Cannot format `mntr` value. key={key}, value{value}".format(
key=key, value=value
)
)
continue
except Exception:
self.log.exception(
u"Unexpected exception occurred while parsing `mntr` command content:\n"
u"{buf}".format(
buf=buf
)
)
return (metrics, mode)
def _normalize_metric_label(self, key):
if re.match('zk', key):
key = key.replace('zk', 'zookeeper', 1)
return key.replace('_', '.', 1)
| en | 0.756329 | # (C) Datadog, Inc. 2010-2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) As of zookeeper 3.4.0, the `mntr` admin command is provided for easy parsing of zookeeper stats. This check first parses the `stat` admin command for a version number. If the zookeeper version supports `mntr`, it is also parsed. Duplicate information is being reported by both `mntr` and `stat` to keep backwards compatability. Example: `stat` reports: zookeeper.latency.avg `mntr` reports: zookeeper.avg.latency If available, make use of the data reported by `mntr` not `stat`. The duplicate `stat` reports are only kept for backward compatability. Besides the usual zookeeper state of `leader`, `follower`, `observer` and `standalone`, this check will report three other states: `down`: the check cannot connect to zookeeper `inactive`: the zookeeper instance has lost connection to the cluster `unknown`: an unexpected error has occured in this check States can be accessed through the gauge `zookeeper.instances.<state>, through the set `zookeeper.instances`, or through the `mode:<state>` tag. Parses the response from zookeeper's `stat` admin command, which looks like: ``` Zookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT Clients: /10.42.114.160:32634[1](queued=0,recved=12,sent=0) /10.37.137.74:21873[1](queued=0,recved=53613,sent=0) /10.37.137.74:21876[1](queued=0,recved=57436,sent=0) /10.115.77.32:32990[1](queued=0,recved=16,sent=0) /10.37.137.74:21891[1](queued=0,recved=55011,sent=0) /10.37.137.74:21797[1](queued=0,recved=19431,sent=0) Latency min/avg/max: -10/0/20007 Received: 101032173 Sent: 0 Outstanding: 0 Zxid: 0x1034799c7 Mode: leader Node count: 487 ``` `stat` tested with Zookeeper versions 3.0.0 to 3.4.5 The following is an example of the `mntr` commands output: ``` zk_version 3.4.5-cdh4.4.0--1, built on 09/04/2013 01:46 GMT zk_avg_latency 0 zk_max_latency 0 zk_min_latency 0 zk_packets_received 4 zk_packets_sent 3 zk_num_alive_connections 1 zk_outstanding_requests 0 zk_server_state standalone zk_znode_count 4 zk_watch_count 0 zk_ephemerals_count 0 zk_approximate_data_size 27 zk_open_file_descriptor_count 29 zk_max_file_descriptor_count 4096 ``` `mntr` tested with ZooKeeper 3.4.5 # stdlib # pylint: disable=E0611,E0401 # project Raised when we are unable to connect or get the output of a command. A Zookeeper metric. Tuple with an optional metric type (default is 'gauge'). ZooKeeper AgentCheck. Parse content from `stat` and `mntr`(if available) commmands to retrieve health cluster metrics. # example match: # "Zookeeper version: 3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT" # `mntr` information to report as `rate` # parse_stat will parse and set version string # Send a service check based on the `ruok` response. # Set instance status to down if not ok. # The server should not respond at all if it's not OK. # Read metrics from the `stat` output. # Parse the response # Write the data # Read metrics from the `mntr` output # try-finally and try-except to stay compatible with python 2.4 # Connect to the zk client port and send the stat command # Read the response into a StringIO buffer # Safeguard against an infinite loop `buf` is a readable file-like object returns a tuple: (metrics, tags, mode, version) # Check the version line to make sure we parse the rest of the # body correctly. Particularly, the Connections val was added in # >= 3.4.4. 
# Clients: # skip the Clients: header # Latency min/avg/max: -10/0/20007 # Received: 101032173 # Sent: 1324 # Connections: 1 # If the zk version doesnt explicitly give the Connections val, # use the value we computed from the client list. # Outstanding: 0 # Fixme: This metric name is wrong. It should be removed in a major version of the agent # See https://github.com/DataDog/dd-agent/issues/1383 # Zxid: 0x1034799c7 # Parse as a 64 bit hex int # convert to bytes # the higher order 4 bytes is the epoch # the lower order 4 bytes is the count # Mode: leader # Node count: 487 Parse `mntr` command's content. `buf` is a readable file-like object Returns: a tuple (metrics, mode) if mode == 'inactive', metrics will be None # First is version string or error | 1.598324 | 2 |
examples/hands.py | MPI-IS/reactive_pepper | 0 | 6630799 | import math,time,random
import pepper_interface
IP = "127.0.0.1"
PORT = 36417
simulation = False
with pepper_interface.get(IP,PORT,simulation) as pepper:
# opening
pepper.hands.set(True,1.0,velocity=0.5) # left
pepper.hands.set(False,1.0,velocity=0.5) # right
while pepper.hands.running(True) or pepper.hands.running(False):
time.sleep(0.1)
# closing
pepper.hands.set(True,0.0,velocity=0.5) # left
pepper.hands.set(False,0.0,velocity=0.5) # right
while pepper.hands.running(True) or pepper.hands.running(False):
time.sleep(0.1)
| import math,time,random
import pepper_interface
IP = "127.0.0.1"
PORT = 36417
simulation = False
with pepper_interface.get(IP,PORT,simulation) as pepper:
# opening
pepper.hands.set(True,1.0,velocity=0.5) # left
pepper.hands.set(False,1.0,velocity=0.5) # right
while pepper.hands.running(True) or pepper.hands.running(False):
time.sleep(0.1)
# closing
pepper.hands.set(True,0.0,velocity=0.5) # left
pepper.hands.set(False,0.0,velocity=0.5) # right
while pepper.hands.running(True) or pepper.hands.running(False):
time.sleep(0.1)
| en | 0.554343 | # opening # left # right # closing # left # right | 2.713202 | 3 |
tests/test_tracing_map_parameters.py | rgstephens/opentracing-decorator | 4 | 6630800 | <filename>tests/test_tracing_map_parameters.py
import unittest
from typing import Dict
from opentracing.mocktracer import MockTracer
from opentracing_decorator.tracing import Tracing
class TestTracing(unittest.TestCase):
def setUp(self):
self.tracer = MockTracer()
self.tracing = Tracing(self.tracer)
def test_simple_parameters(self):
def simple_func(x, y, z):
pass
correct = {"x": 10, "y": 30, "z": 20}
result = self.tracing._map_parameters(simple_func, 10, 30, 20)
self.assertDictEqual(result, correct)
def test_missing_parameters(self):
def simple_func(x, y, z):
pass
self.assertRaises(TypeError, self.tracing._map_parameters, simple_func, 30, 20)
def test_default_parameters(self):
def simple_func_with_default(x, y, z=30):
pass
correct = {"x": 10, "y": 30, "z": 30}
result = self.tracing._map_parameters(simple_func_with_default, 10, 30)
self.assertDictEqual(result, correct)
def test_named_parameters(self):
def func_with_named_parameters(x, y, z):
pass
correct = {"x": 10, "y": 30, "z": 30}
result = self.tracing._map_parameters(func_with_named_parameters, 10, 30, z=30)
self.assertDictEqual(result, correct)
def test_typed_parameters(self):
def func_with_types(x: int, y: int, z: int):
pass
correct = {"x": 10, "y": 10, "z": 10}
result = self.tracing._map_parameters(func_with_types, 10, 10, 10)
self.assertDictEqual(result, correct)
def test_no_parameters(self):
def func_with_no_args():
pass
correct: Dict = {}
result = self.tracing._map_parameters(func_with_no_args)
self.assertDictEqual(result, correct)
| <filename>tests/test_tracing_map_parameters.py
import unittest
from typing import Dict
from opentracing.mocktracer import MockTracer
from opentracing_decorator.tracing import Tracing
class TestTracing(unittest.TestCase):
def setUp(self):
self.tracer = MockTracer()
self.tracing = Tracing(self.tracer)
def test_simple_parameters(self):
def simple_func(x, y, z):
pass
correct = {"x": 10, "y": 30, "z": 20}
result = self.tracing._map_parameters(simple_func, 10, 30, 20)
self.assertDictEqual(result, correct)
def test_missing_parameters(self):
def simple_func(x, y, z):
pass
self.assertRaises(TypeError, self.tracing._map_parameters, simple_func, 30, 20)
def test_default_parameters(self):
def simple_func_with_default(x, y, z=30):
pass
correct = {"x": 10, "y": 30, "z": 30}
result = self.tracing._map_parameters(simple_func_with_default, 10, 30)
self.assertDictEqual(result, correct)
def test_named_parameters(self):
def func_with_named_parameters(x, y, z):
pass
correct = {"x": 10, "y": 30, "z": 30}
result = self.tracing._map_parameters(func_with_named_parameters, 10, 30, z=30)
self.assertDictEqual(result, correct)
def test_typed_parameters(self):
def func_with_types(x: int, y: int, z: int):
pass
correct = {"x": 10, "y": 10, "z": 10}
result = self.tracing._map_parameters(func_with_types, 10, 10, 10)
self.assertDictEqual(result, correct)
def test_no_parameters(self):
def func_with_no_args():
pass
correct: Dict = {}
result = self.tracing._map_parameters(func_with_no_args)
self.assertDictEqual(result, correct)
| none | 1 | 3.025034 | 3 |
|
radioapp.py | hecke-rs/radio | 2 | 6630801 | from flask.helpers import get_debug_flag
from radio import create_app
from radio.settings import DevConfig, ProdConfig
config = DevConfig() if get_debug_flag() else ProdConfig()
app = create_app(config)
| from flask.helpers import get_debug_flag
from radio import create_app
from radio.settings import DevConfig, ProdConfig
config = DevConfig() if get_debug_flag() else ProdConfig()
app = create_app(config)
| none | 1 | 1.797075 | 2 |
|
tydal/quiz2.py | mguerrap/CurrentsDiscovery | 2 | 6630802 | <filename>tydal/quiz2.py
def quiz():
"""
Generates module 2 quiz
outputs print statements
user needs to type and line execute each answer before the next question shows up
"""
#Question 1
print("Module 2 Quiz (4 questions)")
print()
print("Question 1: A tide is _____?")
print()
print("a. a wave")
print("b. random")
print("c. made by whales")
answer = input("Make your choice: ")
if answer.lower() == "a":
print("Correct!")
else:
print("Incorrect")
#Question 2
print("Question 2: Peak tide elevation at each port happens at the same time: True or False: ?")
print()
print("a. True")
print("b. False")
answer = input("Make your choice: ")
if answer.lower() == "b":
print("Correct!")
else:
print("Incorrect")
#Question 3
print("Question 3: Neah Bay's tidal elevation is always higher than Port Townsends: True or False?")
print()
print("a. True")
print("b. False")
answer = input("Make your choice: ")
if answer.lower() == "b":
print("Correct!")
else:
print("Incorrect")
#Question 4
print("Question 4: If Neah Bay's tidal elevation is lower than Port Townsends, which way is the water flowing")
print()
print("a. To the Ocean")
print("b. To the Estuary")
print("c. Nowhere")
answer = input("Make your choice: ")
if answer.lower() == "a":
print("Correct!")
else:
print("Incorrect")
print("Go to Module 3") | <filename>tydal/quiz2.py
def quiz():
"""
Generates module 2 quiz
outputs print statements
user needs to type and line execute each answer before the next question shows up
"""
#Question 1
print("Module 2 Quiz (4 questions)")
print()
print("Question 1: A tide is _____?")
print()
print("a. a wave")
print("b. random")
print("c. made by whales")
answer = input("Make your choice: ")
if answer.lower() == "a":
print("Correct!")
else:
print("Incorrect")
#Question 2
print("Question 2: Peak tide elevation at each port happens at the same time: True or False: ?")
print()
print("a. True")
print("b. False")
answer = input("Make your choice: ")
if answer.lower() == "b":
print("Correct!")
else:
print("Incorrect")
#Question 3
print("Question 3: Neah Bay's tidal elevation is always higher than Port Townsends: True or False?")
print()
print("a. True")
print("b. False")
answer = input("Make your choice: ")
if answer.lower() == "b":
print("Correct!")
else:
print("Incorrect")
#Question 4
print("Question 4: If Neah Bay's tidal elevation is lower than Port Townsends, which way is the water flowing")
print()
print("a. To the Ocean")
print("b. To the Estuary")
print("c. Nowhere")
answer = input("Make your choice: ")
if answer.lower() == "a":
print("Correct!")
else:
print("Incorrect")
print("Go to Module 3") | en | 0.872437 | Generates module 2 quiz outputs print statements user needs to type and line execute each answer before the next question shows up #Question 1 #Question 2 #Question 3 #Question 4 | 3.96272 | 4 |
src/algoritmia/problems/textprocessing/wordsegmentation.py | DavidLlorens/algoritmia | 6 | 6630803 | #coding: latin1
class TextSegmenter: #[prob #[]seg
def __init__(self, Pr: "IMap<str, Real>"):
self.Pr = Pr
def probability(self, t: "str") -> "Real":
P = [1.0] + [None] * (len(t))
for j in range(1,len(t)+1):
P[j] = max(P[i] * self.Pr[t[i:j]] for i in range(j))
return P[len(t)] #]prob
def segment(self, t: "str") -> "str":#[seg
P = [1.0] + [None] * (len(t))
back = [None] * (len(t)+1)
for j in range(1,len(t)+1):
P[j], back[j] = max( (P[i] * self.Pr.get(t[i:j], 0.0), i) for i in range(j) )
sentence = []
j = len(t)
while back[j] != None:
sentence.append( t[back[j]:j] )
j = back[j]
sentence.reverse()
return ' '.join(sentence) #]seg | #coding: latin1
class TextSegmenter: #[prob #[]seg
def __init__(self, Pr: "IMap<str, Real>"):
self.Pr = Pr
def probability(self, t: "str") -> "Real":
P = [1.0] + [None] * (len(t))
for j in range(1,len(t)+1):
P[j] = max(P[i] * self.Pr[t[i:j]] for i in range(j))
return P[len(t)] #]prob
def segment(self, t: "str") -> "str":#[seg
P = [1.0] + [None] * (len(t))
back = [None] * (len(t)+1)
for j in range(1,len(t)+1):
P[j], back[j] = max( (P[i] * self.Pr.get(t[i:j], 0.0), i) for i in range(j) )
sentence = []
j = len(t)
while back[j] != None:
sentence.append( t[back[j]:j] )
j = back[j]
sentence.reverse()
return ' '.join(sentence) #]seg | pt | 0.133927 | #coding: latin1 #[prob #[]seg #]prob #[seg #]seg | 2.986519 | 3 |
readthedocs/rtd_tests/tests/test_comments.py | ank-forked/readthedocs.org | 1 | 6630804 | import random
from unittest.case import expectedFailure
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import RequestFactory
from django_dynamic_fixture import fixture
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from rest_framework.test import APIRequestFactory, APITestCase
from readthedocs.builds.models import Version
from readthedocs.comments.models import DocumentNode, DocumentComment
from readthedocs.comments.models import NodeSnapshot
from readthedocs.comments.views import add_node, get_metadata, update_node
from readthedocs.projects.models import Project
from readthedocs.projects.views.private import project_comments_moderation
from readthedocs.rtd_tests.utils import create_user
def create_node(hash=None, commit=None, **kwargs):
snapshot_kwargs = {}
if hash is not None:
snapshot_kwargs['hash'] = hash
if commit is not None:
snapshot_kwargs['commit'] = commit
node = get(DocumentNode, **kwargs)
get(NodeSnapshot, node=node, **snapshot_kwargs)
return node
class ModerationTests(TestCase):
def setUp(self):
self.owner = create_user(username='owner', password='<PASSWORD>')
self.moderated_project = get(Project, comment_moderation=True)
self.moderated_project.users.add(self.owner)
self.moderated_node = create_node(project=self.moderated_project)
self.first_moderated_comment = get(DocumentComment,
node=self.moderated_node)
self.second_moderated_comment = get(DocumentComment,
node=self.moderated_node)
self.unmoderated_project = get(Project, comment_moderation=False)
self.unmoderated_project.users.add(self.owner)
self.unmoderated_node = create_node(project=self.unmoderated_project)
self.first_unmoderated_comment = get(DocumentComment,
node=self.unmoderated_node)
self.second_unmoderated_comment = get(DocumentComment,
node=self.unmoderated_node)
def test_approved_comments(self):
c = self.first_unmoderated_comment
# This comment has never been approved...
self.assertFalse(c.has_been_approved_since_most_recent_node_change())
# ...until now!
c.moderate(user=self.owner, decision=1)
self.assertTrue(c.has_been_approved_since_most_recent_node_change())
def test_new_node_snapshot_causes_comment_to_show_as_not_approved_since_change(self):
c = self.first_unmoderated_comment
c.moderate(user=self.owner, decision=1)
self.assertTrue(c.has_been_approved_since_most_recent_node_change())
c.node.snapshots.create(hash=random.getrandbits(128))
self.assertFalse(c.has_been_approved_since_most_recent_node_change())
def test_unmoderated_project_shows_all_comments(self):
visible_comments = self.unmoderated_node.visible_comments()
self.assertIn(self.first_unmoderated_comment, visible_comments)
self.assertIn(self.second_unmoderated_comment, visible_comments)
def test_unapproved_comment_is_not_visible_on_moderated_project(self):
# We take a look at the visible comments and find that neither comment
# is among them.
visible_comments = self.moderated_node.visible_comments()
self.assertNotIn(self.first_moderated_comment, visible_comments)
self.assertNotIn(self.second_moderated_comment, visible_comments)
def test_moderated_project_with_unchanged_nodes_shows_only_approved_comment(self):
# Approve the first comment...
self.first_moderated_comment.moderate(user=self.owner, decision=1)
# ...and find that the first comment, but not the second one, is visible.
visible_comments = self.moderated_node.visible_comments()
self.assertIn(self.first_moderated_comment, visible_comments)
self.assertNotIn(self.second_moderated_comment, visible_comments)
def test_moderated_project_with_changed_nodes_dont_show_comments_that_havent_been_approved_since(self):
# Approve the first comment...
self.first_moderated_comment.moderate(user=self.owner, decision=1)
# ...but this time, change the node.
self.first_moderated_comment.node.snapshots.create(hash=random.getrandbits(128))
# Now it does not show as visible.
visible_comments = self.moderated_node.visible_comments()
self.assertNotIn(self.first_moderated_comment, visible_comments)
def test_unapproved_comments_appear_in_moderation_queue(self):
queue = self.moderated_project.moderation_queue()
self.assertIn(self.first_moderated_comment, queue)
self.assertIn(self.second_moderated_comment, queue)
def test_approved_comments_do_not_appear_in_moderation_queue(self):
self.first_moderated_comment.moderate(user=self.owner, decision=1)
queue = self.moderated_project.moderation_queue()
self.assertNotIn(self.first_moderated_comment, queue)
self.assertIn(self.second_moderated_comment, queue)
class NodeAndSnapshotTests(TestCase):
def test_update_with_same_hash_does_not_create_new_snapshot(self):
node = get(DocumentNode)
get(NodeSnapshot, node=node)
hash = "SOMEHASH"
commit = "SOMEGITCOMMIT"
# We initially have just one snapshot.
self.assertEqual(node.snapshots.count(), 1)
# ...but when we update the hash, we have two.
node.update_hash(hash, commit)
self.assertEqual(node.snapshots.count(), 2)
# If we update with the same exact hash and commit, it doesn't create a new snapshot.
node.update_hash(hash, commit)
self.assertEqual(node.snapshots.count(), 2)
def test_node_cannot_be_created_without_commit_and_hash(self):
project = get(Project, versions=[fixture()])
some_version = project.versions.all()[0]
self.assertRaises(TypeError,
DocumentNode.objects.create,
project=project,
version=some_version,
hash=random.getrandbits(128)
)
self.assertRaises(TypeError,
DocumentNode.objects.create,
project=project,
version=some_version,
commit=random.getrandbits(128)
)
def test_node_can_be_sought_From_new_hash(self):
first_hash = "THEoriginalHASH"
second_hash = 'ANEWCRAZYHASH'
node = create_node(hash=first_hash)
get(DocumentComment)
node.update_hash(second_hash, 'ANEWCRAZYCOMMIT')
node_from_orm = DocumentNode.objects.from_hash(node.version.slug,
node.page,
node.latest_hash(),
project_slug=node.project.slug)
self.assertEqual(node, node_from_orm)
node.update_hash(first_hash, 'AthirdCommit')
node_from_orm2 = DocumentNode.objects.from_hash(node.version.slug, node.page, first_hash, node.project.slug)
self.assertEqual(node, node_from_orm2)
@expectedFailure
def test_nodes_with_same_hash_oddness(self):
node_hash = "AcommonHASH"
page = "somepage"
commit = "somecommit"
project = get(Project, versions=[fixture()])
project.add_node(node_hash=node_hash,
page=page,
version=project.versions.all()[0].slug,
commit=commit,
)
# A new commit with a second instance of the exact same content.
project.add_node(node_hash=node_hash,
page=page,
version=project.versions.all()[0].slug,
commit="ANEWCOMMIT",
)
try:
project.nodes.from_hash(project.versions.all()[0].slug, page, node_hash, project.slug)
except NotImplementedError:
self.fail("We don't have indexing yet.")
class CommentModerationViewsTests(TestCase):
def setUp(self):
self.owner = create_user(username='owner', password='<PASSWORD>')
self.moderated_project = get(Project, comment_moderation=True)
self.moderated_project.users.add(self.owner)
self.moderated_node = get(DocumentNode,
project=self.moderated_project)
get(NodeSnapshot, node=self.moderated_node)
self.moderated_comment = get(DocumentComment,
text='Some comment text.',
node=self.moderated_node)
def test_unmoderated_comments_are_listed_in_view(self):
request = RequestFactory()
request.user = self.owner
request.META = {}
response = project_comments_moderation(request, self.moderated_project.slug)
self.assertIn(self.moderated_comment.text, response.content)
class CommentAPIViewsTests(APITestCase):
request_factory = APIRequestFactory()
def setUp(self):
self.owner = create_user(username='owner', password='<PASSWORD>')
self.moderated_project = get(Project, comment_moderation=True)
self.moderated_project.users.add(self.owner)
self.moderated_version = get(Version, project=self.moderated_project)
self.moderated_node = get(DocumentNode,
project=self.moderated_project,
version=self.moderated_version)
get(NodeSnapshot, node=self.moderated_node)
def test_get_comments_view(self):
number_of_comments = DocumentComment.objects.count()
response = self.client.get('/api/v2/comments/')
self.assertEqual(number_of_comments, response.data['count'])
# moooore comments.
get(DocumentComment, n=50, node=self.moderated_node)
response = self.client.get('/api/v2/comments/')
self.assertEqual(number_of_comments + 50, response.data['count'])
def test_get_metadata_view(self):
node = create_node()
get_data = {
'project': node.project.slug,
'version': node.version.slug,
'page': node.page
}
request = self.request_factory.get('/_get_metadata/', get_data)
response = get_metadata(request)
response.render()
number_of_comments = response.data[node.latest_hash()]
# There haven't been any comments yet.
self.assertEqual(number_of_comments, 0)
# Now we'll make one.
get(DocumentComment, node=node, text="Our first comment!")
second_request = self.request_factory.get('/_get_metadata/', get_data)
second_response = get_metadata(second_request)
second_response.render()
number_of_comments = second_response.data[node.latest_hash()]
# And sure enough - one comment.
self.assertEqual(number_of_comments, 1)
def test_add_node_view(self):
node = self.moderated_project.nodes.all()[0]
post_data = {
'document': node.page,
'id': node.latest_hash(),
'project': node.project.slug,
'version': node.version.slug,
'commit': node.latest_commit(),
}
# Now let's delete the node....
DocumentNode.objects.all().delete()
# ...we have no nodes.
self.assertEqual(DocumentNode.objects.count(), 0)
# Hit the API again.
request = self.request_factory.post('/_add_node/', post_data)
response = add_node(request)
# We do now have exactly one Node.
self.assertEqual(DocumentNode.objects.count(), 1)
def test_update_node_view(self):
node = create_node()
# Our node has one snapshot.
self.assertEqual(node.snapshots.count(), 1)
new_hash = "CRAZYnewHASHtoUPDATEnode"
commit = "COOLNEWGITCOMMITHASH"
post_data = {
'old_hash': node.latest_hash(),
'new_hash': new_hash,
'commit': commit,
'project': node.project.slug,
'version': node.version.slug,
'page': node.page
}
request = self.request_factory.post('/_update_node/', post_data)
response = update_node(request)
response.render()
self.assertEqual(response.data['current_hash'], new_hash)
# We now have two snapshots.
self.assertEqual(node.snapshots.count(), 2)
# And the latest hash is the one we just set.
self.assertEqual(node.latest_hash(), new_hash)
def test_add_comment_view_without_existing_hash(self):
comment_text = "Here's a comment added to a new hash."
version = get(Version, project=fixture())
node = create_node(project=version.project, version=version)
user = create_user(username='test', password='<PASSWORD>')
number_of_nodes = DocumentNode.objects.count()
post_data = {
'node': random.getrandbits(128),
'commit': random.getrandbits(128),
'project': node.project.slug,
'version': node.version.slug,
'document_page': node.page,
'text': comment_text
}
self.client.login(username="test", password="<PASSWORD>")
response = self.client.post('/api/v2/comments/', post_data)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['text'], comment_text)
self.assertEqual(DocumentNode.objects.count(), number_of_nodes + 1) # We created a new node.
def test_add_comment_view_with_existing_hash(self):
node = create_node()
user = create_user(username='test', password='<PASSWORD>')
comment_text = "Here's a comment added through the comment view."
post_data = {
'node': node.latest_hash(),
'commit': node.latest_hash(),
'project': node.project.slug,
'version': node.version.slug,
'document_page': node.page,
'text': comment_text
}
self.client.login(username="test", password="<PASSWORD>")
response = self.client.post('/api/v2/comments/', post_data)
comment_from_orm = node.comments.filter(text=comment_text)
self.assertTrue(comment_from_orm.exists())
self.assertEqual(comment_from_orm[0].node, node,
"The comment exists, but lives in a different node! Not supposed to happen.")
def test_add_comment_view_with_changed_hash(self):
first_hash = "THEoriginalHASH"
second_hash = 'ANEWCRAZYHASH'
comment_text = "This comment will follow its node despite hash changing."
# Create a comment on a node whose latest hash is the first one.
node = create_node(hash=first_hash)
get(DocumentComment, node=node, text=comment_text)
# Now change the node's hash.
node.update_hash(second_hash, 'ANEWCRAZYCOMMIT')
node_from_orm = DocumentNode.objects.from_hash(version_slug=node.version.slug,
page=node.page,
node_hash=node.latest_hash(),
project_slug=node.project.slug)
# It's the same node.
self.assertEqual(node, node_from_orm)
# Get all the comments with the second hash.
query_params = {'node': second_hash,
'document_page': node.page,
'project': node.project.slug,
'version': node.version.slug,
}
response = self.client.get('/api/v2/comments/', query_params)
self.assertEqual(response.data['results'][0]['text'], comment_text)
def test_retrieve_comment_on_old_hash(self):
pass
def test_post_comment_on_old_hash(self):
pass
def test_moderate_comment_by_approving(self):
user = create_user(username='test', password='<PASSWORD>')
project = get(Project, versions=[fixture()])
project.users.add(user)
node = create_node(project=project)
comment = get(DocumentComment, node=node)
post_data = {
'decision': 1,
}
self.assertFalse(comment.has_been_approved_since_most_recent_node_change())
self.client.login(username="test", password="<PASSWORD>")
response = self.client.put('/api/v2/comments/%s/moderate/' % comment.id, post_data)
self.assertEqual(response.data['decision'], 1)
self.assertTrue(comment.has_been_approved_since_most_recent_node_change())
def test_stranger_cannot_moderate_comments(self):
node = create_node()
comment = get(DocumentComment, node=node)
post_data = {
'decision': 1,
}
response = self.client.put('/api/v2/comments/%s/moderate/' % comment.id,
post_data
)
self.assertEqual(response.status_code, 403)
| import random
from unittest.case import expectedFailure
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import RequestFactory
from django_dynamic_fixture import fixture
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from rest_framework.test import APIRequestFactory, APITestCase
from readthedocs.builds.models import Version
from readthedocs.comments.models import DocumentNode, DocumentComment
from readthedocs.comments.models import NodeSnapshot
from readthedocs.comments.views import add_node, get_metadata, update_node
from readthedocs.projects.models import Project
from readthedocs.projects.views.private import project_comments_moderation
from readthedocs.rtd_tests.utils import create_user
def create_node(hash=None, commit=None, **kwargs):
snapshot_kwargs = {}
if hash is not None:
snapshot_kwargs['hash'] = hash
if commit is not None:
snapshot_kwargs['commit'] = commit
node = get(DocumentNode, **kwargs)
get(NodeSnapshot, node=node, **snapshot_kwargs)
return node
class ModerationTests(TestCase):
def setUp(self):
self.owner = create_user(username='owner', password='<PASSWORD>')
self.moderated_project = get(Project, comment_moderation=True)
self.moderated_project.users.add(self.owner)
self.moderated_node = create_node(project=self.moderated_project)
self.first_moderated_comment = get(DocumentComment,
node=self.moderated_node)
self.second_moderated_comment = get(DocumentComment,
node=self.moderated_node)
self.unmoderated_project = get(Project, comment_moderation=False)
self.unmoderated_project.users.add(self.owner)
self.unmoderated_node = create_node(project=self.unmoderated_project)
self.first_unmoderated_comment = get(DocumentComment,
node=self.unmoderated_node)
self.second_unmoderated_comment = get(DocumentComment,
node=self.unmoderated_node)
def test_approved_comments(self):
c = self.first_unmoderated_comment
# This comment has never been approved...
self.assertFalse(c.has_been_approved_since_most_recent_node_change())
# ...until now!
c.moderate(user=self.owner, decision=1)
self.assertTrue(c.has_been_approved_since_most_recent_node_change())
def test_new_node_snapshot_causes_comment_to_show_as_not_approved_since_change(self):
c = self.first_unmoderated_comment
c.moderate(user=self.owner, decision=1)
self.assertTrue(c.has_been_approved_since_most_recent_node_change())
c.node.snapshots.create(hash=random.getrandbits(128))
self.assertFalse(c.has_been_approved_since_most_recent_node_change())
def test_unmoderated_project_shows_all_comments(self):
visible_comments = self.unmoderated_node.visible_comments()
self.assertIn(self.first_unmoderated_comment, visible_comments)
self.assertIn(self.second_unmoderated_comment, visible_comments)
def test_unapproved_comment_is_not_visible_on_moderated_project(self):
# We take a look at the visible comments and find that neither comment
# is among them.
visible_comments = self.moderated_node.visible_comments()
self.assertNotIn(self.first_moderated_comment, visible_comments)
self.assertNotIn(self.second_moderated_comment, visible_comments)
def test_moderated_project_with_unchanged_nodes_shows_only_approved_comment(self):
# Approve the first comment...
self.first_moderated_comment.moderate(user=self.owner, decision=1)
# ...and find that the first comment, but not the second one, is visible.
visible_comments = self.moderated_node.visible_comments()
self.assertIn(self.first_moderated_comment, visible_comments)
self.assertNotIn(self.second_moderated_comment, visible_comments)
def test_moderated_project_with_changed_nodes_dont_show_comments_that_havent_been_approved_since(self):
# Approve the first comment...
self.first_moderated_comment.moderate(user=self.owner, decision=1)
# ...but this time, change the node.
self.first_moderated_comment.node.snapshots.create(hash=random.getrandbits(128))
# Now it does not show as visible.
visible_comments = self.moderated_node.visible_comments()
self.assertNotIn(self.first_moderated_comment, visible_comments)
def test_unapproved_comments_appear_in_moderation_queue(self):
queue = self.moderated_project.moderation_queue()
self.assertIn(self.first_moderated_comment, queue)
self.assertIn(self.second_moderated_comment, queue)
def test_approved_comments_do_not_appear_in_moderation_queue(self):
self.first_moderated_comment.moderate(user=self.owner, decision=1)
queue = self.moderated_project.moderation_queue()
self.assertNotIn(self.first_moderated_comment, queue)
self.assertIn(self.second_moderated_comment, queue)
class NodeAndSnapshotTests(TestCase):
def test_update_with_same_hash_does_not_create_new_snapshot(self):
node = get(DocumentNode)
get(NodeSnapshot, node=node)
hash = "SOMEHASH"
commit = "SOMEGITCOMMIT"
# We initially have just one snapshot.
self.assertEqual(node.snapshots.count(), 1)
# ...but when we update the hash, we have two.
node.update_hash(hash, commit)
self.assertEqual(node.snapshots.count(), 2)
# If we update with the same exact hash and commit, it doesn't create a new snapshot.
node.update_hash(hash, commit)
self.assertEqual(node.snapshots.count(), 2)
def test_node_cannot_be_created_without_commit_and_hash(self):
project = get(Project, versions=[fixture()])
some_version = project.versions.all()[0]
self.assertRaises(TypeError,
DocumentNode.objects.create,
project=project,
version=some_version,
hash=random.getrandbits(128)
)
self.assertRaises(TypeError,
DocumentNode.objects.create,
project=project,
version=some_version,
commit=random.getrandbits(128)
)
def test_node_can_be_sought_From_new_hash(self):
first_hash = "THEoriginalHASH"
second_hash = 'ANEWCRAZYHASH'
node = create_node(hash=first_hash)
get(DocumentComment)
node.update_hash(second_hash, 'ANEWCRAZYCOMMIT')
node_from_orm = DocumentNode.objects.from_hash(node.version.slug,
node.page,
node.latest_hash(),
project_slug=node.project.slug)
self.assertEqual(node, node_from_orm)
node.update_hash(first_hash, 'AthirdCommit')
node_from_orm2 = DocumentNode.objects.from_hash(node.version.slug, node.page, first_hash, node.project.slug)
self.assertEqual(node, node_from_orm2)
@expectedFailure
def test_nodes_with_same_hash_oddness(self):
node_hash = "AcommonHASH"
page = "somepage"
commit = "somecommit"
project = get(Project, versions=[fixture()])
project.add_node(node_hash=node_hash,
page=page,
version=project.versions.all()[0].slug,
commit=commit,
)
# A new commit with a second instance of the exact same content.
project.add_node(node_hash=node_hash,
page=page,
version=project.versions.all()[0].slug,
commit="ANEWCOMMIT",
)
try:
project.nodes.from_hash(project.versions.all()[0].slug, page, node_hash, project.slug)
except NotImplementedError:
self.fail("We don't have indexing yet.")
class CommentModerationViewsTests(TestCase):
def setUp(self):
self.owner = create_user(username='owner', password='<PASSWORD>')
self.moderated_project = get(Project, comment_moderation=True)
self.moderated_project.users.add(self.owner)
self.moderated_node = get(DocumentNode,
project=self.moderated_project)
get(NodeSnapshot, node=self.moderated_node)
self.moderated_comment = get(DocumentComment,
text='Some comment text.',
node=self.moderated_node)
def test_unmoderated_comments_are_listed_in_view(self):
request = RequestFactory()
request.user = self.owner
request.META = {}
response = project_comments_moderation(request, self.moderated_project.slug)
self.assertIn(self.moderated_comment.text, response.content)
class CommentAPIViewsTests(APITestCase):
request_factory = APIRequestFactory()
def setUp(self):
self.owner = create_user(username='owner', password='<PASSWORD>')
self.moderated_project = get(Project, comment_moderation=True)
self.moderated_project.users.add(self.owner)
self.moderated_version = get(Version, project=self.moderated_project)
self.moderated_node = get(DocumentNode,
project=self.moderated_project,
version=self.moderated_version)
get(NodeSnapshot, node=self.moderated_node)
def test_get_comments_view(self):
number_of_comments = DocumentComment.objects.count()
response = self.client.get('/api/v2/comments/')
self.assertEqual(number_of_comments, response.data['count'])
# moooore comments.
get(DocumentComment, n=50, node=self.moderated_node)
response = self.client.get('/api/v2/comments/')
self.assertEqual(number_of_comments + 50, response.data['count'])
def test_get_metadata_view(self):
node = create_node()
get_data = {
'project': node.project.slug,
'version': node.version.slug,
'page': node.page
}
request = self.request_factory.get('/_get_metadata/', get_data)
response = get_metadata(request)
response.render()
number_of_comments = response.data[node.latest_hash()]
# There haven't been any comments yet.
self.assertEqual(number_of_comments, 0)
# Now we'll make one.
get(DocumentComment, node=node, text="Our first comment!")
second_request = self.request_factory.get('/_get_metadata/', get_data)
second_response = get_metadata(second_request)
second_response.render()
number_of_comments = second_response.data[node.latest_hash()]
# And sure enough - one comment.
self.assertEqual(number_of_comments, 1)
def test_add_node_view(self):
node = self.moderated_project.nodes.all()[0]
post_data = {
'document': node.page,
'id': node.latest_hash(),
'project': node.project.slug,
'version': node.version.slug,
'commit': node.latest_commit(),
}
# Now let's delete the node....
DocumentNode.objects.all().delete()
# ...we have no nodes.
self.assertEqual(DocumentNode.objects.count(), 0)
# Hit the API again.
request = self.request_factory.post('/_add_node/', post_data)
response = add_node(request)
# We do now have exactly one Node.
self.assertEqual(DocumentNode.objects.count(), 1)
def test_update_node_view(self):
node = create_node()
# Our node has one snapshot.
self.assertEqual(node.snapshots.count(), 1)
new_hash = "CRAZYnewHASHtoUPDATEnode"
commit = "COOLNEWGITCOMMITHASH"
post_data = {
'old_hash': node.latest_hash(),
'new_hash': new_hash,
'commit': commit,
'project': node.project.slug,
'version': node.version.slug,
'page': node.page
}
request = self.request_factory.post('/_update_node/', post_data)
response = update_node(request)
response.render()
self.assertEqual(response.data['current_hash'], new_hash)
# We now have two snapshots.
self.assertEqual(node.snapshots.count(), 2)
# And the latest hash is the one we just set.
self.assertEqual(node.latest_hash(), new_hash)
def test_add_comment_view_without_existing_hash(self):
comment_text = "Here's a comment added to a new hash."
version = get(Version, project=fixture())
node = create_node(project=version.project, version=version)
user = create_user(username='test', password='<PASSWORD>')
number_of_nodes = DocumentNode.objects.count()
post_data = {
'node': random.getrandbits(128),
'commit': random.getrandbits(128),
'project': node.project.slug,
'version': node.version.slug,
'document_page': node.page,
'text': comment_text
}
self.client.login(username="test", password="<PASSWORD>")
response = self.client.post('/api/v2/comments/', post_data)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['text'], comment_text)
self.assertEqual(DocumentNode.objects.count(), number_of_nodes + 1) # We created a new node.
def test_add_comment_view_with_existing_hash(self):
node = create_node()
user = create_user(username='test', password='<PASSWORD>')
comment_text = "Here's a comment added through the comment view."
post_data = {
'node': node.latest_hash(),
'commit': node.latest_hash(),
'project': node.project.slug,
'version': node.version.slug,
'document_page': node.page,
'text': comment_text
}
self.client.login(username="test", password="<PASSWORD>")
response = self.client.post('/api/v2/comments/', post_data)
comment_from_orm = node.comments.filter(text=comment_text)
self.assertTrue(comment_from_orm.exists())
self.assertEqual(comment_from_orm[0].node, node,
"The comment exists, but lives in a different node! Not supposed to happen.")
def test_add_comment_view_with_changed_hash(self):
first_hash = "THEoriginalHASH"
second_hash = 'ANEWCRAZYHASH'
comment_text = "This comment will follow its node despite hash changing."
# Create a comment on a node whose latest hash is the first one.
node = create_node(hash=first_hash)
get(DocumentComment, node=node, text=comment_text)
# Now change the node's hash.
node.update_hash(second_hash, 'ANEWCRAZYCOMMIT')
node_from_orm = DocumentNode.objects.from_hash(version_slug=node.version.slug,
page=node.page,
node_hash=node.latest_hash(),
project_slug=node.project.slug)
# It's the same node.
self.assertEqual(node, node_from_orm)
# Get all the comments with the second hash.
query_params = {'node': second_hash,
'document_page': node.page,
'project': node.project.slug,
'version': node.version.slug,
}
response = self.client.get('/api/v2/comments/', query_params)
self.assertEqual(response.data['results'][0]['text'], comment_text)
def test_retrieve_comment_on_old_hash(self):
pass
def test_post_comment_on_old_hash(self):
pass
def test_moderate_comment_by_approving(self):
user = create_user(username='test', password='<PASSWORD>')
project = get(Project, versions=[fixture()])
project.users.add(user)
node = create_node(project=project)
comment = get(DocumentComment, node=node)
post_data = {
'decision': 1,
}
self.assertFalse(comment.has_been_approved_since_most_recent_node_change())
self.client.login(username="test", password="<PASSWORD>")
response = self.client.put('/api/v2/comments/%s/moderate/' % comment.id, post_data)
self.assertEqual(response.data['decision'], 1)
self.assertTrue(comment.has_been_approved_since_most_recent_node_change())
def test_stranger_cannot_moderate_comments(self):
node = create_node()
comment = get(DocumentComment, node=node)
post_data = {
'decision': 1,
}
response = self.client.put('/api/v2/comments/%s/moderate/' % comment.id,
post_data
)
self.assertEqual(response.status_code, 403)
| en | 0.931071 | # This comment has never been approved... # ...until now! # We take a look at the visible comments and find that neither comment # is among them. # Approve the first comment... # ...and find that the first comment, but not the second one, is visible. # Approve the first comment... # ...but this time, change the node. # Now it does not show as visible. # We initially have just one snapshot. # ...but when we update the hash, we have two. # If we update with the same exact hash and commit, it doesn't create a new snapshot. # A new commit with a second instance of the exact same content. # moooore comments. # There haven't been any comments yet. # Now we'll make one. # And sure enough - one comment. # Now let's delete the node.... # ...we have no nodes. # Hit the API again. # We do now have exactly one Node. # Our node has one snapshot. # We now have two snapshots. # And the latest hash is the one we just set. # We created a new node. # Create a comment on a node whose latest hash is the first one. # Now change the node's hash. # It's the same node. # Get all the comments with the second hash. | 2.081314 | 2 |
Pix2Story/source/services/azureml_service.py | skumar911/ailab | 4,537 | 6630805 | import os, re, requests, json
from azureml.core.model import Model
from azureml.core.webservice import Webservice, AksWebservice
from azureml.core.image import ContainerImage
from azureml.core import Workspace, Run
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.compute import AksCompute, ComputeTarget
from helpers import filter_list
import glob
import json
class AzureMlService():
def __init__(self, config_path):
self.__config = json.loads(open(config_path).read())
self.__ws = Workspace.from_config(path='config_deploy/config.json')
print(self.__ws.name, self.__ws.location, self.__ws.resource_group, self.__ws.location, sep = '\t')
def deployment(self):
image = self.create_or_get_image()
compute = self.__create_or_get_compute()
self.__deploy_service(image, compute)
def create_or_get_image(self):
image_params = self.__config['image']
images = ContainerImage.list(self.__ws,image_name=image_config['name'])
image = images.find_by_property('version',image_config['version'])
if image:
return image
image_config = ContainerImage.image_configuration(execution_script="score.py",
runtime="python",
conda_file="config_deploy/myenv.yml",
docker_file="config_deploy/Dockerfile",
enable_gpu=True,
dependencies=['generation', 'config.py', 'skipthoughts_vectors',
'generate.py', 'preprocessing/text_moderator.py'])
image = ContainerImage.create(name=image_params['name'],
models = [],
image_config = image_config,
workspace = self.__ws)
image.wait_for_creation(show_output = True)
return image
def __create_or_get_compute(self):
compute_config = self.__config['compute']
compute_list = AksCompute.list(self.__ws)
compute = compute_list.find_by_property('name',compute_config['name'])
if compute:
return compute
prov_config = AksCompute.provisioning_configuration(agent_count=compute_config['agent_count'],
vm_size=compute_config['vm_size'], location=compute_config['location'])
aks_name = compute_config['name']
aks_target = ComputeTarget.create(workspace = self.__ws,
name = aks_name,
provisioning_configuration = prov_config)
aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
return aks_target
def __deploy_service(self,image,compute):
service_config = self.__config['deploy']
services = AksWebservice.list(self.__ws)
service = services.find_by_property('name',service_config['name'])
if service:
service.update(auth_enabled=service_config['auth'])
service.wait_for_deployment(show_output = True)
return service
aks_config = AksWebservice.deploy_configuration(auth_enabled=True, max_request_wait_time=75000,
replica_max_concurrent_requests=100,autoscale_enabled=False,num_replicas=15)
aks_service_name = service_config['name']
aks_service = Webservice.deploy_from_image(workspace = self.__ws,
name = aks_service_name,
image = image,
deployment_config = aks_config,
deployment_target = compute)
aks_service.wait_for_deployment(show_output = True)
print(aks_service.state)
return aks_service
| import os, re, requests, json
from azureml.core.model import Model
from azureml.core.webservice import Webservice, AksWebservice
from azureml.core.image import ContainerImage
from azureml.core import Workspace, Run
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.compute import AksCompute, ComputeTarget
from helpers import filter_list
import glob
import json
class AzureMlService():
def __init__(self, config_path):
self.__config = json.loads(open(config_path).read())
self.__ws = Workspace.from_config(path='config_deploy/config.json')
print(self.__ws.name, self.__ws.location, self.__ws.resource_group, self.__ws.location, sep = '\t')
def deployment(self):
image = self.create_or_get_image()
compute = self.__create_or_get_compute()
self.__deploy_service(image, compute)
def create_or_get_image(self):
image_params = self.__config['image']
images = ContainerImage.list(self.__ws,image_name=image_config['name'])
image = images.find_by_property('version',image_config['version'])
if image:
return image
image_config = ContainerImage.image_configuration(execution_script="score.py",
runtime="python",
conda_file="config_deploy/myenv.yml",
docker_file="config_deploy/Dockerfile",
enable_gpu=True,
dependencies=['generation', 'config.py', 'skipthoughts_vectors',
'generate.py', 'preprocessing/text_moderator.py'])
image = ContainerImage.create(name=image_params['name'],
models = [],
image_config = image_config,
workspace = self.__ws)
image.wait_for_creation(show_output = True)
return image
def __create_or_get_compute(self):
compute_config = self.__config['compute']
compute_list = AksCompute.list(self.__ws)
compute = compute_list.find_by_property('name',compute_config['name'])
if compute:
return compute
prov_config = AksCompute.provisioning_configuration(agent_count=compute_config['agent_count'],
vm_size=compute_config['vm_size'], location=compute_config['location'])
aks_name = compute_config['name']
aks_target = ComputeTarget.create(workspace = self.__ws,
name = aks_name,
provisioning_configuration = prov_config)
aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
return aks_target
def __deploy_service(self,image,compute):
service_config = self.__config['deploy']
services = AksWebservice.list(self.__ws)
service = services.find_by_property('name',service_config['name'])
if service:
service.update(auth_enabled=service_config['auth'])
service.wait_for_deployment(show_output = True)
return service
aks_config = AksWebservice.deploy_configuration(auth_enabled=True, max_request_wait_time=75000,
replica_max_concurrent_requests=100,autoscale_enabled=False,num_replicas=15)
aks_service_name = service_config['name']
aks_service = Webservice.deploy_from_image(workspace = self.__ws,
name = aks_service_name,
image = image,
deployment_config = aks_config,
deployment_target = compute)
aks_service.wait_for_deployment(show_output = True)
print(aks_service.state)
return aks_service
| none | 1 | 2.044389 | 2 |
|
notifications/migrations/0003_auto_20161028_1749.py | kevinvargasp/my_proyecto | 0 | 6630806 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-28 21:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_auto_20161028_1517'),
]
operations = [
migrations.AlterModelOptions(
name='notification',
options={'permissions': (('show_notification', 'Can Details Notificacion'), ('index_notification', 'Can List Notificacion')), 'verbose_name': 'Notificacion', 'verbose_name_plural': 'Notificaciones'},
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-28 21:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_auto_20161028_1517'),
]
operations = [
migrations.AlterModelOptions(
name='notification',
options={'permissions': (('show_notification', 'Can Details Notificacion'), ('index_notification', 'Can List Notificacion')), 'verbose_name': 'Notificacion', 'verbose_name_plural': 'Notificaciones'},
),
]
| en | 0.789179 | # -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-10-28 21:49 | 1.548185 | 2 |
xlsx2html/__init__.py | waldobeest/xlsx2html | 0 | 6630807 | <reponame>waldobeest/xlsx2html
# -*- coding: utf-8 -*-
import warnings
from .core import xlsx2html
def xls2html(*args, **kwargs):
warnings.warn("This func was renamed to xlsx2html.", DeprecationWarning)
return xlsx2html(*args, **kwargs)
| # -*- coding: utf-8 -*-
import warnings
from .core import xlsx2html
def xls2html(*args, **kwargs):
warnings.warn("This func was renamed to xlsx2html.", DeprecationWarning)
return xlsx2html(*args, **kwargs) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.041602 | 2 |
osxphotos/utils.py | xwu64/osxphotos | 0 | 6630808 | """ Utility functions used in osxphotos """
import fnmatch
import glob
import importlib
import inspect
import logging
import os
import os.path
import pathlib
import platform
import re
import sqlite3
import subprocess
import sys
import unicodedata
import urllib.parse
from plistlib import load as plistload
from typing import Callable, Union
import CoreFoundation
import objc
from Foundation import NSString
from ._constants import UNICODE_FORMAT
__all__ = [
"noop",
"lineno",
"dd_to_dms_str",
"get_system_library_path",
"get_last_library_path",
"list_photo_libraries",
"normalize_fs_path",
"findfiles",
"normalize_unicode",
"increment_filename_with_count",
"increment_filename",
"expand_and_validate_filepath",
"load_function",
]
_DEBUG = False
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s",
)
if not _DEBUG:
logging.disable(logging.DEBUG)
def _get_logger():
"""Used only for testing
Returns:
logging.Logger object -- logging.Logger object for osxphotos
"""
return logging.Logger(__name__)
def _set_debug(debug):
"""Enable or disable debug logging"""
global _DEBUG
_DEBUG = debug
if debug:
logging.disable(logging.NOTSET)
else:
logging.disable(logging.DEBUG)
def _debug():
"""returns True if debugging turned on (via _set_debug), otherwise, false"""
return _DEBUG
def noop(*args, **kwargs):
"""do nothing (no operation)"""
pass
def lineno(filename):
"""Returns string with filename and current line number in caller as '(filename): line_num'
Will trim filename to just the name, dropping path, if any."""
line = inspect.currentframe().f_back.f_lineno
filename = pathlib.Path(filename).name
return f"{filename}: {line}"
def _get_os_version():
# returns tuple of str containing OS version
# e.g. 10.13.6 = ("10", "13", "6")
version = platform.mac_ver()[0].split(".")
if len(version) == 2:
(ver, major) = version
minor = "0"
elif len(version) == 3:
(ver, major, minor) = version
else:
raise (
ValueError(
f"Could not parse version string: {platform.mac_ver()} {version}"
)
)
return (ver, major, minor)
def _check_file_exists(filename):
"""returns true if file exists and is not a directory
otherwise returns false"""
filename = os.path.abspath(filename)
return os.path.exists(filename) and not os.path.isdir(filename)
def _get_resource_loc(model_id):
"""returns folder_id and file_id needed to find location of edited photo"""
""" and live photos for version <= Photos 4.0 """
# determine folder where Photos stores edited version
# edited images are stored in:
# Photos Library.photoslibrary/resources/media/version/XX/00/fullsizeoutput_Y.jpeg
# where XX and Y are computed based on RKModelResources.modelId
# file_id (Y in above example) is hex representation of model_id without leading 0x
file_id = hex_id = hex(model_id)[2:]
# folder_id (XX) in above example if first two chars of model_id converted to hex
# and left padded with zeros if < 4 digits
folder_id = hex_id.zfill(4)[0:2]
return folder_id, file_id
def _dd_to_dms(dd):
"""convert lat or lon in decimal degrees (dd) to degrees, minutes, seconds"""
""" return tuple of int(deg), int(min), float(sec) """
dd = float(dd)
negative = dd < 0
dd = abs(dd)
min_, sec_ = divmod(dd * 3600, 60)
deg_, min_ = divmod(min_, 60)
if negative:
if deg_ > 0:
deg_ = deg_ * -1
elif min_ > 0:
min_ = min_ * -1
else:
sec_ = sec_ * -1
return int(deg_), int(min_), sec_
def dd_to_dms_str(lat, lon):
"""convert latitude, longitude in degrees to degrees, minutes, seconds as string"""
""" lat: latitude in degrees """
""" lon: longitude in degrees """
""" returns: string tuple in format ("51 deg 30' 12.86\" N", "0 deg 7' 54.50\" W") """
""" this is the same format used by exiftool's json format """
# TODO: add this to readme
lat_deg, lat_min, lat_sec = _dd_to_dms(lat)
lon_deg, lon_min, lon_sec = _dd_to_dms(lon)
lat_hemisphere = "N"
if any([lat_deg < 0, lat_min < 0, lat_sec < 0]):
lat_hemisphere = "S"
lon_hemisphere = "E"
if any([lon_deg < 0, lon_min < 0, lon_sec < 0]):
lon_hemisphere = "W"
lat_str = (
f"{abs(lat_deg)} deg {abs(lat_min)}' {abs(lat_sec):.2f}\" {lat_hemisphere}"
)
lon_str = (
f"{abs(lon_deg)} deg {abs(lon_min)}' {abs(lon_sec):.2f}\" {lon_hemisphere}"
)
return lat_str, lon_str
def get_system_library_path():
"""return the path to the system Photos library as string"""
""" only works on MacOS 10.15 """
""" on earlier versions, returns None """
_, major, _ = _get_os_version()
if int(major) < 15:
logging.debug(
f"get_system_library_path not implemented for MacOS < 10.15: you have {major}"
)
return None
plist_file = pathlib.Path(
str(pathlib.Path.home())
+ "/Library/Containers/com.apple.photolibraryd/Data/Library/Preferences/com.apple.photolibraryd.plist"
)
if plist_file.is_file():
with open(plist_file, "rb") as fp:
pl = plistload(fp)
else:
logging.debug(f"could not find plist file: {str(plist_file)}")
return None
return pl.get("SystemLibraryPath")
def get_last_library_path():
"""returns the path to the last opened Photos library
If a library has never been opened, returns None"""
plist_file = pathlib.Path(
str(pathlib.Path.home())
+ "/Library/Containers/com.apple.Photos/Data/Library/Preferences/com.apple.Photos.plist"
)
if plist_file.is_file():
with open(plist_file, "rb") as fp:
pl = plistload(fp)
else:
logging.debug(f"could not find plist file: {str(plist_file)}")
return None
# get the IPXDefaultLibraryURLBookmark from com.apple.Photos.plist
# this is a serialized CFData object
photosurlref = pl.get("IPXDefaultLibraryURLBookmark")
if photosurlref is not None:
# use CFURLCreateByResolvingBookmarkData to de-serialize bookmark data into a CFURLRef
# pylint: disable=no-member
# pylint: disable=undefined-variable
photosurl = CoreFoundation.CFURLCreateByResolvingBookmarkData(
CoreFoundation.kCFAllocatorDefault, photosurlref, 0, None, None, None, None
)
# the CFURLRef we got is a sruct that python treats as an array
# I'd like to pass this to CFURLGetFileSystemRepresentation to get the path but
# CFURLGetFileSystemRepresentation barfs when it gets an array from python instead of expected struct
# first element is the path string in form:
# file:///Users/username/Pictures/Photos%20Library.photoslibrary/
photosurlstr = photosurl[0].absoluteString() if photosurl[0] else None
# now coerce the file URI back into an OS path
# surely there must be a better way
if photosurlstr is not None:
photospath = os.path.normpath(
urllib.parse.unquote(urllib.parse.urlparse(photosurlstr).path)
)
else:
logging.warning(
"Could not extract photos URL String from IPXDefaultLibraryURLBookmark"
)
return None
return photospath
else:
logging.debug("Could not get path to Photos database")
return None
def list_photo_libraries():
"""returns list of Photos libraries found on the system"""
""" on MacOS < 10.15, this may omit some libraries """
# On 10.15, mdfind appears to find all libraries
# On older MacOS versions, mdfind appears to ignore some libraries
# glob to find libraries in ~/Pictures then mdfind to find all the others
# TODO: make this more robust
lib_list = glob.glob(f"{str(pathlib.Path.home())}/Pictures/*.photoslibrary")
# On older OS, may not get all libraries so make sure we get the last one
last_lib = get_last_library_path()
if last_lib:
lib_list.append(last_lib)
output = subprocess.check_output(
["/usr/bin/mdfind", "-onlyin", "/", "-name", ".photoslibrary"]
).splitlines()
for lib in output:
lib_list.append(lib.decode("utf-8"))
lib_list = list(set(lib_list))
lib_list.sort()
return lib_list
def normalize_fs_path(path: str) -> str:
"""Normalize filesystem paths with unicode in them"""
with objc.autorelease_pool():
normalized_path = NSString.fileSystemRepresentation(path)
return normalized_path.decode("utf8")
def findfiles(pattern, path_):
"""Returns list of filenames from path_ matched by pattern
shell pattern. Matching is case-insensitive.
If 'path_' is invalid/doesn't exist, returns []."""
if not os.path.isdir(path_):
return []
# See: https://gist.github.com/techtonik/5694830
# paths need to be normalized for unicode as filesystem returns unicode in NFD form
pattern = normalize_fs_path(pattern)
rule = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
files = [normalize_fs_path(p) for p in os.listdir(path_)]
return [name for name in files if rule.match(name)]
def _open_sql_file(dbname):
"""opens sqlite file dbname in read-only mode
returns tuple of (connection, cursor)"""
try:
dbpath = pathlib.Path(dbname).resolve()
conn = sqlite3.connect(f"{dbpath.as_uri()}?mode=ro", timeout=1, uri=True)
c = conn.cursor()
except sqlite3.Error as e:
sys.exit(f"An error occurred opening sqlite file: {e.args[0]} {dbname}")
return (conn, c)
def _db_is_locked(dbname):
"""check to see if a sqlite3 db is locked
returns True if database is locked, otherwise False
dbname: name of database to test"""
# first, check to see if lock file exists, if so, assume the file is locked
lock_name = f"{dbname}.lock"
if os.path.exists(lock_name):
logging.debug(f"{dbname} is locked")
return True
# no lock file so try to read from the database to see if it's locked
locked = None
try:
(conn, c) = _open_sql_file(dbname)
c.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;")
conn.close()
logging.debug(f"{dbname} is not locked")
locked = False
except:
logging.debug(f"{dbname} is locked")
locked = True
return locked
# OSXPHOTOS_XATTR_UUID = "com.osxphotos.uuid"
# def get_uuid_for_file(filepath):
# """ returns UUID associated with an exported file
# filepath: path to exported photo
# """
# attr = xattr.xattr(filepath)
# try:
# uuid_bytes = attr[OSXPHOTOS_XATTR_UUID]
# uuid_str = uuid_bytes.decode('utf-8')
# except KeyError:
# uuid_str = None
# return uuid_str
# def set_uuid_for_file(filepath, uuid):
# """ sets the UUID associated with an exported file
# filepath: path to exported photo
# uuid: uuid string for photo
# """
# if not os.path.exists(filepath):
# raise FileNotFoundError(f"Missing file: {filepath}")
# attr = xattr.xattr(filepath)
# uuid_bytes = bytes(uuid, 'utf-8')
# attr.set(OSXPHOTOS_XATTR_UUID, uuid_bytes)
def normalize_unicode(value):
"""normalize unicode data"""
if value is not None:
if isinstance(value, (tuple, list)):
return tuple(unicodedata.normalize(UNICODE_FORMAT, v) for v in value)
elif isinstance(value, str):
return unicodedata.normalize(UNICODE_FORMAT, value)
else:
return value
else:
return None
def increment_filename_with_count(
filepath: Union[str, pathlib.Path], count: int = 0
) -> str:
"""Return filename (1).ext, etc if filename.ext exists
If file exists in filename's parent folder with same stem as filename,
add (1), (2), etc. until a non-existing filename is found.
Args:
filepath: str or pathlib.Path; full path, including file name
count: int; starting increment value
Returns:
tuple of new filepath (or same if not incremented), count
Note: This obviously is subject to race condition so using with caution.
"""
dest = filepath if isinstance(filepath, pathlib.Path) else pathlib.Path(filepath)
dest_files = findfiles(f"{dest.stem}*", str(dest.parent))
dest_files = [normalize_fs_path(pathlib.Path(f).stem.lower()) for f in dest_files]
dest_new = dest.stem
if count:
dest_new = f"{dest.stem} ({count})"
while normalize_fs_path(dest_new.lower()) in dest_files:
count += 1
dest_new = f"{dest.stem} ({count})"
dest = dest.parent / f"{dest_new}{dest.suffix}"
return str(dest), count
def increment_filename(filepath: Union[str, pathlib.Path]) -> str:
"""Return filename (1).ext, etc if filename.ext exists
If file exists in filename's parent folder with same stem as filename,
add (1), (2), etc. until a non-existing filename is found.
Args:
filepath: str or pathlib.Path; full path, including file name
Returns:
new filepath (or same if not incremented)
Note: This obviously is subject to race condition so using with caution.
"""
new_filepath, _ = increment_filename_with_count(filepath)
return new_filepath
def expand_and_validate_filepath(path: str) -> str:
"""validate and expand ~ in filepath, also un-escapes spaces
Returns:
expanded path if path is valid file, else None
"""
path = re.sub(r"\\ ", " ", path)
path = pathlib.Path(path).expanduser()
if path.is_file():
return str(path)
return None
def load_function(pyfile: str, function_name: str) -> Callable:
"""Load function_name from python file pyfile"""
module_file = pathlib.Path(pyfile)
if not module_file.is_file():
raise FileNotFoundError(f"module {pyfile} does not appear to exist")
module_dir = module_file.parent or pathlib.Path(os.getcwd())
module_name = module_file.stem
# store old sys.path and ensure module_dir at beginning of path
syspath = sys.path
sys.path = [str(module_dir)] + syspath
module = importlib.import_module(module_name)
try:
func = getattr(module, function_name)
except AttributeError:
raise ValueError(f"'{function_name}' not found in module '{module_name}'")
finally:
# restore sys.path
sys.path = syspath
return func
| """ Utility functions used in osxphotos """
import fnmatch
import glob
import importlib
import inspect
import logging
import os
import os.path
import pathlib
import platform
import re
import sqlite3
import subprocess
import sys
import unicodedata
import urllib.parse
from plistlib import load as plistload
from typing import Callable, Union
import CoreFoundation
import objc
from Foundation import NSString
from ._constants import UNICODE_FORMAT
__all__ = [
"noop",
"lineno",
"dd_to_dms_str",
"get_system_library_path",
"get_last_library_path",
"list_photo_libraries",
"normalize_fs_path",
"findfiles",
"normalize_unicode",
"increment_filename_with_count",
"increment_filename",
"expand_and_validate_filepath",
"load_function",
]
_DEBUG = False
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s",
)
if not _DEBUG:
logging.disable(logging.DEBUG)
def _get_logger():
"""Used only for testing
Returns:
logging.Logger object -- logging.Logger object for osxphotos
"""
return logging.Logger(__name__)
def _set_debug(debug):
"""Enable or disable debug logging"""
global _DEBUG
_DEBUG = debug
if debug:
logging.disable(logging.NOTSET)
else:
logging.disable(logging.DEBUG)
def _debug():
"""returns True if debugging turned on (via _set_debug), otherwise, false"""
return _DEBUG
def noop(*args, **kwargs):
"""do nothing (no operation)"""
pass
def lineno(filename):
"""Returns string with filename and current line number in caller as '(filename): line_num'
Will trim filename to just the name, dropping path, if any."""
line = inspect.currentframe().f_back.f_lineno
filename = pathlib.Path(filename).name
return f"{filename}: {line}"
def _get_os_version():
# returns tuple of str containing OS version
# e.g. 10.13.6 = ("10", "13", "6")
version = platform.mac_ver()[0].split(".")
if len(version) == 2:
(ver, major) = version
minor = "0"
elif len(version) == 3:
(ver, major, minor) = version
else:
raise (
ValueError(
f"Could not parse version string: {platform.mac_ver()} {version}"
)
)
return (ver, major, minor)
def _check_file_exists(filename):
"""returns true if file exists and is not a directory
otherwise returns false"""
filename = os.path.abspath(filename)
return os.path.exists(filename) and not os.path.isdir(filename)
def _get_resource_loc(model_id):
"""returns folder_id and file_id needed to find location of edited photo"""
""" and live photos for version <= Photos 4.0 """
# determine folder where Photos stores edited version
# edited images are stored in:
# Photos Library.photoslibrary/resources/media/version/XX/00/fullsizeoutput_Y.jpeg
# where XX and Y are computed based on RKModelResources.modelId
# file_id (Y in above example) is hex representation of model_id without leading 0x
file_id = hex_id = hex(model_id)[2:]
# folder_id (XX) in above example if first two chars of model_id converted to hex
# and left padded with zeros if < 4 digits
folder_id = hex_id.zfill(4)[0:2]
return folder_id, file_id
def _dd_to_dms(dd):
"""convert lat or lon in decimal degrees (dd) to degrees, minutes, seconds"""
""" return tuple of int(deg), int(min), float(sec) """
dd = float(dd)
negative = dd < 0
dd = abs(dd)
min_, sec_ = divmod(dd * 3600, 60)
deg_, min_ = divmod(min_, 60)
if negative:
if deg_ > 0:
deg_ = deg_ * -1
elif min_ > 0:
min_ = min_ * -1
else:
sec_ = sec_ * -1
return int(deg_), int(min_), sec_
def dd_to_dms_str(lat, lon):
"""convert latitude, longitude in degrees to degrees, minutes, seconds as string"""
""" lat: latitude in degrees """
""" lon: longitude in degrees """
""" returns: string tuple in format ("51 deg 30' 12.86\" N", "0 deg 7' 54.50\" W") """
""" this is the same format used by exiftool's json format """
# TODO: add this to readme
lat_deg, lat_min, lat_sec = _dd_to_dms(lat)
lon_deg, lon_min, lon_sec = _dd_to_dms(lon)
lat_hemisphere = "N"
if any([lat_deg < 0, lat_min < 0, lat_sec < 0]):
lat_hemisphere = "S"
lon_hemisphere = "E"
if any([lon_deg < 0, lon_min < 0, lon_sec < 0]):
lon_hemisphere = "W"
lat_str = (
f"{abs(lat_deg)} deg {abs(lat_min)}' {abs(lat_sec):.2f}\" {lat_hemisphere}"
)
lon_str = (
f"{abs(lon_deg)} deg {abs(lon_min)}' {abs(lon_sec):.2f}\" {lon_hemisphere}"
)
return lat_str, lon_str
def get_system_library_path():
"""return the path to the system Photos library as string"""
""" only works on MacOS 10.15 """
""" on earlier versions, returns None """
_, major, _ = _get_os_version()
if int(major) < 15:
logging.debug(
f"get_system_library_path not implemented for MacOS < 10.15: you have {major}"
)
return None
plist_file = pathlib.Path(
str(pathlib.Path.home())
+ "/Library/Containers/com.apple.photolibraryd/Data/Library/Preferences/com.apple.photolibraryd.plist"
)
if plist_file.is_file():
with open(plist_file, "rb") as fp:
pl = plistload(fp)
else:
logging.debug(f"could not find plist file: {str(plist_file)}")
return None
return pl.get("SystemLibraryPath")
def get_last_library_path():
"""returns the path to the last opened Photos library
If a library has never been opened, returns None"""
plist_file = pathlib.Path(
str(pathlib.Path.home())
+ "/Library/Containers/com.apple.Photos/Data/Library/Preferences/com.apple.Photos.plist"
)
if plist_file.is_file():
with open(plist_file, "rb") as fp:
pl = plistload(fp)
else:
logging.debug(f"could not find plist file: {str(plist_file)}")
return None
# get the IPXDefaultLibraryURLBookmark from com.apple.Photos.plist
# this is a serialized CFData object
photosurlref = pl.get("IPXDefaultLibraryURLBookmark")
if photosurlref is not None:
# use CFURLCreateByResolvingBookmarkData to de-serialize bookmark data into a CFURLRef
# pylint: disable=no-member
# pylint: disable=undefined-variable
photosurl = CoreFoundation.CFURLCreateByResolvingBookmarkData(
CoreFoundation.kCFAllocatorDefault, photosurlref, 0, None, None, None, None
)
# the CFURLRef we got is a sruct that python treats as an array
# I'd like to pass this to CFURLGetFileSystemRepresentation to get the path but
# CFURLGetFileSystemRepresentation barfs when it gets an array from python instead of expected struct
# first element is the path string in form:
# file:///Users/username/Pictures/Photos%20Library.photoslibrary/
photosurlstr = photosurl[0].absoluteString() if photosurl[0] else None
# now coerce the file URI back into an OS path
# surely there must be a better way
if photosurlstr is not None:
photospath = os.path.normpath(
urllib.parse.unquote(urllib.parse.urlparse(photosurlstr).path)
)
else:
logging.warning(
"Could not extract photos URL String from IPXDefaultLibraryURLBookmark"
)
return None
return photospath
else:
logging.debug("Could not get path to Photos database")
return None
def list_photo_libraries():
"""returns list of Photos libraries found on the system"""
""" on MacOS < 10.15, this may omit some libraries """
# On 10.15, mdfind appears to find all libraries
# On older MacOS versions, mdfind appears to ignore some libraries
# glob to find libraries in ~/Pictures then mdfind to find all the others
# TODO: make this more robust
lib_list = glob.glob(f"{str(pathlib.Path.home())}/Pictures/*.photoslibrary")
# On older OS, may not get all libraries so make sure we get the last one
last_lib = get_last_library_path()
if last_lib:
lib_list.append(last_lib)
output = subprocess.check_output(
["/usr/bin/mdfind", "-onlyin", "/", "-name", ".photoslibrary"]
).splitlines()
for lib in output:
lib_list.append(lib.decode("utf-8"))
lib_list = list(set(lib_list))
lib_list.sort()
return lib_list
def normalize_fs_path(path: str) -> str:
"""Normalize filesystem paths with unicode in them"""
with objc.autorelease_pool():
normalized_path = NSString.fileSystemRepresentation(path)
return normalized_path.decode("utf8")
def findfiles(pattern, path_):
"""Returns list of filenames from path_ matched by pattern
shell pattern. Matching is case-insensitive.
If 'path_' is invalid/doesn't exist, returns []."""
if not os.path.isdir(path_):
return []
# See: https://gist.github.com/techtonik/5694830
# paths need to be normalized for unicode as filesystem returns unicode in NFD form
pattern = normalize_fs_path(pattern)
rule = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
files = [normalize_fs_path(p) for p in os.listdir(path_)]
return [name for name in files if rule.match(name)]
def _open_sql_file(dbname):
"""opens sqlite file dbname in read-only mode
returns tuple of (connection, cursor)"""
try:
dbpath = pathlib.Path(dbname).resolve()
conn = sqlite3.connect(f"{dbpath.as_uri()}?mode=ro", timeout=1, uri=True)
c = conn.cursor()
except sqlite3.Error as e:
sys.exit(f"An error occurred opening sqlite file: {e.args[0]} {dbname}")
return (conn, c)
def _db_is_locked(dbname):
"""check to see if a sqlite3 db is locked
returns True if database is locked, otherwise False
dbname: name of database to test"""
# first, check to see if lock file exists, if so, assume the file is locked
lock_name = f"{dbname}.lock"
if os.path.exists(lock_name):
logging.debug(f"{dbname} is locked")
return True
# no lock file so try to read from the database to see if it's locked
locked = None
try:
(conn, c) = _open_sql_file(dbname)
c.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;")
conn.close()
logging.debug(f"{dbname} is not locked")
locked = False
except:
logging.debug(f"{dbname} is locked")
locked = True
return locked
# OSXPHOTOS_XATTR_UUID = "com.osxphotos.uuid"
# def get_uuid_for_file(filepath):
# """ returns UUID associated with an exported file
# filepath: path to exported photo
# """
# attr = xattr.xattr(filepath)
# try:
# uuid_bytes = attr[OSXPHOTOS_XATTR_UUID]
# uuid_str = uuid_bytes.decode('utf-8')
# except KeyError:
# uuid_str = None
# return uuid_str
# def set_uuid_for_file(filepath, uuid):
# """ sets the UUID associated with an exported file
# filepath: path to exported photo
# uuid: uuid string for photo
# """
# if not os.path.exists(filepath):
# raise FileNotFoundError(f"Missing file: {filepath}")
# attr = xattr.xattr(filepath)
# uuid_bytes = bytes(uuid, 'utf-8')
# attr.set(OSXPHOTOS_XATTR_UUID, uuid_bytes)
def normalize_unicode(value):
"""normalize unicode data"""
if value is not None:
if isinstance(value, (tuple, list)):
return tuple(unicodedata.normalize(UNICODE_FORMAT, v) for v in value)
elif isinstance(value, str):
return unicodedata.normalize(UNICODE_FORMAT, value)
else:
return value
else:
return None
def increment_filename_with_count(
filepath: Union[str, pathlib.Path], count: int = 0
) -> str:
"""Return filename (1).ext, etc if filename.ext exists
If file exists in filename's parent folder with same stem as filename,
add (1), (2), etc. until a non-existing filename is found.
Args:
filepath: str or pathlib.Path; full path, including file name
count: int; starting increment value
Returns:
tuple of new filepath (or same if not incremented), count
Note: This obviously is subject to race condition so using with caution.
"""
dest = filepath if isinstance(filepath, pathlib.Path) else pathlib.Path(filepath)
dest_files = findfiles(f"{dest.stem}*", str(dest.parent))
dest_files = [normalize_fs_path(pathlib.Path(f).stem.lower()) for f in dest_files]
dest_new = dest.stem
if count:
dest_new = f"{dest.stem} ({count})"
while normalize_fs_path(dest_new.lower()) in dest_files:
count += 1
dest_new = f"{dest.stem} ({count})"
dest = dest.parent / f"{dest_new}{dest.suffix}"
return str(dest), count
def increment_filename(filepath: Union[str, pathlib.Path]) -> str:
"""Return filename (1).ext, etc if filename.ext exists
If file exists in filename's parent folder with same stem as filename,
add (1), (2), etc. until a non-existing filename is found.
Args:
filepath: str or pathlib.Path; full path, including file name
Returns:
new filepath (or same if not incremented)
Note: This obviously is subject to race condition so using with caution.
"""
new_filepath, _ = increment_filename_with_count(filepath)
return new_filepath
def expand_and_validate_filepath(path: str) -> str:
"""validate and expand ~ in filepath, also un-escapes spaces
Returns:
expanded path if path is valid file, else None
"""
path = re.sub(r"\\ ", " ", path)
path = pathlib.Path(path).expanduser()
if path.is_file():
return str(path)
return None
def load_function(pyfile: str, function_name: str) -> Callable:
"""Load function_name from python file pyfile"""
module_file = pathlib.Path(pyfile)
if not module_file.is_file():
raise FileNotFoundError(f"module {pyfile} does not appear to exist")
module_dir = module_file.parent or pathlib.Path(os.getcwd())
module_name = module_file.stem
# store old sys.path and ensure module_dir at beginning of path
syspath = sys.path
sys.path = [str(module_dir)] + syspath
module = importlib.import_module(module_name)
try:
func = getattr(module, function_name)
except AttributeError:
raise ValueError(f"'{function_name}' not found in module '{module_name}'")
finally:
# restore sys.path
sys.path = syspath
return func
| en | 0.743098 | Utility functions used in osxphotos Used only for testing Returns: logging.Logger object -- logging.Logger object for osxphotos Enable or disable debug logging returns True if debugging turned on (via _set_debug), otherwise, false do nothing (no operation) Returns string with filename and current line number in caller as '(filename): line_num' Will trim filename to just the name, dropping path, if any. # returns tuple of str containing OS version # e.g. 10.13.6 = ("10", "13", "6") returns true if file exists and is not a directory otherwise returns false returns folder_id and file_id needed to find location of edited photo and live photos for version <= Photos 4.0 # determine folder where Photos stores edited version # edited images are stored in: # Photos Library.photoslibrary/resources/media/version/XX/00/fullsizeoutput_Y.jpeg # where XX and Y are computed based on RKModelResources.modelId # file_id (Y in above example) is hex representation of model_id without leading 0x # folder_id (XX) in above example if first two chars of model_id converted to hex # and left padded with zeros if < 4 digits convert lat or lon in decimal degrees (dd) to degrees, minutes, seconds return tuple of int(deg), int(min), float(sec) convert latitude, longitude in degrees to degrees, minutes, seconds as string lat: latitude in degrees lon: longitude in degrees returns: string tuple in format ("51 deg 30' 12.86\" N", "0 deg 7' 54.50\" W") this is the same format used by exiftool's json format # TODO: add this to readme return the path to the system Photos library as string only works on MacOS 10.15 on earlier versions, returns None returns the path to the last opened Photos library If a library has never been opened, returns None # get the IPXDefaultLibraryURLBookmark from com.apple.Photos.plist # this is a serialized CFData object # use CFURLCreateByResolvingBookmarkData to de-serialize bookmark data into a CFURLRef # pylint: disable=no-member # pylint: disable=undefined-variable # the CFURLRef we got is a sruct that python treats as an array # I'd like to pass this to CFURLGetFileSystemRepresentation to get the path but # CFURLGetFileSystemRepresentation barfs when it gets an array from python instead of expected struct # first element is the path string in form: # file:///Users/username/Pictures/Photos%20Library.photoslibrary/ # now coerce the file URI back into an OS path # surely there must be a better way returns list of Photos libraries found on the system on MacOS < 10.15, this may omit some libraries # On 10.15, mdfind appears to find all libraries # On older MacOS versions, mdfind appears to ignore some libraries # glob to find libraries in ~/Pictures then mdfind to find all the others # TODO: make this more robust # On older OS, may not get all libraries so make sure we get the last one Normalize filesystem paths with unicode in them Returns list of filenames from path_ matched by pattern shell pattern. Matching is case-insensitive. If 'path_' is invalid/doesn't exist, returns []. 
# See: https://gist.github.com/techtonik/5694830 # paths need to be normalized for unicode as filesystem returns unicode in NFD form opens sqlite file dbname in read-only mode returns tuple of (connection, cursor) check to see if a sqlite3 db is locked returns True if database is locked, otherwise False dbname: name of database to test # first, check to see if lock file exists, if so, assume the file is locked # no lock file so try to read from the database to see if it's locked # OSXPHOTOS_XATTR_UUID = "com.osxphotos.uuid" # def get_uuid_for_file(filepath): # """ returns UUID associated with an exported file # filepath: path to exported photo # """ # attr = xattr.xattr(filepath) # try: # uuid_bytes = attr[OSXPHOTOS_XATTR_UUID] # uuid_str = uuid_bytes.decode('utf-8') # except KeyError: # uuid_str = None # return uuid_str # def set_uuid_for_file(filepath, uuid): # """ sets the UUID associated with an exported file # filepath: path to exported photo # uuid: uuid string for photo # """ # if not os.path.exists(filepath): # raise FileNotFoundError(f"Missing file: {filepath}") # attr = xattr.xattr(filepath) # uuid_bytes = bytes(uuid, 'utf-8') # attr.set(OSXPHOTOS_XATTR_UUID, uuid_bytes) normalize unicode data Return filename (1).ext, etc if filename.ext exists If file exists in filename's parent folder with same stem as filename, add (1), (2), etc. until a non-existing filename is found. Args: filepath: str or pathlib.Path; full path, including file name count: int; starting increment value Returns: tuple of new filepath (or same if not incremented), count Note: This obviously is subject to race condition so using with caution. Return filename (1).ext, etc if filename.ext exists If file exists in filename's parent folder with same stem as filename, add (1), (2), etc. until a non-existing filename is found. Args: filepath: str or pathlib.Path; full path, including file name Returns: new filepath (or same if not incremented) Note: This obviously is subject to race condition so using with caution. validate and expand ~ in filepath, also un-escapes spaces Returns: expanded path if path is valid file, else None Load function_name from python file pyfile # store old sys.path and ensure module_dir at beginning of path # restore sys.path | 2.285243 | 2 |
neon/transforms/linear.py | kashif/neon | 3 | 6630809 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Linear transform functions and classes.
"""
from neon.transforms.activation import Activation
class Linear(Activation):
"""
Embodiment of a linear activation function.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def apply_function(self, backend, inputs, outputs):
"""
Linear activation function. (no-op)
Arguments:
backend (Backend): The backend class to use for computation.
inputs (array_like): Input data to be transformed
outputs (array_like): Storage for the transformed output.
"""
return
def apply_derivative(self, backend, inputs, outputs):
"""
Linear activation function derivative (no-op).
Arguments:
backend (Backend): The backend class to use for computation.
inputs (array_like): Input data to be transformed
outputs (array_like): Storage for the transformed output.
"""
return
def fprop_func(self, backend, inputs, outputs):
"""
Function to apply during fprop
Arguments:
backend (Backend): The backend class to use for computation.
inputs (array_like): Input data to be transformed. This also acts
as storage for the output of the derivative
function.
outputs (array_like): Storage for the transformed output.
"""
return
def pre_act_buffer(self, backend, output, dtype):
"""
overrides the pre_act_buffer with output to save memory
Arguments:
backend (Backend): The backend class to use for computation.
output (array_like): Output data buffer.
dtype: dtype for pre_act_buffer
"""
return output
def bprop_func(self, backend, pre_act, error, skip_act=False):
"""
Function to perform during the bprop
Arguments:
backend (Backend): The backend class to use for computation.
pre_act (array_like): pre_activation buffer
error (array_like): error buffer
skip_act (Boolean): whether to skip the multiplication
"""
return
| # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Linear transform functions and classes.
"""
from neon.transforms.activation import Activation
class Linear(Activation):
"""
Embodiment of a linear activation function.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def apply_function(self, backend, inputs, outputs):
"""
Linear activation function. (no-op)
Arguments:
backend (Backend): The backend class to use for computation.
inputs (array_like): Input data to be transformed
outputs (array_like): Storage for the transformed output.
"""
return
def apply_derivative(self, backend, inputs, outputs):
"""
Linear activation function derivative (no-op).
Arguments:
backend (Backend): The backend class to use for computation.
inputs (array_like): Input data to be transformed
outputs (array_like): Storage for the transformed output.
"""
return
def fprop_func(self, backend, inputs, outputs):
"""
Function to apply during fprop
Arguments:
backend (Backend): The backend class to use for computation.
inputs (array_like): Input data to be transformed. This also acts
as storage for the output of the derivative
function.
outputs (array_like): Storage for the transformed output.
"""
return
def pre_act_buffer(self, backend, output, dtype):
"""
overrides the pre_act_buffer with output to save memory
Arguments:
backend (Backend): The backend class to use for computation.
output (array_like): Output data buffer.
dtype: dtype for pre_act_buffer
"""
return output
def bprop_func(self, backend, pre_act, error, skip_act=False):
"""
Function to perform during the bprop
Arguments:
backend (Backend): The backend class to use for computation.
pre_act (array_like): pre_activation buffer
error (array_like): error buffer
skip_act (Boolean): whether to skip the multiplication
"""
return
| en | 0.656751 | # ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- Linear transform functions and classes. Embodiment of a linear activation function. Linear activation function. (no-op) Arguments: backend (Backend): The backend class to use for computation. inputs (array_like): Input data to be transformed outputs (array_like): Storage for the transformed output. Linear activation function derivative (no-op). Arguments: backend (Backend): The backend class to use for computation. inputs (array_like): Input data to be transformed outputs (array_like): Storage for the transformed output. Function to apply during fprop Arguments: backend (Backend): The backend class to use for computation. inputs (array_like): Input data to be transformed. This also acts as storage for the output of the derivative function. outputs (array_like): Storage for the transformed output. overrides the pre_act_buffer with output to save memory Arguments: backend (Backend): The backend class to use for computation. output (array_like): Output data buffer. dtype: dtype for pre_act_buffer Function to perform during the bprop Arguments: backend (Backend): The backend class to use for computation. pre_act (array_like): pre_activation buffer error (array_like): error buffer skip_act (Boolean): whether to skip the multiplication | 2.2837 | 2 |
lib/ui/Ui.py | mattermccrea/expensive-skeleton-free | 0 | 6630810 | from datetime import datetime
t = datetime.now().strftime("\033[1;33m[\033[1;37m%H:%M\033[1;33m][")
class Sec:
info = t+"\033[1;32mINFO\033[1;33m]\033[1;37m "
fail = t+"\033[1;31mFAIL\033[1;33m]\033[1;31m "
warn = t+"\033[1;31mWARN\033[1;33m]\033[1;30m "
def Banner():
print r"""
_____ _ _ _
| __| |_ ___| |___| |_ ___ ___
|__ | '_| -_| | -_| _| . | |
|_____|_,_|___|_|___|_| |___|_|_|
"""
print """
\033[1;31m+\033[1;33m--=[ \033[1;31me\033[1;30mxpensive \033[1;31ms\033[1;30mkeleton\033[1;32m free
\033[1;31m+\033[1;33m--=[ \033[1;30mBy : 8 Ball, <NAME>\n\n
""" | from datetime import datetime
t = datetime.now().strftime("\033[1;33m[\033[1;37m%H:%M\033[1;33m][")
class Sec:
info = t+"\033[1;32mINFO\033[1;33m]\033[1;37m "
fail = t+"\033[1;31mFAIL\033[1;33m]\033[1;31m "
warn = t+"\033[1;31mWARN\033[1;33m]\033[1;30m "
def Banner():
print r"""
_____ _ _ _
| __| |_ ___| |___| |_ ___ ___
|__ | '_| -_| | -_| _| . | |
|_____|_,_|___|_|___|_| |___|_|_|
"""
print """
\033[1;31m+\033[1;33m--=[ \033[1;31me\033[1;30mxpensive \033[1;31ms\033[1;30mkeleton\033[1;32m free
\033[1;31m+\033[1;33m--=[ \033[1;30mBy : 8 Ball, <NAME>\n\n
""" | de | 0.211776 | _____ _ _ _
| __| |_ ___| |___| |_ ___ ___
|__ | '_| -_| | -_| _| . | |
|_____|_,_|___|_|___|_| |___|_|_| \033[1;31m+\033[1;33m--=[ \033[1;31me\033[1;30mxpensive \033[1;31ms\033[1;30mkeleton\033[1;32m free
\033[1;31m+\033[1;33m--=[ \033[1;30mBy : 8 Ball, <NAME>\n\n | 2.627232 | 3 |
Plugins/Robot1857v2.py | pompom2626/msbot | 1 | 6630811 | import os, sys
sys.path.append('..')
import datetime  # datetime.datetime.now() is used throughout; make the dependency explicit instead of relying on a wildcard import
import uuid
import pandas as pd
import pandas.io.sql as pdsql
from pandas import DataFrame, Series
# from pandas.lib import Timestamp
import sqlite3
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QApplication, QLabel, QLineEdit, QMainWindow, QDialog, QMessageBox, QProgressBar)
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from XASessions import *
from XAQuaries import *
from XAReals import *
from CRobot import *
from Utils import *
__PATHNAME__ = os.path.dirname(sys.argv[0])
__PLUGINDIR__ = os.path.abspath(__PATHNAME__)
ROBOT_NAME = "Robot1857v2"
Ui_Robot1857v2, QtBaseClass_Robot1857v2 = uic.loadUiType("%s\\Plugins\\Robot1857v2.ui" % __PLUGINDIR__)
class CUIRobot1857v2(QDialog, Ui_Robot1857v2):
def __init__(self, parent=None):
super(__class__, self).__init__(parent)
self.setupUi(self)
self.parent = parent
def SearchFile(self):
pathname = os.path.dirname(sys.argv[0])
RESDIR = "%s\\ACF\\" % os.path.abspath(pathname)
        fname = QFileDialog.getOpenFileName(self, 'Open file', RESDIR, "조건검색(*.acf)")
self.lineEdit_filename.setText(fname[0])
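# Robot1857v2: trades stocks matched by an eBest t1857 condition search (ACF file).
# Entries buy at the current tick price with the configured order type; exits use a
# trailing stop off the post-buy high plus an optional time-based bulk sell.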
class Robot1857v2(CRobot):
def instance(self):
UUID = uuid.uuid4().hex
return Robot1857v2(Name=ROBOT_NAME, UUID=UUID)
def __init__(self, Name, UUID):
super(__class__, self).__init__(Name, UUID)
self.parent = None
self.단위투자금 = 100 * 10000
self.매수방법 = '00'
self.매도방법 = '00'
self.시장가 = '03'
self.포트폴리오수 = 10
self.trailstop = 0.01
self.ACF파일 = ''
self.일괄매도시각 = '15:15:00'
self.매수거래시간STR = '''09:00:00-11:00:00,
12:00:00-13:00:00,
14:00:00-15:20:00'''
self.매도거래시간STR = '''09:00:00-11:00:00,
12:00:00-13:00:00,
14:00:00-15:20:00'''
self.매수거래중 = False
self.매도거래중 = False
self.금일매도종목 = []
self.주문번호리스트 = []
self.매수Lock = dict()
self.매도Lock = dict()
self.QueryInit()
self.clock = None
self.전량매도 = False
def QueryInit(self):
self.XQ_t1857 = None
self.XR_S3_ = None
self.XR_K3_ = None
self.QA_CSPAT00600 = None
self.XR_SC1 = None # 체결
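    # modal: settings dialog; on OK it parses unit investment, order types, portfolio size,
    # trailing-stop ratio, ACF file path and the buy/sell trading windows ("HH:MM:SS-HH:MM:SS, ...").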
def modal(self, parent):
ui = CUIRobot1857v2(parent=parent)
ui.setModal(True)
ui.lineEdit_name.setText(self.Name)
ui.lineEdit_unit.setText(str(self.단위투자금 // 10000))
ui.lineEdit_trailstop.setText(str(self.trailstop))
ui.lineEdit_portsize.setText(str(self.포트폴리오수))
ui.comboBox_buy_sHogaGb.setCurrentIndex(ui.comboBox_buy_sHogaGb.findText(self.매수방법, flags=Qt.MatchContains))
ui.comboBox_sell_sHogaGb.setCurrentIndex(ui.comboBox_sell_sHogaGb.findText(self.매도방법, flags=Qt.MatchContains))
ui.lineEdit_filename.setText(self.ACF파일)
ui.plainTextEdit_buytime.setPlainText(self.매수거래시간STR)
ui.plainTextEdit_selltime.setPlainText(self.매도거래시간STR)
ui.lineEdit_sellall.setText(self.일괄매도시각)
r = ui.exec_()
if r == 1:
self.Name = ui.lineEdit_name.text().strip()
self.단위투자금 = int(ui.lineEdit_unit.text().strip()) * 10000
self.매수방법 = ui.comboBox_buy_sHogaGb.currentText().strip()[0:2]
self.매도방법 = ui.comboBox_sell_sHogaGb.currentText().strip()[0:2]
self.포트폴리오수 = int(ui.lineEdit_portsize.text().strip())
self.ACF파일 = ui.lineEdit_filename.text().strip()
self.trailstop = float(ui.lineEdit_trailstop.text().strip())
self.매수거래시간STR = ui.plainTextEdit_buytime.toPlainText().strip()
self.매도거래시간STR = ui.plainTextEdit_selltime.toPlainText().strip()
매수거래시간1 = self.매수거래시간STR
매수거래시간2 = [x.strip() for x in 매수거래시간1.split(',')]
result = []
for temp in 매수거래시간2:
result.append([x.strip() for x in temp.split('-')])
self.매수거래시간 = result
매도거래시간1 = self.매도거래시간STR
매도거래시간2 = [x.strip() for x in 매도거래시간1.split(',')]
result = []
for temp in 매도거래시간2:
result.append([x.strip() for x in temp.split('-')])
self.매도거래시간 = result
self.일괄매도시각 = ui.lineEdit_sellall.text().strip()
return r
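    # OnReceiveMessage: prints API status/error messages with a timestamp.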
def OnReceiveMessage(self, systemError, messageCode, message):
일자 = "{:%Y-%m-%d %H:%M:%S.%f}".format(datetime.datetime.now())
클래스이름 = self.__class__.__name__
print(일자, 클래스이름, systemError, messageCode, message)
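    # OnReceiveData: t1857 replies subscribe/unsubscribe real-time quotes for matched stocks
    # (kept while held or locked); CSPAT00600 replies record the order number of a new order.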
def OnReceiveData(self, szTrCode, result):
        # t1857 reply: condition-search results
if szTrCode == 't1857':
if self.running:
식별자, 검색종목수, 포착시간, 실시간키, df = result
if 식별자 == self.XQ_t1857.식별자:
for idx, row in df[['종목코드','종목상태']].iterrows():
code, flag = row
if type(code) == str:
if code in self.kospi_codes and flag in ['N','R']:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=code)
if code in self.kospi_codes and flag in ['O']:
if type(self.XR_S3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_S3_.UnadviseRealDataWithKey(종목코드=code)
if code in self.kosdaq_codes and flag in ['N','R']:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=code)
if code in self.kosdaq_codes and flag in ['O']:
if type(self.XR_K3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_K3_.UnadviseRealDataWithKey(종목코드=code)
                    # also subscribe to real-time data for every stock currently held in the portfolio
for code in self.portfolio.keys():
if code in self.kospi_codes:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=code)
if code in self.kosdaq_codes:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=code)
        # CSPAT00600 reply: response to an order this robot placed
if szTrCode == 'CSPAT00600':
df, df1 = result
if len(df1) > 0:
주문번호 = df1['주문번호'].values[0]
if 주문번호 != '0':
                    # remember the order number so later SC1 fill events can be matched to our own orders
self.주문번호리스트.append(str(주문번호))
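    # OnReceiveSearchRealData: per-stock real-time updates from the condition search,
    # handled the same way as the batch t1857 reply above.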
def OnReceiveSearchRealData(self, szTrCode, lst):
식별자, result = lst
if 식별자 == self.XQ_t1857.식별자:
try:
code = result['종목코드']
flag = result['종목상태']
if type(code) == str:
if code in self.kospi_codes and flag in ['N', 'R']:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=code)
if code in self.kospi_codes and flag in ['O']:
if type(self.XR_S3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_S3_.UnadviseRealDataWithKey(종목코드=code)
if code in self.kosdaq_codes and flag in ['N', 'R']:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=code)
if code in self.kosdaq_codes and flag in ['O']:
if type(self.XR_K3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_K3_.UnadviseRealDataWithKey(종목코드=code)
except Exception as e:
print(e)
finally:
pass
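    # OnReceiveRealData: SC1 fill events update the portfolio and the fill log; K3_/S3_ price
    # ticks trigger buys for newly matched stocks and trailing-stop sells for held ones.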
def OnReceiveRealData(self, szTrCode, result):
if szTrCode == 'SC1':
체결시각 = result['체결시각']
단축종목번호 = result['단축종목번호'].strip().replace('A','')
종목명 = result['종목명']
매매구분 = result['매매구분']
주문번호 = result['주문번호']
체결번호 = result['체결번호']
주문수량 = int(result['주문수량'])
주문가격 = int(result['주문가격'])
체결수량 = int(result['체결수량'])
체결가격 = int(result['체결가격'])
주문평균체결가격 = int(result['주문평균체결가격'])
주문계좌번호 = result['주문계좌번호']
            # handle fills for orders this robot placed
if 주문번호 in self.주문번호리스트:
if 매매구분 == '1' or 매매구분 == 1: # 매도
P = self.portfolio.get(단축종목번호, None)
if P != None:
P.수량 = P.수량 - 체결수량
if P.수량 == 0:
self.portfolio.pop(단축종목번호)
self.매도Lock.pop(단축종목번호)
                            #TODO: left commented out for now - refreshing the view on every fill hurts UI responsiveness during fast trading
# self.parent.RobotView()
# ToTelegram(__class__.__name__ + "매도 : %s 체결수량:%s 체결가격:%s" % (종목명, 주문수량, 주문평균체결가격))
else:
print("매도 주문이 없는데 매도가 들어옴")
if 매매구분 == '2' or 매매구분 == 2: # 매수
P = self.portfolio.get(단축종목번호, None)
                    if P is None:
                        # first fill for this order: create the portfolio entry and keep a reference to it
                        P = CPortStock(종목코드=단축종목번호, 종목명=종목명, 매수가=주문평균체결가격, 수량=체결수량, 매수일=datetime.datetime.now())
                        self.portfolio[단축종목번호] = P
                        if P.수량 == 주문수량:
                            self.매수Lock.pop(단축종목번호)
else:
P.수량 = P.수량 + 체결수량
if P.수량 == 주문수량:
self.매수Lock.pop(단축종목번호)
            # the stock may not be registered yet because of the time gap between the condition search and the fill,
            # so (re)subscribe to real-time prices for the filled stock
if 단축종목번호 in self.kospi_codes:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=단축종목번호)
if 단축종목번호 in self.kosdaq_codes:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=단축종목번호)
if self.parent is not None:
self.parent.RobotView()
일자 = "{:%Y-%m-%d}".format(datetime.datetime.now())
data = [self.Name, self.UUID, 일자, 체결시각, 단축종목번호, 종목명, 매매구분, 주문번호, 체결번호, 주문수량, 주문가격, 체결수량, 체결가격, 주문평균체결가격]
self.체결기록(data=data)
if szTrCode in ['K3_', 'S3_']:
if self.매수거래중 == True or self.매도거래중 == True:
단축코드 = result['단축코드']
try:
종목명 = self.종목코드테이블.query("단축코드=='%s'" % 단축코드)['종목명'].values[0]
except Exception as e:
종목명 = ''
현재가 = result['현재가']
고가 = result['고가']
수량 = self.단위투자금 // 현재가
현재시각 = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if self.parent is not None:
self.parent.statusbar.showMessage("[%s]%s %s %s" % (현재시각, 단축코드, 종목명, 현재가))
P = self.portfolio.get(단축코드, None)
매수락 = self.매수Lock.get(단축코드, None)
매도락 = self.매도Lock.get(단축코드, None)
if P == None:
if 단축코드 not in self.금일매도종목 and 수량 > 0:
if self.매수거래중 == True:
lst = set(self.portfolio.keys()).union(self.매수Lock.keys())
if len(lst) < self.포트폴리오수:
if 매수락 == None:
self.매수Lock[단축코드] = ''
                                    # not in the portfolio yet, so buy at the current price
self.QA_CSPAT00600.Query(계좌번호=self.계좌번호,입력비밀번호=self.비밀번호,종목번호=단축코드,주문수량=수량,주문가=현재가,매매구분=self.매수,
호가유형코드=self.매수방법, 신용거래코드=self.신용거래코드,주문조건구분=self.조건없음)
# ToTelegram(__class__.__name__ + "매수주문 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 현재가))
else:
if self.매도거래중 == True:
if 현재가 > P.매수후고가:
P.매수후고가 = 현재가
if 매도락 == None:
수량 = P.수량
if 현재가 < P.매수후고가 * (1-self.trailstop):
self.매도Lock[단축코드] = ''
self.금일매도종목.append(단축코드)
self.QA_CSPAT00600.Query(계좌번호=self.계좌번호,입력비밀번호=self.비밀번호,종목번호=단축코드,주문수량=수량,주문가=현재가,매매구분=self.매도,
호가유형코드=self.시장가, 신용거래코드=self.신용거래코드,주문조건구분=self.조건없음)
                                #TODO: when orders go out back to back, Telegram's roughly one-second send latency
                                # can cause signals coming from eBest to be missed, so the notification stays disabled
# ToTelegram(__class__.__name__ + "매도주문 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 현재가))
def OnClockTick(self):
current = datetime.datetime.now()
current_str = current.strftime('%H:%M:%S')
거래중 = False
for t in self.매수거래시간:
if t[0] <= current_str and current_str <= t[1]:
거래중 = True
self.매수거래중 = 거래중
거래중 = False
for t in self.매도거래시간:
if t[0] <= current_str and current_str <= t[1]:
거래중 = True
self.매도거래중 = 거래중
        #TODO: forced liquidation at the configured cut-off time
#------------------------------------------------------------
        if self.일괄매도시각.strip() != "":
if self.일괄매도시각 < current_str and self.전량매도 == False:
self.전량매도 = True
            #TODO: should outstanding orders be cancelled here as well?
for k,v in self.portfolio.items():
단축코드 = v.종목코드
수량 = v.수량
종목명 = v.종목명
주문가 = '0'
호가유형코드 = '03'
self.매도Lock[단축코드] = ''
self.금일매도종목.append(단축코드)
self.QA_CSPAT00600.Query(계좌번호=self.계좌번호, 입력비밀번호=self.비밀번호, 종목번호=단축코드, 주문수량=수량, 주문가=주문가, 매매구분=self.매도,
호가유형코드=호가유형코드, 신용거래코드=self.신용거래코드, 주문조건구분=self.조건없음)
# ToTelegram(__class__.__name__ + "일괄매도 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 주문가))
def Run(self, flag=True, parent=None):
if self.running == flag:
return
self.parent = parent
self.running = flag
ret = 0
if flag == True:
ToTelegram("로직 [%s]을 시작합니다." % (__class__.__name__))
self.clock = QtCore.QTimer()
self.clock.timeout.connect(self.OnClockTick)
self.clock.start(1000)
self.전량매도 = False
self.금일매도종목 = []
self.주문번호리스트 = []
self.매수Lock = dict()
self.매도Lock = dict()
with sqlite3.connect(self.DATABASE) as conn:
query = 'select 단축코드,종목명,ETF구분,구분 from 종목코드'
self.종목코드테이블 = pdsql.read_sql_query(query, con=conn)
self.kospi_codes = self.종목코드테이블.query("구분=='1'")['단축코드'].values.tolist()
self.kosdaq_codes = self.종목코드테이블.query("구분=='2'")['단축코드'].values.tolist()
self.XQ_t1857 = t1857(parent=self, 식별자=uuid.uuid4().hex)
self.XQ_t1857.Query(실시간구분='1', 종목검색구분='F', 종목검색입력값=self.ACF파일)
self.QA_CSPAT00600 = CSPAT00600(parent=self)
self.XR_S3_ = S3_(parent=self)
self.XR_K3_ = K3_(parent=self)
self.XR_SC1 = SC1(parent=self)
self.XR_SC1.AdviseRealData()
else:
if self.XQ_t1857 is not None:
self.XQ_t1857.RemoveService()
self.XQ_t1857 = None
if self.clock is not None:
try:
self.clock.stop()
except Exception as e:
pass
finally:
self.clock = None
try:
if self.XR_S3_ != None:
self.XR_S3_.UnadviseRealData()
except Exception as e:
pass
finally:
self.XR_S3_ = None
try:
if self.XR_K3_ != None:
self.XR_K3_.UnadviseRealData()
except Exception as e:
pass
finally:
self.XR_K3_ = None
try:
if self.XR_SC1 != None:
self.XR_SC1.UnadviseRealData()
except Exception as e:
pass
finally:
self.XR_SC1 = None
self.QueryInit()
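# robot_loader: factory the plugin host presumably calls to obtain a fresh robot instance, e.g.
#   robot = robot_loader()
#   r = robot.modal(parent=main_window)   # configure (main_window is a hypothetical host window)
#   robot.Run(True, parent=main_window)   # start trading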
def robot_loader():
UUID = uuid.uuid4().hex
robot = Robot1857v2(Name=ROBOT_NAME, UUID=UUID)
return robot | import os, sys
sys.path.append('..')
import uuid
import pandas as pd
import pandas.io.sql as pdsql
from pandas import DataFrame, Series
# from pandas.lib import Timestamp
import sqlite3
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QApplication, QLabel, QLineEdit, QMainWindow, QDialog, QMessageBox, QProgressBar)
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from XASessions import *
from XAQuaries import *
from XAReals import *
from CRobot import *
from Utils import *
__PATHNAME__ = os.path.dirname(sys.argv[0])
__PLUGINDIR__ = os.path.abspath(__PATHNAME__)
ROBOT_NAME = "Robot1857v2"
Ui_Robot1857v2, QtBaseClass_Robot1857v2 = uic.loadUiType("%s\\Plugins\\Robot1857v2.ui" % __PLUGINDIR__)
class CUIRobot1857v2(QDialog, Ui_Robot1857v2):
def __init__(self, parent=None):
super(__class__, self).__init__(parent)
self.setupUi(self)
self.parent = parent
def SearchFile(self):
pathname = os.path.dirname(sys.argv[0])
RESDIR = "%s\\ACF\\" % os.path.abspath(pathname)
        fname = QFileDialog.getOpenFileName(self, 'Open file', RESDIR, "조건검색(*.acf)")
self.lineEdit_filename.setText(fname[0])
class Robot1857v2(CRobot):
def instance(self):
UUID = uuid.uuid4().hex
return Robot1857v2(Name=ROBOT_NAME, UUID=UUID)
def __init__(self, Name, UUID):
super(__class__, self).__init__(Name, UUID)
self.parent = None
self.단위투자금 = 100 * 10000
self.매수방법 = '00'
self.매도방법 = '00'
self.시장가 = '03'
self.포트폴리오수 = 10
self.trailstop = 0.01
self.ACF파일 = ''
self.일괄매도시각 = '15:15:00'
self.매수거래시간STR = '''09:00:00-11:00:00,
12:00:00-13:00:00,
14:00:00-15:20:00'''
self.매도거래시간STR = '''09:00:00-11:00:00,
12:00:00-13:00:00,
14:00:00-15:20:00'''
self.매수거래중 = False
self.매도거래중 = False
self.금일매도종목 = []
self.주문번호리스트 = []
self.매수Lock = dict()
self.매도Lock = dict()
self.QueryInit()
self.clock = None
self.전량매도 = False
def QueryInit(self):
self.XQ_t1857 = None
self.XR_S3_ = None
self.XR_K3_ = None
self.QA_CSPAT00600 = None
self.XR_SC1 = None # 체결
def modal(self, parent):
ui = CUIRobot1857v2(parent=parent)
ui.setModal(True)
ui.lineEdit_name.setText(self.Name)
ui.lineEdit_unit.setText(str(self.단위투자금 // 10000))
ui.lineEdit_trailstop.setText(str(self.trailstop))
ui.lineEdit_portsize.setText(str(self.포트폴리오수))
ui.comboBox_buy_sHogaGb.setCurrentIndex(ui.comboBox_buy_sHogaGb.findText(self.매수방법, flags=Qt.MatchContains))
ui.comboBox_sell_sHogaGb.setCurrentIndex(ui.comboBox_sell_sHogaGb.findText(self.매도방법, flags=Qt.MatchContains))
ui.lineEdit_filename.setText(self.ACF파일)
ui.plainTextEdit_buytime.setPlainText(self.매수거래시간STR)
ui.plainTextEdit_selltime.setPlainText(self.매도거래시간STR)
ui.lineEdit_sellall.setText(self.일괄매도시각)
r = ui.exec_()
if r == 1:
self.Name = ui.lineEdit_name.text().strip()
self.단위투자금 = int(ui.lineEdit_unit.text().strip()) * 10000
self.매수방법 = ui.comboBox_buy_sHogaGb.currentText().strip()[0:2]
self.매도방법 = ui.comboBox_sell_sHogaGb.currentText().strip()[0:2]
self.포트폴리오수 = int(ui.lineEdit_portsize.text().strip())
self.ACF파일 = ui.lineEdit_filename.text().strip()
self.trailstop = float(ui.lineEdit_trailstop.text().strip())
self.매수거래시간STR = ui.plainTextEdit_buytime.toPlainText().strip()
self.매도거래시간STR = ui.plainTextEdit_selltime.toPlainText().strip()
매수거래시간1 = self.매수거래시간STR
매수거래시간2 = [x.strip() for x in 매수거래시간1.split(',')]
result = []
for temp in 매수거래시간2:
result.append([x.strip() for x in temp.split('-')])
self.매수거래시간 = result
매도거래시간1 = self.매도거래시간STR
매도거래시간2 = [x.strip() for x in 매도거래시간1.split(',')]
result = []
for temp in 매도거래시간2:
result.append([x.strip() for x in temp.split('-')])
self.매도거래시간 = result
self.일괄매도시각 = ui.lineEdit_sellall.text().strip()
return r
def OnReceiveMessage(self, systemError, messageCode, message):
일자 = "{:%Y-%m-%d %H:%M:%S.%f}".format(datetime.datetime.now())
클래스이름 = self.__class__.__name__
print(일자, 클래스이름, systemError, messageCode, message)
def OnReceiveData(self, szTrCode, result):
# 종목검색
if szTrCode == 't1857':
if self.running:
식별자, 검색종목수, 포착시간, 실시간키, df = result
if 식별자 == self.XQ_t1857.식별자:
for idx, row in df[['종목코드','종목상태']].iterrows():
code, flag = row
if type(code) == str:
if code in self.kospi_codes and flag in ['N','R']:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=code)
if code in self.kospi_codes and flag in ['O']:
if type(self.XR_S3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_S3_.UnadviseRealDataWithKey(종목코드=code)
if code in self.kosdaq_codes and flag in ['N','R']:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=code)
if code in self.kosdaq_codes and flag in ['O']:
if type(self.XR_K3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_K3_.UnadviseRealDataWithKey(종목코드=code)
# 현재 가지고 있는 포트폴리오의 실시간데이타를 받는다.
for code in self.portfolio.keys():
if code in self.kospi_codes:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=code)
if code in self.kosdaq_codes:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=code)
# 체결
if szTrCode == 'CSPAT00600':
df, df1 = result
if len(df1) > 0:
주문번호 = df1['주문번호'].values[0]
if 주문번호 != '0':
# 주문번호처리
self.주문번호리스트.append(str(주문번호))
def OnReceiveSearchRealData(self, szTrCode, lst):
식별자, result = lst
if 식별자 == self.XQ_t1857.식별자:
try:
code = result['종목코드']
flag = result['종목상태']
if type(code) == str:
if code in self.kospi_codes and flag in ['N', 'R']:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=code)
if code in self.kospi_codes and flag in ['O']:
if type(self.XR_S3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_S3_.UnadviseRealDataWithKey(종목코드=code)
if code in self.kosdaq_codes and flag in ['N', 'R']:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=code)
if code in self.kosdaq_codes and flag in ['O']:
if type(self.XR_K3_) is not type(None):
if code not in self.portfolio.keys() and code not in self.매수Lock.keys() and code not in self.매도Lock.keys():
self.XR_K3_.UnadviseRealDataWithKey(종목코드=code)
except Exception as e:
print(e)
finally:
pass
def OnReceiveRealData(self, szTrCode, result):
if szTrCode == 'SC1':
체결시각 = result['체결시각']
단축종목번호 = result['단축종목번호'].strip().replace('A','')
종목명 = result['종목명']
매매구분 = result['매매구분']
주문번호 = result['주문번호']
체결번호 = result['체결번호']
주문수량 = int(result['주문수량'])
주문가격 = int(result['주문가격'])
체결수량 = int(result['체결수량'])
체결가격 = int(result['체결가격'])
주문평균체결가격 = int(result['주문평균체결가격'])
주문계좌번호 = result['주문계좌번호']
# 내가 주문한 것이 체결된 경우 처리
if 주문번호 in self.주문번호리스트:
if 매매구분 == '1' or 매매구분 == 1: # 매도
P = self.portfolio.get(단축종목번호, None)
if P != None:
P.수량 = P.수량 - 체결수량
if P.수량 == 0:
self.portfolio.pop(단축종목번호)
self.매도Lock.pop(단축종목번호)
#TODO: 빠른거래시 화면의 응답속도도 영향을 주므로 일단은 커멘트처리
# self.parent.RobotView()
# ToTelegram(__class__.__name__ + "매도 : %s 체결수량:%s 체결가격:%s" % (종목명, 주문수량, 주문평균체결가격))
else:
print("매도 주문이 없는데 매도가 들어옴")
if 매매구분 == '2' or 매매구분 == 2: # 매수
P = self.portfolio.get(단축종목번호, None)
if P== None:
self.portfolio[단축종목번호] = CPortStock(종목코드=단축종목번호, 종목명=종목명, 매수가=주문평균체결가격, 수량=체결수량, 매수일=datetime.datetime.now())
if P.수량 == 주문수량:
self.매수Lock.pop(단축종목번호)
else:
P.수량 = P.수량 + 체결수량
if P.수량 == 주문수량:
self.매수Lock.pop(단축종목번호)
# 조건검색과 체결사이에 시간 간격차 때문에 등록이 안되어 있을수도 있음
# 체결된 종목은 실시간 가격을 받는다.
if 단축종목번호 in self.kospi_codes:
if type(self.XR_S3_) is not type(None):
self.XR_S3_.AdviseRealData(종목코드=단축종목번호)
if 단축종목번호 in self.kosdaq_codes:
if type(self.XR_K3_) is not type(None):
self.XR_K3_.AdviseRealData(종목코드=단축종목번호)
if self.parent is not None:
self.parent.RobotView()
일자 = "{:%Y-%m-%d}".format(datetime.datetime.now())
data = [self.Name, self.UUID, 일자, 체결시각, 단축종목번호, 종목명, 매매구분, 주문번호, 체결번호, 주문수량, 주문가격, 체결수량, 체결가격, 주문평균체결가격]
self.체결기록(data=data)
if szTrCode in ['K3_', 'S3_']:
if self.매수거래중 == True or self.매도거래중 == True:
단축코드 = result['단축코드']
try:
종목명 = self.종목코드테이블.query("단축코드=='%s'" % 단축코드)['종목명'].values[0]
except Exception as e:
종목명 = ''
현재가 = result['현재가']
고가 = result['고가']
수량 = self.단위투자금 // 현재가
현재시각 = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if self.parent is not None:
self.parent.statusbar.showMessage("[%s]%s %s %s" % (현재시각, 단축코드, 종목명, 현재가))
P = self.portfolio.get(단축코드, None)
매수락 = self.매수Lock.get(단축코드, None)
매도락 = self.매도Lock.get(단축코드, None)
if P == None:
if 단축코드 not in self.금일매도종목 and 수량 > 0:
if self.매수거래중 == True:
lst = set(self.portfolio.keys()).union(self.매수Lock.keys())
if len(lst) < self.포트폴리오수:
if 매수락 == None:
self.매수Lock[단축코드] = ''
# 포트폴리오에 없으므로 현재가에 매수
self.QA_CSPAT00600.Query(계좌번호=self.계좌번호,입력비밀번호=self.비밀번호,종목번호=단축코드,주문수량=수량,주문가=현재가,매매구분=self.매수,
호가유형코드=self.매수방법, 신용거래코드=self.신용거래코드,주문조건구분=self.조건없음)
# ToTelegram(__class__.__name__ + "매수주문 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 현재가))
else:
if self.매도거래중 == True:
if 현재가 > P.매수후고가:
P.매수후고가 = 현재가
if 매도락 == None:
수량 = P.수량
if 현재가 < P.매수후고가 * (1-self.trailstop):
self.매도Lock[단축코드] = ''
self.금일매도종목.append(단축코드)
self.QA_CSPAT00600.Query(계좌번호=self.계좌번호,입력비밀번호=self.비밀번호,종목번호=단축코드,주문수량=수량,주문가=현재가,매매구분=self.매도,
호가유형코드=self.시장가, 신용거래코드=self.신용거래코드,주문조건구분=self.조건없음)
#TODO: 주문이 연속적으로 나가는 경우
# 텔레그렘의 메세지 전송속도가 약 1초이기 때문에
# 이베스트에서 오는 신호를 놓치는 경우가 있다.
# ToTelegram(__class__.__name__ + "매도주문 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 현재가))
def OnClockTick(self):
current = datetime.datetime.now()
current_str = current.strftime('%H:%M:%S')
거래중 = False
for t in self.매수거래시간:
if t[0] <= current_str and current_str <= t[1]:
거래중 = True
self.매수거래중 = 거래중
거래중 = False
for t in self.매도거래시간:
if t[0] <= current_str and current_str <= t[1]:
거래중 = True
self.매도거래중 = 거래중
#TODO: 특정시간의 강제매도
#------------------------------------------------------------
if self.일괄매도시각.strip() is not "":
if self.일괄매도시각 < current_str and self.전량매도 == False:
self.전량매도 = True
#TODO:취소주문 ???
for k,v in self.portfolio.items():
단축코드 = v.종목코드
수량 = v.수량
종목명 = v.종목명
주문가 = '0'
호가유형코드 = '03'
self.매도Lock[단축코드] = ''
self.금일매도종목.append(단축코드)
self.QA_CSPAT00600.Query(계좌번호=self.계좌번호, 입력비밀번호=self.비밀번호, 종목번호=단축코드, 주문수량=수량, 주문가=주문가, 매매구분=self.매도,
호가유형코드=호가유형코드, 신용거래코드=self.신용거래코드, 주문조건구분=self.조건없음)
# ToTelegram(__class__.__name__ + "일괄매도 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 주문가))
def Run(self, flag=True, parent=None):
if self.running == flag:
return
self.parent = parent
self.running = flag
ret = 0
if flag == True:
ToTelegram("로직 [%s]을 시작합니다." % (__class__.__name__))
self.clock = QtCore.QTimer()
self.clock.timeout.connect(self.OnClockTick)
self.clock.start(1000)
self.전량매도 = False
self.금일매도종목 = []
self.주문번호리스트 = []
self.매수Lock = dict()
self.매도Lock = dict()
with sqlite3.connect(self.DATABASE) as conn:
query = 'select 단축코드,종목명,ETF구분,구분 from 종목코드'
self.종목코드테이블 = pdsql.read_sql_query(query, con=conn)
self.kospi_codes = self.종목코드테이블.query("구분=='1'")['단축코드'].values.tolist()
self.kosdaq_codes = self.종목코드테이블.query("구분=='2'")['단축코드'].values.tolist()
self.XQ_t1857 = t1857(parent=self, 식별자=uuid.uuid4().hex)
self.XQ_t1857.Query(실시간구분='1', 종목검색구분='F', 종목검색입력값=self.ACF파일)
self.QA_CSPAT00600 = CSPAT00600(parent=self)
self.XR_S3_ = S3_(parent=self)
self.XR_K3_ = K3_(parent=self)
self.XR_SC1 = SC1(parent=self)
self.XR_SC1.AdviseRealData()
else:
if self.XQ_t1857 is not None:
self.XQ_t1857.RemoveService()
self.XQ_t1857 = None
if self.clock is not None:
try:
self.clock.stop()
except Exception as e:
pass
finally:
self.clock = None
try:
if self.XR_S3_ != None:
self.XR_S3_.UnadviseRealData()
except Exception as e:
pass
finally:
self.XR_S3_ = None
try:
if self.XR_K3_ != None:
self.XR_K3_.UnadviseRealData()
except Exception as e:
pass
finally:
self.XR_K3_ = None
try:
if self.XR_SC1 != None:
self.XR_SC1.UnadviseRealData()
except Exception as e:
pass
finally:
self.XR_SC1 = None
self.QueryInit()
def robot_loader():
UUID = uuid.uuid4().hex
robot = Robot1857v2(Name=ROBOT_NAME, UUID=UUID)
return robot | ko | 0.984185 | # from pandas.lib import Timestamp 09:00:00-11:00:00, 12:00:00-13:00:00, 14:00:00-15:20:00 09:00:00-11:00:00, 12:00:00-13:00:00, 14:00:00-15:20:00 # 체결 # 종목검색 # 현재 가지고 있는 포트폴리오의 실시간데이타를 받는다. # 체결 # 주문번호처리 # 내가 주문한 것이 체결된 경우 처리 # 매도 #TODO: 빠른거래시 화면의 응답속도도 영향을 주므로 일단은 커멘트처리 # self.parent.RobotView() # ToTelegram(__class__.__name__ + "매도 : %s 체결수량:%s 체결가격:%s" % (종목명, 주문수량, 주문평균체결가격)) # 매수 # 조건검색과 체결사이에 시간 간격차 때문에 등록이 안되어 있을수도 있음 # 체결된 종목은 실시간 가격을 받는다. # 포트폴리오에 없으므로 현재가에 매수 # ToTelegram(__class__.__name__ + "매수주문 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 현재가)) #TODO: 주문이 연속적으로 나가는 경우 # 텔레그렘의 메세지 전송속도가 약 1초이기 때문에 # 이베스트에서 오는 신호를 놓치는 경우가 있다. # ToTelegram(__class__.__name__ + "매도주문 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 현재가)) #TODO: 특정시간의 강제매도 #------------------------------------------------------------ #TODO:취소주문 ??? # ToTelegram(__class__.__name__ + "일괄매도 : %s %s 주문수량:%s 주문가격:%s" % (단축코드, 종목명, 수량, 주문가)) | 2.044594 | 2 |
p2ner/components/overlay/distributedclient/distributedclient/messages/peerlistmessage.py | schristakidis/p2ner | 2 | 6630812 | <reponame>schristakidis/p2ner
# -*- coding: utf-8 -*-
# Copyright 2012 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.base.ControlMessage import ControlMessage, trap_sent,probe_all,BaseControlMessage
from p2ner.base.Consts import MessageCodes as MSG
from construct import Container
class AskInitNeighs(BaseControlMessage):
type = "sidmessage"
code = MSG.ASK_INIT_NEIGHS
ack = True
@classmethod
def send(cls, sid, peer, out):
d=out.send(cls, Container(streamid = sid), peer)
d.addErrback(trap_sent)
return d
class PeerListMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received peerList message from %s for %s',peer,str(message.peer))
for p in message.peer:
self['overlay'].checkSendAddNeighbour(p,peer)
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
class PeerListPMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST_PRODUCER
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received peerList message from %s for %s',peer,str(message.peer))
print 'receive peer list message for producer from ',peer,' for ',message.peer
inpeer=self.root.getPeerObject()
bw=int(self.trafficPipe.callSimple('getBW')/1024)
for p in message.peer:
p.learnedFrom=peer
print 'sending add producer message to ',p
AddProducerMessage.send(self.stream.id,0,bw,inpeer,p,self['overlay'].addProducer,self['overlay'].failedProducer,self.root.controlPipe)
class AddNeighbourMessage(ControlMessage):
type = "overlaymessage"
code = MSG.ADD_NEIGH
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
peer.dataPort=message.port
peer.reportedBW=message.bw
if message.peer:
peer.lip=message.peer.ip
peer.lport=message.peer.port
peer.ldataPort=message.peer.dataPort
peer.hpunch=message.peer.hpunch
self.log.debug('received add neigh message from %s',peer)
print 'received neigh message from ',peer
self['overlay'].checkAcceptNeighbour(peer)
@classmethod
def send(cls, id,port,bw, inpeer, peer, out):
msg = Container(streamid=id,port=int(port), bw=bw,peer=inpeer)
d=out.send(cls, msg, peer)
d.addErrback(trap_sent)
return d
class ConfirmNeighbourMessage(ControlMessage):
type = "sidmessage"
code = MSG.CONFIRM_NEIGH
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self['overlay'].addNeighbour(peer)
@classmethod
def send(cls, sid, peer, out):
d=out.send(cls, Container(streamid = sid), peer)
d.addErrback(trap_sent)
return d
class AddProducerMessage(BaseControlMessage):
type = "overlaymessage"
code = MSG.ADD_PRODUCER
ack = True
@classmethod
def send(cls, id,port,bw, inpeer, peer,suc_func,err_func, out):
msg = Container(streamid=id,port=port, bw=bw,peer=inpeer)
d=out.send(cls, msg, peer)
d.addErrback(probe_all,suc_func=suc_func,err_func=err_func)
return d
class PingMessage(ControlMessage):
type='basemessage'
code=MSG.ADDNEIGH_RTT
ack=True
def trigger(self,message):
return True
def action(self,message,peer):
return
@classmethod
def send(cls, peer, out):
out.send(cls,Container(message=None),peer).addErrback(trap_sent)
class GetNeighsMessage(ControlMessage):
type='basemessage'
code=MSG.GET_NEIGHS
ack=True
def trigger(self,message):
if message.message!=self.stream.id:
return False
return True
def action(self,message,peer):
self['overlay'].returnNeighs(peer)
class ReturnNeighsMessage(BaseControlMessage):
type='swappeerlistmessage'
code=MSG.RETURN_NEIGHS
ack=True
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
class SuggestNewPeerMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST_NEW_PEER
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received suggest new peer message from %s',peer)
self['overlay'].suggestNewPeer(peer,message.peer)
@classmethod
def send(cls, sid, peerlist, peer, out, suc_func=None,err_func=None):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(probe_all,err_func=err_func,suc_func=suc_func)
class SuggestMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received suggest message from %s',peer)
self['overlay'].availableNewPeers(peer,message.peer)
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
class ValidateNeighboursMessage(ControlMessage):
type = "sidmessage"
code = MSG.VALIDATE_NEIGHS
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self['overlay'].ansValidateNeighs(peer)
@classmethod
def send(cls, sid, peer, out):
d=out.send(cls, Container(streamid = sid), peer)
d.addErrback(trap_sent)
return d
class ReplyValidateNeighboursMessage(ControlMessage):
type='lockmessage'
code = MSG.REPLY_VALIDATE_NEIGHS
ack=True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self,message,peer):
self['overlay'].checkValidateNeighs(message.lock,peer)
return
@classmethod
def send(cls, sid, ans , peer, out):
return out.send(cls, Container(streamid=sid,swapid=0, lock=ans), peer).addErrback(trap_sent)
| # -*- coding: utf-8 -*-
# Copyright 2012 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.base.ControlMessage import ControlMessage, trap_sent,probe_all,BaseControlMessage
from p2ner.base.Consts import MessageCodes as MSG
from construct import Container
class AskInitNeighs(BaseControlMessage):
type = "sidmessage"
code = MSG.ASK_INIT_NEIGHS
ack = True
@classmethod
def send(cls, sid, peer, out):
d=out.send(cls, Container(streamid = sid), peer)
d.addErrback(trap_sent)
return d
class PeerListMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received peerList message from %s for %s',peer,str(message.peer))
for p in message.peer:
self['overlay'].checkSendAddNeighbour(p,peer)
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
class PeerListPMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST_PRODUCER
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received peerList message from %s for %s',peer,str(message.peer))
print 'receive peer list message for producer from ',peer,' for ',message.peer
inpeer=self.root.getPeerObject()
bw=int(self.trafficPipe.callSimple('getBW')/1024)
for p in message.peer:
p.learnedFrom=peer
print 'sending add producer message to ',p
AddProducerMessage.send(self.stream.id,0,bw,inpeer,p,self['overlay'].addProducer,self['overlay'].failedProducer,self.root.controlPipe)
class AddNeighbourMessage(ControlMessage):
type = "overlaymessage"
code = MSG.ADD_NEIGH
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
peer.dataPort=message.port
peer.reportedBW=message.bw
if message.peer:
peer.lip=message.peer.ip
peer.lport=message.peer.port
peer.ldataPort=message.peer.dataPort
peer.hpunch=message.peer.hpunch
self.log.debug('received add neigh message from %s',peer)
print 'received neigh message from ',peer
self['overlay'].checkAcceptNeighbour(peer)
@classmethod
def send(cls, id,port,bw, inpeer, peer, out):
msg = Container(streamid=id,port=int(port), bw=bw,peer=inpeer)
d=out.send(cls, msg, peer)
d.addErrback(trap_sent)
return d
class ConfirmNeighbourMessage(ControlMessage):
type = "sidmessage"
code = MSG.CONFIRM_NEIGH
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self['overlay'].addNeighbour(peer)
@classmethod
def send(cls, sid, peer, out):
d=out.send(cls, Container(streamid = sid), peer)
d.addErrback(trap_sent)
return d
class AddProducerMessage(BaseControlMessage):
type = "overlaymessage"
code = MSG.ADD_PRODUCER
ack = True
@classmethod
def send(cls, id,port,bw, inpeer, peer,suc_func,err_func, out):
msg = Container(streamid=id,port=port, bw=bw,peer=inpeer)
d=out.send(cls, msg, peer)
d.addErrback(probe_all,suc_func=suc_func,err_func=err_func)
return d
class PingMessage(ControlMessage):
type='basemessage'
code=MSG.ADDNEIGH_RTT
ack=True
def trigger(self,message):
return True
def action(self,message,peer):
return
@classmethod
def send(cls, peer, out):
out.send(cls,Container(message=None),peer).addErrback(trap_sent)
class GetNeighsMessage(ControlMessage):
type='basemessage'
code=MSG.GET_NEIGHS
ack=True
def trigger(self,message):
if message.message!=self.stream.id:
return False
return True
def action(self,message,peer):
self['overlay'].returnNeighs(peer)
class ReturnNeighsMessage(BaseControlMessage):
type='swappeerlistmessage'
code=MSG.RETURN_NEIGHS
ack=True
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
class SuggestNewPeerMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST_NEW_PEER
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received suggest new peer message from %s',peer)
self['overlay'].suggestNewPeer(peer,message.peer)
@classmethod
def send(cls, sid, peerlist, peer, out, suc_func=None,err_func=None):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(probe_all,err_func=err_func,suc_func=suc_func)
class SuggestMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received suggest message from %s',peer)
self['overlay'].availableNewPeers(peer,message.peer)
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
class ValidateNeighboursMessage(ControlMessage):
type = "sidmessage"
code = MSG.VALIDATE_NEIGHS
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self['overlay'].ansValidateNeighs(peer)
@classmethod
def send(cls, sid, peer, out):
d=out.send(cls, Container(streamid = sid), peer)
d.addErrback(trap_sent)
return d
class ReplyValidateNeighboursMessage(ControlMessage):
type='lockmessage'
code = MSG.REPLY_VALIDATE_NEIGHS
ack=True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self,message,peer):
self['overlay'].checkValidateNeighs(message.lock,peer)
return
@classmethod
def send(cls, sid, ans , peer, out):
return out.send(cls, Container(streamid=sid,swapid=0, lock=ans), peer).addErrback(trap_sent) | en | 0.848736 | # -*- coding: utf-8 -*- # Copyright 2012 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.01981 | 2 |
hs_access_control/management/commands/user.py | tommac7/hydroshare | 0 | 6630813 | <reponame>tommac7/hydroshare
"""
This prints the active permissions of an access control relationship between a user and a resource.
This is invaluable for access control debugging.
"""
from django.core.management.base import BaseCommand
from hs_access_control.models.privilege import PrivilegeCodes
from hs_access_control.management.utilities import user_from_name
def usage():
print("User usage:")
print(" user {username}")
print("Where:")
print(" {username} is a user name.")
def shorten(title, length):
if len(title) <= length:
return title
else:
return title[0:19]+'...'
class Command(BaseCommand):
help = """Print access control provenance."""
def add_arguments(self, parser):
# a command to execute
parser.add_argument('username', type=str)
def handle(self, *args, **options):
if options['username'] is None:
usage()
exit(1)
username = options['username']
user = user_from_name(username)
if user is None:
usage()
exit(1)
print("resources: [on my resources landing page]")
print(" OWNED by user {}:".format(user.username))
resources = user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER, via_user=True, via_group=False, via_community=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by user {}:".format(user.username))
resources = user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE, via_user=True, via_group=False, via_community=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by user {}:".format(user.username))
resources = user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW, via_user=True, via_group=False, via_community=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print("groups: [on my resources landing page]")
print(" OWNED by user {}:".format(user.username))
groups = user.uaccess.get_groups_with_explicit_access(PrivilegeCodes.OWNER)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
print(" EDITABLE by user {}:".format(user.username))
groups = user.uaccess.get_groups_with_explicit_access(PrivilegeCodes.CHANGE)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
print(" VIEWABLE by {}:".format(user.username))
groups = user.uaccess.get_groups_with_explicit_access(PrivilegeCodes.VIEW)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print("communities: [on community landing page]")
print(" OWNED by {}".format(user.username))
communities = user.uaccess.get_communities_with_explicit_access(PrivilegeCodes.OWNER)
for c in communities:
print(" community '{}' (id={})".format(c.name, c.id))
print(" {} has EDIT membership:".format(user.username))
communities = user.uaccess.get_communities_with_explicit_membership(PrivilegeCodes.CHANGE)
for c in communities:
print(" community '{}' (id={})".format(c.name, c.id))
print(" groups where {} is granted edit:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.CHANGE)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" groups where {} is granted view:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.VIEW)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" {} has VIEW membership:".format(user.username))
communities = user.uaccess.get_communities_with_explicit_membership(PrivilegeCodes.VIEW)
for c in communities:
print(" community '{}' (id={})".format(c.name, c.id))
print(" groups where {} has edit:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.CHANGE)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" groups where {} has view:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.VIEW)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
| """
This prints the active permissions of an access control relationship between a user and a resource.
This is invaluable for access control debugging.
"""
from django.core.management.base import BaseCommand
from hs_access_control.models.privilege import PrivilegeCodes
from hs_access_control.management.utilities import user_from_name
def usage():
print("User usage:")
print(" user {username}")
print("Where:")
print(" {username} is a user name.")
def shorten(title, length):
if len(title) <= length:
return title
else:
return title[0:19]+'...'
class Command(BaseCommand):
help = """Print access control provenance."""
def add_arguments(self, parser):
# a command to execute
parser.add_argument('username', type=str)
def handle(self, *args, **options):
if options['username'] is None:
usage()
exit(1)
username = options['username']
user = user_from_name(username)
if user is None:
usage()
exit(1)
print("resources: [on my resources landing page]")
print(" OWNED by user {}:".format(user.username))
resources = user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER, via_user=True, via_group=False, via_community=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by user {}:".format(user.username))
resources = user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE, via_user=True, via_group=False, via_community=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by user {}:".format(user.username))
resources = user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW, via_user=True, via_group=False, via_community=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print("groups: [on my resources landing page]")
print(" OWNED by user {}:".format(user.username))
groups = user.uaccess.get_groups_with_explicit_access(PrivilegeCodes.OWNER)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
print(" EDITABLE by user {}:".format(user.username))
groups = user.uaccess.get_groups_with_explicit_access(PrivilegeCodes.CHANGE)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
print(" VIEWABLE by {}:".format(user.username))
groups = user.uaccess.get_groups_with_explicit_access(PrivilegeCodes.VIEW)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print("communities: [on community landing page]")
print(" OWNED by {}".format(user.username))
communities = user.uaccess.get_communities_with_explicit_access(PrivilegeCodes.OWNER)
for c in communities:
print(" community '{}' (id={})".format(c.name, c.id))
print(" {} has EDIT membership:".format(user.username))
communities = user.uaccess.get_communities_with_explicit_membership(PrivilegeCodes.CHANGE)
for c in communities:
print(" community '{}' (id={})".format(c.name, c.id))
print(" groups where {} is granted edit:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.CHANGE)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" groups where {} is granted view:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.VIEW)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20), r.short_id))
print(" {} has VIEW membership:".format(user.username))
communities = user.uaccess.get_communities_with_explicit_membership(PrivilegeCodes.VIEW)
for c in communities:
print(" community '{}' (id={})".format(c.name, c.id))
print(" groups where {} has edit:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.CHANGE)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" groups where {} has view:".format(user.username))
groups = c.get_groups_with_explicit_access(PrivilegeCodes.VIEW)
for g in groups:
print(" group '{}' (id={})".format(g.name, g.id))
print(" PUBLISHED and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__published=True)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" DISCOVERABLE and in group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.view_resources.filter(raccess__discoverable=True,
raccess__published=False)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" EDITABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id))
print(" VIEWABLE by group '{}' (id={})".format(g.name, g.id))
resources = g.gaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
for r in resources:
print(" resource '{}' ({})".format(shorten(r.title, 20),
r.short_id)) | en | 0.917588 | This prints the active permissions of an access control relationship between a user and a resource. This is invaluable for access control debugging. Print access control provenance. # a command to execute | 2.810711 | 3 |
simulation/utils/geometry/point.py | KITcar-Team/kitcar-gazebo-simulation | 13 | 6630814 | <reponame>KITcar-Team/kitcar-gazebo-simulation
"""Basic point class which is compatible with all needed formats."""
__copyright__ = "KITcar"
# Compatible formats
import geometry_msgs.msg as geometry_msgs
from simulation.utils.geometry.transform import Transform
from simulation.utils.geometry.vector import Vector # Base class
from .frame import validate_and_maintain_frames
class InvalidPointOperationError(Exception):
pass
class Point(Vector):
"""Point subclass of Vector which implements a point.
Compared with its Superclass, this class imposes some restrictions to better fit
the interpretation of a point in the mathematical sense.
Uses vector's initializer.
"""
def to_geometry_msg(self) -> geometry_msgs.Point:
"""To ROS geometry_msg.
Returns:
This point as a geometry_msgs/Point
"""
return geometry_msgs.Point32(x=self.x, y=self.y, z=self.z)
def rotated(self, *args, **kwargs):
raise InvalidPointOperationError("A point cannot be rotated.")
@validate_and_maintain_frames
def __sub__(self, p):
if not type(p) is Vector:
raise InvalidPointOperationError("A point can only be modified by a vector.")
return super().__sub__(p)
@validate_and_maintain_frames
def __add__(self, p):
if not type(p) is Vector:
raise InvalidPointOperationError("A point can only be modified by a vector.")
return super().__add__(p)
@validate_and_maintain_frames
def __rmul__(self, obj: Transform):
"""Right multiplication of a point.
Only defined for transformations.
"""
if type(obj) == Transform:
# Transform * self
return self.__class__(obj * Vector(self))
raise InvalidPointOperationError("A point cannot be scaled.")
| """Basic point class which is compatible with all needed formats."""
__copyright__ = "KITcar"
# Compatible formats
import geometry_msgs.msg as geometry_msgs
from simulation.utils.geometry.transform import Transform
from simulation.utils.geometry.vector import Vector # Base class
from .frame import validate_and_maintain_frames
class InvalidPointOperationError(Exception):
pass
class Point(Vector):
"""Point subclass of Vector which implements a point.
Compared with its Superclass, this class imposes some restrictions to better fit
the interpretation of a point in the mathematical sense.
Uses vector's initializer.
"""
def to_geometry_msg(self) -> geometry_msgs.Point:
"""To ROS geometry_msg.
Returns:
This point as a geometry_msgs/Point
"""
return geometry_msgs.Point32(x=self.x, y=self.y, z=self.z)
def rotated(self, *args, **kwargs):
raise InvalidPointOperationError("A point cannot be rotated.")
@validate_and_maintain_frames
def __sub__(self, p):
if not type(p) is Vector:
raise InvalidPointOperationError("A point can only be modified by a vector.")
return super().__sub__(p)
@validate_and_maintain_frames
def __add__(self, p):
if not type(p) is Vector:
raise InvalidPointOperationError("A point can only be modified by a vector.")
return super().__add__(p)
@validate_and_maintain_frames
def __rmul__(self, obj: Transform):
"""Right multiplication of a point.
Only defined for transformations.
"""
if type(obj) == Transform:
# Transform * self
return self.__class__(obj * Vector(self))
raise InvalidPointOperationError("A point cannot be scaled.") | en | 0.806178 | Basic point class which is compatible with all needed formats. # Compatible formats # Base class Point subclass of Vector which implements a point. Compared with its Superclass, this class imposes some restrictions to better fit the interpretation of a point in the mathematical sense. Uses vector's initializer. To ROS geometry_msg. Returns: This point as a geometry_msgs/Point Right multiplication of a point. Only defined for transformations. # Transform * self | 3.17577 | 3 |
mrf/evaluation/backward.py | fabianbalsiger/mrf-reconstruction-mlmir2020 | 4 | 6630815 | import os
import numpy as np
import mrf.data.definition as defs
import mrf.data.normalization as norm
import mrf.evaluation.base as evalbase
import mrf.evaluation.metric as metric
import mrf.plot.labeling as pltlbl
import mrf.plot.parameter as pltparam
import mrf.plot.statistics as pltstat
class BackwardEvaluator(evalbase.BaseEvaluator):
"""A backward evaluator, evaluating the goodness of the MR parameter estimation."""
def __init__(self, reference, prediction, ranges,
metrics=metric.get_backward_metrics(), prefix=''):
"""
Args:
reference: The reference MR parameters (normalized).
prediction: The predicted MR parameters (normalized).
ranges: The original range of the MR parameters before normalization.
metrics: A list of pymia.evaluation.metric.INumpyArrayMetric.
prefix: The identifier for the usage of multiple backward evaluators
"""
self.mr_param_ranges = ranges
self.reference = norm.de_normalize_mr_parameters(reference, self.mr_param_ranges, defs.MR_PARAMS)
self.prediction = norm.de_normalize_mr_parameters(prediction, self.mr_param_ranges, defs.MR_PARAMS)
self.metrics = metrics
self.prefix = prefix if len(prefix) == 0 else prefix + '_'
self.results = self.calculate()
def calculate(self):
results = {}
for metric_ in self.metrics:
mean = []
for idx, mr_param in enumerate(defs.MR_PARAMS):
metric_.reference = self.reference[:, idx]
metric_.prediction = self.prediction[:, idx]
val = metric_.calculate()
results[f'{defs.trim_param(mr_param)}/{self.prefix}{metric_.metric}'] = val
mean.append(val)
results[f'MEAN/{self.prefix}{metric_.metric}'] = np.mean(mean)
return results
def plot(self, root_dir: str):
for idx, mr_param in enumerate(defs.MR_PARAMS):
data_ref = self.reference[:, idx]
data_pred = self.prediction[:, idx]
map_ = defs.trim_param(mr_param)
unit = pltlbl.get_map_description(mr_param, True)
pltstat.bland_altman_plot(os.path.join(root_dir, f'bland-altman-{self.prefix}{map_}.png'), data_pred, data_ref, unit)
pltstat.scatter_plot(os.path.join(root_dir, f'scatter-{self.prefix}{map_}.png'), data_ref, data_pred,
f'Reference {unit}', f'Predicted {unit}', with_regression_line=True, with_abline=True)
pltstat.residual_plot(os.path.join(root_dir, f'residual-{self.prefix}{map_}.png'), data_pred, data_ref,
f'Predicted {unit}', f'Residual {unit}')
pltparam.prediction_distribution_plot(os.path.join(root_dir, f'prediction-{self.prefix}{map_}.png'),
data_ref, data_pred, unit)
def save(self, root_dir: str):
np.save(os.path.join(root_dir, f'mr_parameters_{self.prefix}ref.npy'), self.reference)
np.save(os.path.join(root_dir, f'mr_parameters_{self.prefix}pred.npy'), self.prediction)
| import os
import numpy as np
import mrf.data.definition as defs
import mrf.data.normalization as norm
import mrf.evaluation.base as evalbase
import mrf.evaluation.metric as metric
import mrf.plot.labeling as pltlbl
import mrf.plot.parameter as pltparam
import mrf.plot.statistics as pltstat
class BackwardEvaluator(evalbase.BaseEvaluator):
"""A backward evaluator, evaluating the goodness of the MR parameter estimation."""
def __init__(self, reference, prediction, ranges,
metrics=metric.get_backward_metrics(), prefix=''):
"""
Args:
reference: The reference MR parameters (normalized).
prediction: The predicted MR parameters (normalized).
ranges: The original range of the MR parameters before normalization.
metrics: A list of pymia.evaluation.metric.INumpyArrayMetric.
prefix: The identifier for the usage of multiple backward evaluators
"""
self.mr_param_ranges = ranges
self.reference = norm.de_normalize_mr_parameters(reference, self.mr_param_ranges, defs.MR_PARAMS)
self.prediction = norm.de_normalize_mr_parameters(prediction, self.mr_param_ranges, defs.MR_PARAMS)
self.metrics = metrics
self.prefix = prefix if len(prefix) == 0 else prefix + '_'
self.results = self.calculate()
def calculate(self):
results = {}
for metric_ in self.metrics:
mean = []
for idx, mr_param in enumerate(defs.MR_PARAMS):
metric_.reference = self.reference[:, idx]
metric_.prediction = self.prediction[:, idx]
val = metric_.calculate()
results[f'{defs.trim_param(mr_param)}/{self.prefix}{metric_.metric}'] = val
mean.append(val)
results[f'MEAN/{self.prefix}{metric_.metric}'] = np.mean(mean)
return results
def plot(self, root_dir: str):
for idx, mr_param in enumerate(defs.MR_PARAMS):
data_ref = self.reference[:, idx]
data_pred = self.prediction[:, idx]
map_ = defs.trim_param(mr_param)
unit = pltlbl.get_map_description(mr_param, True)
pltstat.bland_altman_plot(os.path.join(root_dir, f'bland-altman-{self.prefix}{map_}.png'), data_pred, data_ref, unit)
pltstat.scatter_plot(os.path.join(root_dir, f'scatter-{self.prefix}{map_}.png'), data_ref, data_pred,
f'Reference {unit}', f'Predicted {unit}', with_regression_line=True, with_abline=True)
pltstat.residual_plot(os.path.join(root_dir, f'residual-{self.prefix}{map_}.png'), data_pred, data_ref,
f'Predicted {unit}', f'Residual {unit}')
pltparam.prediction_distribution_plot(os.path.join(root_dir, f'prediction-{self.prefix}{map_}.png'),
data_ref, data_pred, unit)
def save(self, root_dir: str):
np.save(os.path.join(root_dir, f'mr_parameters_{self.prefix}ref.npy'), self.reference)
np.save(os.path.join(root_dir, f'mr_parameters_{self.prefix}pred.npy'), self.prediction)
| en | 0.394016 | A backward evaluator, evaluating the goodness of the MR parameter estimation. Args: reference: The reference MR parameters (normalized). prediction: The predicted MR parameters (normalized). ranges: The original range of the MR parameters before normalization. metrics: A list of pymia.evaluation.metric.INumpyArrayMetric. prefix: The identifier for the usage of multiple backward evaluators | 2.721963 | 3 |
voxplex/config.py | metrasynth/voxplex | 0 | 6630816 | <filename>voxplex/config.py
from os.path import expanduser
import yaml
def config_from_file(path):
with open(expanduser(path), 'r') as f:
return yaml.load(f)
| <filename>voxplex/config.py
from os.path import expanduser
import yaml
def config_from_file(path):
with open(expanduser(path), 'r') as f:
return yaml.load(f)
| none | 1 | 2.162198 | 2 |
|
app/training/migrations/0002_auto_20210413_1232.py | DmitryBovsunovskyi/workout-restfull-api | 0 | 6630817 | <reponame>DmitryBovsunovskyi/workout-restfull-api
# Generated by Django 3.1.8 on 2021-04-13 12:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('training', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='exerciseset',
old_name='workout_session_id',
new_name='workout_session',
),
migrations.RenameField(
model_name='workoutsession',
old_name='workout_id',
new_name='workout',
),
]
| # Generated by Django 3.1.8 on 2021-04-13 12:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('training', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='exerciseset',
old_name='workout_session_id',
new_name='workout_session',
),
migrations.RenameField(
model_name='workoutsession',
old_name='workout_id',
new_name='workout',
),
] | en | 0.770804 | # Generated by Django 3.1.8 on 2021-04-13 12:32 | 1.787987 | 2 |
api/src/repository/DocumentationRepository.py | thalesgelinger/truco-alpha | 0 | 6630818 | from python_framework import Repository, OpenApiDocumentationFile
@Repository()
class DocumentationRepository:
def getSwaggerDocumentation(self):
return OpenApiDocumentationFile.loadDocumentation(self.globals.api)
def getApiTree(self):
return self.globals.apiTree
| from python_framework import Repository, OpenApiDocumentationFile
@Repository()
class DocumentationRepository:
def getSwaggerDocumentation(self):
return OpenApiDocumentationFile.loadDocumentation(self.globals.api)
def getApiTree(self):
return self.globals.apiTree
| none | 1 | 2.049082 | 2 |
|
objects/email_helper.py | lawrel/ArtificialWormBlast | 3 | 6630819 | """email_helper.py creates and sends emails"""
import smtplib
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import uuid
def send_email(r_email, subjectline, body):
"""Function checks the credentials of a user (login)"""
# Create a secure SSL context
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
# get emails to and from
s_email = "<EMAIL>"
# Log into our account to send email
server.login("<EMAIL>", "TSITMonsterCards")
# create message
message = MIMEMultipart()
message["From"] = s_email
message["To"] = r_email
message["Subject"] = subjectline
message.attach(MIMEText(body, "plain"))
text = message.as_string()
# send message
server.sendmail(s_email, r_email, text)
def email_reset(email, link):
"""Function checks the credentials of a user (login)"""
body = """
Hi there!
It looks like you forgot your password. Sad.
Well here is a link to reset your password: """ + link + """
Better luck in remembering your password next time!
-AWB team
"""
send_email(email, "Monster's Ink Password Reset", body)
def email_gamelink(email, link):
"""Function checks the credentials of a user (login)"""
body = """
Hi there!
It looks like you have a friend. Congratulations!
Your friend would like to play a game of Monster's Ink with you. Click the link to join them or do not and let them be sad: """ + link + """
Enjoy your Game!
-AWB team
"""
send_email(email, "Monster's Ink Game Invitation", body)
| """email_helper.py creates and sends emails"""
import smtplib
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import uuid
def send_email(r_email, subjectline, body):
"""Function checks the credentials of a user (login)"""
# Create a secure SSL context
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
# get emails to and from
s_email = "<EMAIL>"
# Log into our account to send email
server.login("<EMAIL>", "TSITMonsterCards")
# create message
message = MIMEMultipart()
message["From"] = s_email
message["To"] = r_email
message["Subject"] = subjectline
message.attach(MIMEText(body, "plain"))
text = message.as_string()
# send message
server.sendmail(s_email, r_email, text)
def email_reset(email, link):
"""Function checks the credentials of a user (login)"""
body = """
Hi there!
It looks like you forgot your password. Sad.
Well here is a link to reset your password: """ + link + """
Better luck in remembering your password next time!
-AWB team
"""
send_email(email, "Monster's Ink Password Reset", body)
def email_gamelink(email, link):
"""Function checks the credentials of a user (login)"""
body = """
Hi there!
It looks like you have a friend. Congratulations!
Your friend would like to play a game of Monster's Ink with you. Click the link to join them or do not and let them be sad: """ + link + """
Enjoy your Game!
-AWB team
"""
send_email(email, "Monster's Ink Game Invitation", body)
| en | 0.917787 | email_helper.py creates and sends emails Function checks the credentials of a user (login) # Create a secure SSL context # get emails to and from # Log into our account to send email # create message # send message Function checks the credentials of a user (login) Hi there!
It looks like you forgot your password. Sad.
Well here is a link to reset your password: Better luck in remembering your password next time!
-AWB team Function checks the credentials of a user (login) Hi there!
It looks like you have a friend. Congratulations!
Your friend would like to play a game of Monster's Ink with you. Click the link to join them or do not and let them be sad: Enjoy your Game!
-AWB team | 3.516718 | 4 |
0049.Group Anagrams/solution.py | zhlinh/leetcode | 0 | 6630820 | <reponame>zhlinh/leetcode<filename>0049.Group Anagrams/solution.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: <EMAIL>
Version: 0.0.1
Created Time: 2016-02-20
Last_modify: 2016-02-20
******************************************
'''
'''
Given an array of strings, group anagrams together.
For example, given: ["eat", "tea", "tan", "ate", "nat", "bat"],
Return:
[
["ate", "eat","tea"],
["nat","tan"],
["bat"]
]
Note:
For the return value, each inner list's elements must follow
the lexicographic order.
All inputs will be in lower-case.
'''
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
dic = {}
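        # Sorting a word's characters yields a canonical key shared by all of its anagrams.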
for s in sorted(strs):
if s != "":
key = ''.join(sorted(s))
dic[key] = dic.get(key, []) + [s]
else:
dic["null"] = dic.get("null", []) + [""]
return list(dic.values())
| Anagrams/solution.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: <EMAIL>
Version: 0.0.1
Created Time: 2016-02-20
Last_modify: 2016-02-20
******************************************
'''
'''
Given an array of strings, group anagrams together.
For example, given: ["eat", "tea", "tan", "ate", "nat", "bat"],
Return:
[
["ate", "eat","tea"],
["nat","tan"],
["bat"]
]
Note:
For the return value, each inner list's elements must follow
the lexicographic order.
All inputs will be in lower-case.
'''
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
dic = {}
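        # Sorting a word's characters yields a canonical key shared by all of its anagrams.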
for s in sorted(strs):
if s != "":
key = ''.join(sorted(s))
dic[key] = dic.get(key, []) + [s]
else:
dic["null"] = dic.get("null", []) + [""]
return list(dic.values()) | en | 0.477401 | #!/usr/bin/env python # -*- coding: utf-8 -*- ***************************************** Author: zhlinh Email: <EMAIL> Version: 0.0.1 Created Time: 2016-02-20 Last_modify: 2016-02-20 ****************************************** Given an array of strings, group anagrams together. For example, given: ["eat", "tea", "tan", "ate", "nat", "bat"], Return: [ ["ate", "eat","tea"], ["nat","tan"], ["bat"] ] Note: For the return value, each inner list's elements must follow the lexicographic order. All inputs will be in lower-case. :type strs: List[str] :rtype: List[List[str]] | 3.89673 | 4 |
tests/test_companies.py | exit107/gremlin-python | 29 | 6630821 | <filename>tests/test_companies.py
import unittest
from unittest.mock import patch
import logging
import requests
from gremlinapi.companies import GremlinAPICompanies
from .util import mock_json, mock_data, mock_identifier, hooli_id
class TestCompanies(unittest.TestCase):
@patch("requests.get")
def test_get_company_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(GremlinAPICompanies.get_company(**mock_identifier), mock_data)
@patch("requests.get")
def test_list_company_clients_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.list_company_clients(**mock_identifier), mock_data
)
@patch("requests.post")
def test_invite_company_user_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.invite_company_user(**mock_identifier), mock_data
)
@patch("requests.delete")
def test_delete_company_invite_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.delete_company_invite(**mock_identifier), mock_data
)
@patch("requests.post")
def test_company_mfa_prefs_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.company_mfa_prefs(**mock_identifier), mock_data
)
@patch("requests.post")
def test_update_company_prefs_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.update_company_prefs(**mock_identifier), mock_data
)
@patch("requests.post")
def test_update_company_saml_props_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.update_company_saml_props(**mock_identifier), mock_data
)
@patch("requests.get")
def test_list_company_users_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.list_company_users(**mock_identifier), mock_data
)
@patch("requests.put")
def test_update_company_user_role_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.update_company_user_role(**mock_identifier), mock_data
)
@patch("requests.post")
def test_activate_company_user_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.activate_company_user(**mock_identifier), mock_data
)
@patch("requests.delete")
def test_deactivate_company_user_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.deactivate_company_user(**mock_identifier), mock_data
)
@patch("requests.post")
def test_auth_toggles_with_decorator(self, mock_get) -> None:
toggles_body = {
"companyId": hooli_id,
"passwordEnabled": True,
"mfaRequired": False,
"googleEnabled": True,
"oauthEnabled": True,
"samlEnabled": True,
"claimsRequired": False,
}
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.auth_toggles(**toggles_body),
mock_get.return_value.status_code,
)
| <filename>tests/test_companies.py
import unittest
from unittest.mock import patch
import logging
import requests
from gremlinapi.companies import GremlinAPICompanies
from .util import mock_json, mock_data, mock_identifier, hooli_id
class TestCompanies(unittest.TestCase):
@patch("requests.get")
def test_get_company_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(GremlinAPICompanies.get_company(**mock_identifier), mock_data)
@patch("requests.get")
def test_list_company_clients_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.list_company_clients(**mock_identifier), mock_data
)
@patch("requests.post")
def test_invite_company_user_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.invite_company_user(**mock_identifier), mock_data
)
@patch("requests.delete")
def test_delete_company_invite_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.delete_company_invite(**mock_identifier), mock_data
)
@patch("requests.post")
def test_company_mfa_prefs_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.company_mfa_prefs(**mock_identifier), mock_data
)
@patch("requests.post")
def test_update_company_prefs_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.update_company_prefs(**mock_identifier), mock_data
)
@patch("requests.post")
def test_update_company_saml_props_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.update_company_saml_props(**mock_identifier), mock_data
)
@patch("requests.get")
def test_list_company_users_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.list_company_users(**mock_identifier), mock_data
)
@patch("requests.put")
def test_update_company_user_role_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.update_company_user_role(**mock_identifier), mock_data
)
@patch("requests.post")
def test_activate_company_user_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.activate_company_user(**mock_identifier), mock_data
)
@patch("requests.delete")
def test_deactivate_company_user_with_decorator(self, mock_get) -> None:
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.deactivate_company_user(**mock_identifier), mock_data
)
@patch("requests.post")
def test_auth_toggles_with_decorator(self, mock_get) -> None:
toggles_body = {
"companyId": hooli_id,
"passwordEnabled": True,
"mfaRequired": False,
"googleEnabled": True,
"oauthEnabled": True,
"samlEnabled": True,
"claimsRequired": False,
}
mock_get.return_value = requests.Response()
mock_get.return_value.status_code = 200
mock_get.return_value.json = mock_json
self.assertEqual(
GremlinAPICompanies.auth_toggles(**toggles_body),
mock_get.return_value.status_code,
)
| none | 1 | 2.541172 | 3 |
|
tests/st/gradient/test_function_vjp_graph.py | httpsgithu/mindspore | 1 | 6630822 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test vjp in graph mode"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore import ms_function
from mindspore.ops.functional import vjp
context.set_context(mode=context.GRAPH_MODE)
class SingleInputNet(nn.Cell):
def construct(self, x):
return x**3
class MultipleInputsOutputNet(nn.Cell):
def construct(self, x, y):
return 2*x, y**3
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_single_input_graph():
"""
Features: Function vjp
Description: Test vjp with single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
net = SingleInputNet()
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
primal, grad = vjp(net, x, v)
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_multiple_inputs_default_v_graph():
"""
Features: Function vjp
    Description: Test vjp with multiple inputs, multiple outputs and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
net = MultipleInputsOutputNet()
expect_primal_0 = Tensor(np.array([[2, 4], [6, 8]]).astype(np.float32))
expect_primal_1 = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad_0 = Tensor(np.array([[2, 2], [2, 2]]).astype(np.float32))
expect_grad_1 = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
primal, grad = vjp(net, (x, y), (v, v))
assert isinstance(primal, tuple)
assert len(primal) == 2
assert np.allclose(primal[0].asnumpy(), expect_primal_0.asnumpy())
assert np.allclose(primal[1].asnumpy(), expect_primal_1.asnumpy())
assert isinstance(grad, tuple)
assert len(grad) == 2
assert np.allclose(grad[0].asnumpy(), expect_grad_0.asnumpy())
assert np.allclose(grad[1].asnumpy(), expect_grad_1.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_ms_function_single_input_single_output_default_v_graph():
"""
Features: Function vjp
Description: Test vjp with ms_function, single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
net = SingleInputNet()
@ms_function
def vjp_with_ms_function(inputs, vectors):
output, vjp_grad = vjp(net, inputs, vectors)
return output, vjp_grad
primal, grad = vjp_with_ms_function(x, v)
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_input_function_single_input_single_output_default_v_graph():
"""
Features: Function vjp
Description: Test vjp with function, single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
def test_function(inputs):
return inputs**3
primal, grad = vjp(test_function, x, v)
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_construct_single_input_single_output_default_v_graph():
"""
Features: Function vjp
    Description: Test vjp called inside a Cell construct with single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
class Net(nn.Cell):
def __init__(self, network):
super(Net, self).__init__()
self.net = network
def construct(self, inputs, vectors):
net_out, vjp_out = vjp(self.net, inputs, vectors)
return net_out, vjp_out
test_net_graph = Net(SingleInputNet())
primal, grad = test_net_graph(x, v)
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
| # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test vjp in graph mode"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore import ms_function
from mindspore.ops.functional import vjp
context.set_context(mode=context.GRAPH_MODE)
class SingleInputNet(nn.Cell):
def construct(self, x):
return x**3
class MultipleInputsOutputNet(nn.Cell):
def construct(self, x, y):
return 2*x, y**3
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_single_input_graph():
"""
Features: Function vjp
Description: Test vjp with single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
net = SingleInputNet()
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
primal, grad = vjp(net, x, v)
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_multiple_inputs_default_v_graph():
"""
Features: Function vjp
    Description: Test vjp with multiple inputs, multiple outputs and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
net = MultipleInputsOutputNet()
expect_primal_0 = Tensor(np.array([[2, 4], [6, 8]]).astype(np.float32))
expect_primal_1 = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad_0 = Tensor(np.array([[2, 2], [2, 2]]).astype(np.float32))
expect_grad_1 = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
primal, grad = vjp(net, (x, y), (v, v))
assert isinstance(primal, tuple)
assert len(primal) == 2
assert np.allclose(primal[0].asnumpy(), expect_primal_0.asnumpy())
assert np.allclose(primal[1].asnumpy(), expect_primal_1.asnumpy())
assert isinstance(grad, tuple)
assert len(grad) == 2
assert np.allclose(grad[0].asnumpy(), expect_grad_0.asnumpy())
assert np.allclose(grad[1].asnumpy(), expect_grad_1.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_ms_function_single_input_single_output_default_v_graph():
"""
Features: Function vjp
Description: Test vjp with ms_function, single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
net = SingleInputNet()
@ms_function
def vjp_with_ms_function(inputs, vectors):
output, vjp_grad = vjp(net, inputs, vectors)
return output, vjp_grad
primal, grad = vjp_with_ms_function(x, v)
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_input_function_single_input_single_output_default_v_graph():
"""
Features: Function vjp
Description: Test vjp with function, single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
def test_function(inputs):
return inputs**3
primal, grad = vjp(test_function, x, v)
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vjp_construct_single_input_single_output_default_v_graph():
"""
Features: Function vjp
    Description: Test vjp called inside a Cell construct with single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
class Net(nn.Cell):
def __init__(self, network):
super(Net, self).__init__()
self.net = network
def construct(self, inputs, vectors):
net_out, vjp_out = vjp(self.net, inputs, vectors)
return net_out, vjp_out
test_net_graph = Net(SingleInputNet())
primal, grad = test_net_graph(x, v)
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
| en | 0.759593 | # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ test vjp in graph mode Features: Function vjp Description: Test vjp with single input, single output and default v in graph mode. Expectation: No exception. Features: Function vjp Description: Test vjp with single input, single output and default v in graph mode. Expectation: No exception. Features: Function vjp Description: Test vjp with ms_function, single input, single output and default v in graph mode. Expectation: No exception. Features: Function vjp Description: Test vjp with function, single input, single output and default v in graph mode. Expectation: No exception. Features: Function vjp Description: Test vjp with function, single input, single output and default v in graph mode. Expectation: No exception. | 2.225777 | 2 |
backend/api/migrations/0001_initial.py | SamerElhamdo/tejruba-frontend | 0 | 6630823 | # Generated by Django 2.1.5 on 2019-07-28 13:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(max_length=300)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('likes', models.IntegerField()),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, upload_to='images')),
('name', models.CharField(blank=True, max_length=150)),
('bio', models.TextField(blank=True, default='')),
('job', models.CharField(blank=True, default='', max_length=150)),
('age', models.CharField(blank=True, default='', max_length=150)),
('country', models.CharField(blank=True, default='', max_length=150)),
('facebook', models.URLField(blank=True, default='')),
('instagram', models.URLField(blank=True, default='')),
('twitter', models.URLField(blank=True, default='')),
('personal_website', models.URLField(blank=True, default='')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Tujruba',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(upload_to='media/images')),
('title', models.CharField(max_length=150)),
('description', models.TextField(max_length=999)),
('publish_date', models.DateTimeField(default=django.utils.timezone.now)),
('stars', models.IntegerField()),
('likes', models.IntegerField()),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='comment',
name='tujruba',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Tujruba'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| # Generated by Django 2.1.5 on 2019-07-28 13:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(max_length=300)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('likes', models.IntegerField()),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, upload_to='images')),
('name', models.CharField(blank=True, max_length=150)),
('bio', models.TextField(blank=True, default='')),
('job', models.CharField(blank=True, default='', max_length=150)),
('age', models.CharField(blank=True, default='', max_length=150)),
('country', models.CharField(blank=True, default='', max_length=150)),
('facebook', models.URLField(blank=True, default='')),
('instagram', models.URLField(blank=True, default='')),
('twitter', models.URLField(blank=True, default='')),
('personal_website', models.URLField(blank=True, default='')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Tujruba',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(upload_to='media/images')),
('title', models.CharField(max_length=150)),
('description', models.TextField(max_length=999)),
('publish_date', models.DateTimeField(default=django.utils.timezone.now)),
('stars', models.IntegerField()),
('likes', models.IntegerField()),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='comment',
name='tujruba',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Tujruba'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| en | 0.715505 | # Generated by Django 2.1.5 on 2019-07-28 13:38 | 1.764826 | 2 |
tests/test_df.py | listuser/jc | 0 | 6630824 | <reponame>listuser/jc<filename>tests/test_df.py
import os
import json
import unittest
import jc.parsers.df
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df.out'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df-h.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df-h.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df-h.out'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df-h.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/df-long-filesystem.out'), 'r', encoding='utf-8') as f:
self.generic_df_long_filesystem = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df.json'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df-h.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df-h.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df-h.json'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df-h.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/df-long-filesystem.json'), 'r', encoding='utf-8') as f:
self.generic_df_long_filesystem_json = json.loads(f.read())
def test_df_nodata(self):
"""
Test plain 'df' with no data
"""
self.assertEqual(jc.parsers.df.parse('', quiet=True), [])
def test_df_centos_7_7(self):
"""
Test plain 'df' on Centos 7.7
"""
self.assertEqual(jc.parsers.df.parse(self.centos_7_7_df, quiet=True), self.centos_7_7_df_json)
def test_df_ubuntu_18_4(self):
"""
        Test plain 'df' on Ubuntu 18.04
"""
self.assertEqual(jc.parsers.df.parse(self.ubuntu_18_4_df, quiet=True), self.ubuntu_18_4_df_json)
def test_df_osx_10_11_6(self):
"""
Test plain 'df' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_11_6_df, quiet=True), self.osx_10_11_6_df_json)
def test_df_osx_10_14_6(self):
"""
Test plain 'df' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_14_6_df, quiet=True), self.osx_10_14_6_df_json)
def test_df_h_centos_7_7(self):
"""
Test 'df -h' on Centos 7.7
"""
self.assertEqual(jc.parsers.df.parse(self.centos_7_7_df_h, quiet=True), self.centos_7_7_df_h_json)
def test_df_h_ubuntu_18_4(self):
"""
        Test 'df -h' on Ubuntu 18.04
"""
self.assertEqual(jc.parsers.df.parse(self.ubuntu_18_4_df_h, quiet=True), self.ubuntu_18_4_df_h_json)
def test_df_h_osx_10_11_6(self):
"""
Test 'df -h' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_11_6_df_h, quiet=True), self.osx_10_11_6_df_h_json)
def test_df_h_osx_10_14_6(self):
"""
Test 'df -h' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_14_6_df_h, quiet=True), self.osx_10_14_6_df_h_json)
def test_df_long_filesystem(self):
"""
Test older version of 'df' with long filesystem data
"""
self.assertEqual(jc.parsers.df.parse(self.generic_df_long_filesystem, quiet=True), self.generic_df_long_filesystem_json)
if __name__ == '__main__':
unittest.main()
| import os
import json
import unittest
import jc.parsers.df
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df.out'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df-h.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df-h.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df-h.out'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df-h.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df_h = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/df-long-filesystem.out'), 'r', encoding='utf-8') as f:
self.generic_df_long_filesystem = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df.json'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/df-h.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/df-h.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.11.6/df-h.json'), 'r', encoding='utf-8') as f:
self.osx_10_11_6_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/df-h.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_df_h_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/df-long-filesystem.json'), 'r', encoding='utf-8') as f:
self.generic_df_long_filesystem_json = json.loads(f.read())
def test_df_nodata(self):
"""
Test plain 'df' with no data
"""
self.assertEqual(jc.parsers.df.parse('', quiet=True), [])
def test_df_centos_7_7(self):
"""
Test plain 'df' on Centos 7.7
"""
self.assertEqual(jc.parsers.df.parse(self.centos_7_7_df, quiet=True), self.centos_7_7_df_json)
def test_df_ubuntu_18_4(self):
"""
        Test plain 'df' on Ubuntu 18.04
"""
self.assertEqual(jc.parsers.df.parse(self.ubuntu_18_4_df, quiet=True), self.ubuntu_18_4_df_json)
def test_df_osx_10_11_6(self):
"""
Test plain 'df' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_11_6_df, quiet=True), self.osx_10_11_6_df_json)
def test_df_osx_10_14_6(self):
"""
Test plain 'df' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_14_6_df, quiet=True), self.osx_10_14_6_df_json)
def test_df_h_centos_7_7(self):
"""
Test 'df -h' on Centos 7.7
"""
self.assertEqual(jc.parsers.df.parse(self.centos_7_7_df_h, quiet=True), self.centos_7_7_df_h_json)
def test_df_h_ubuntu_18_4(self):
"""
        Test 'df -h' on Ubuntu 18.04
"""
self.assertEqual(jc.parsers.df.parse(self.ubuntu_18_4_df_h, quiet=True), self.ubuntu_18_4_df_h_json)
def test_df_h_osx_10_11_6(self):
"""
Test 'df -h' on OSX 10.11.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_11_6_df_h, quiet=True), self.osx_10_11_6_df_h_json)
def test_df_h_osx_10_14_6(self):
"""
Test 'df -h' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.df.parse(self.osx_10_14_6_df_h, quiet=True), self.osx_10_14_6_df_h_json)
def test_df_long_filesystem(self):
"""
Test older version of 'df' with long filesystem data
"""
self.assertEqual(jc.parsers.df.parse(self.generic_df_long_filesystem, quiet=True), self.generic_df_long_filesystem_json)
if __name__ == '__main__':
unittest.main() | en | 0.40378 | # input # output Test plain 'df' with no data Test plain 'df' on Centos 7.7 Test plain 'df' on Ubuntu 18.4 Test plain 'df' on OSX 10.11.6 Test plain 'df' on OSX 10.14.6 Test 'df -h' on Centos 7.7 Test 'df -h' on Ubuntu 18.4 Test 'df -h' on OSX 10.11.6 Test 'df -h' on OSX 10.14.6 Test older version of 'df' with long filesystem data | 2.564413 | 3 |
wemake_python_styleguide/formatter.py | Andrka/wemake-python-styleguide | 1 | 6630825 | """
Our very own ``flake8`` formatter for better error messages.
That's how all ``flake8`` formatters work:
.. mermaid::
:caption: ``flake8`` formatting API calls order.
graph LR
F2[start] --> F3[after_init]
F3 --> F4[start]
        F4 --> F5[beginning]
F5 --> F6[handle]
F6 --> F7[format]
F6 --> F8[show_source]
F6 --> F9[show_statistic]
F7 --> F10[finished]
F8 --> F10[finished]
F9 --> F10[finished]
F10 -.-> F5
F10 --> F11[stop]
.. autoclass:: WemakeFormatter
:no-undoc-members:
"""
from collections import defaultdict
from typing import ClassVar, DefaultDict, List
from flake8.formatting.base import BaseFormatter
from flake8.statistics import Statistics
from flake8.style_guide import Violation
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer
from typing_extensions import Final
from wemake_python_styleguide.version import pkg_version
#: That url is generated and hosted by Sphinx.
DOCS_URL_TEMPLATE: Final = (
'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'
)
class WemakeFormatter(BaseFormatter): # noqa: WPS214
"""
    We need to format our style :term:`violations <violation>` beautifully.
The default formatter does not allow us to do that.
What things do we miss?
1. Spacing, everything is just mixed up and glued together
2. Colors and decoration, some information is easier
to gather just with colors or underlined text
3. Grouping, we need explicit grouping by filename
4. Incomplete and non-informative statistics
"""
_doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)
# API:
def after_init(self):
"""Called after the original ``init`` is used to set extra fields."""
self._lexer = PythonLexer()
self._formatter = TerminalFormatter()
# Logic:
self._proccessed_filenames: List[str] = []
self._error_count = 0
def handle(self, error: Violation) -> None: # noqa: WPS110
"""Processes each :term:`violation` to print it and all related."""
if error.filename not in self._proccessed_filenames:
self._print_header(error.filename)
self._proccessed_filenames.append(error.filename)
super().handle(error)
self._error_count += 1
def format(self, error: Violation) -> str: # noqa: A003
"""Called to format each individual :term:`violation`."""
return '{newline} {row_col:<8} {code:<5} {text}'.format(
newline=self.newline if self._should_show_source(error) else '',
code=error.code,
text=error.text,
row_col='{0}:{1}'.format(error.line_number, error.column_number),
)
def show_source(self, error: Violation) -> str:
"""Called when ``--show-source`` option is provided."""
if not self._should_show_source(error):
return ''
formated_line = error.physical_line.lstrip()
adjust = len(error.physical_line) - len(formated_line)
code = _highlight(
formated_line,
self._lexer,
self._formatter,
)
return ' {code} {pointer}^'.format(
code=code,
pointer=' ' * (error.column_number - 1 - adjust),
)
def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210
"""Called when ``--statistic`` option is passed."""
all_errors = 0
for error_code in statistics.error_codes():
stats_for_error_code = statistics.statistics_for(error_code)
statistic = next(stats_for_error_code)
count = statistic.count
count += sum(stat.count for stat in stats_for_error_code)
all_errors += count
error_by_file = _count_per_filename(statistics, error_code)
self._print_violation_per_file(
statistic,
error_code,
count,
error_by_file,
)
self._write(self.newline)
self._write(_underline(_bold('All errors: {0}'.format(all_errors))))
def stop(self) -> None:
"""Runs once per app when the formatting ends."""
if self._error_count:
message = '{0}Full list of violations and explanations:{0}{1}'
self._write(message.format(self.newline, self._doc_url))
# Our own methods:
def _print_header(self, filename: str) -> None:
self._write(
'{newline}{filename}'.format(
filename=_underline(_bold(filename)),
newline=self.newline,
),
)
def _print_violation_per_file(
self,
statistic: Statistics,
error_code: str,
count: int,
error_by_file: DefaultDict[str, int],
):
self._write(
'{newline}{error_code}: {message}'.format(
newline=self.newline,
error_code=_bold(error_code),
message=statistic.message,
),
)
for filename, error_count in error_by_file.items():
self._write(
' {error_count:<5} {filename}'.format(
error_count=error_count,
filename=filename,
),
)
self._write(_underline('Total: {0}'.format(count)))
def _should_show_source(self, error: Violation) -> bool:
return self.options.show_source and error.physical_line is not None
# Formatting text:
def _bold(text: str) -> str:
r"""
Returns bold formatted text.
>>> _bold('Hello!')
'\x1b[1mHello!\x1b[0m'
"""
return '\033[1m{0}\033[0m'.format(text)
def _underline(text: str) -> str:
r"""
Returns underlined formatted text.
>>> _underline('Hello!')
'\x1b[4mHello!\x1b[0m'
"""
return '\033[4m{0}\033[0m'.format(text)
def _highlight(source: str, lexer, formatter) -> str:
"""
Highlights source code. Might fail.
See also:
https://github.com/wemake-services/wemake-python-styleguide/issues/794
"""
try:
return highlight(source, lexer, formatter)
except Exception: # pragma: no cover
# Might fail on some systems, when colors are set incorrectly,
# or not available at all. In this case code will be just text.
return source
# Helpers:
def _count_per_filename(
statistics: Statistics,
error_code: str,
) -> DefaultDict[str, int]:
filenames: DefaultDict[str, int] = defaultdict(int)
stats_for_error_code = statistics.statistics_for(error_code)
for stat in stats_for_error_code:
filenames[stat.filename] += stat.count
return filenames
| """
Our very own ``flake8`` formatter for better error messages.
That's how all ``flake8`` formatters work:
.. mermaid::
:caption: ``flake8`` formatting API calls order.
graph LR
F2[start] --> F3[after_init]
F3 --> F4[start]
        F4 --> F5[beginning]
F5 --> F6[handle]
F6 --> F7[format]
F6 --> F8[show_source]
F6 --> F9[show_statistic]
F7 --> F10[finished]
F8 --> F10[finished]
F9 --> F10[finished]
F10 -.-> F5
F10 --> F11[stop]
.. autoclass:: WemakeFormatter
:no-undoc-members:
"""
from collections import defaultdict
from typing import ClassVar, DefaultDict, List
from flake8.formatting.base import BaseFormatter
from flake8.statistics import Statistics
from flake8.style_guide import Violation
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer
from typing_extensions import Final
from wemake_python_styleguide.version import pkg_version
#: That url is generated and hosted by Sphinx.
DOCS_URL_TEMPLATE: Final = (
'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'
)
class WemakeFormatter(BaseFormatter): # noqa: WPS214
"""
    We need to format our style :term:`violations <violation>` beautifully.
The default formatter does not allow us to do that.
What things do we miss?
1. Spacing, everything is just mixed up and glued together
2. Colors and decoration, some information is easier
to gather just with colors or underlined text
3. Grouping, we need explicit grouping by filename
4. Incomplete and non-informative statistics
"""
_doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)
# API:
def after_init(self):
"""Called after the original ``init`` is used to set extra fields."""
self._lexer = PythonLexer()
self._formatter = TerminalFormatter()
# Logic:
self._proccessed_filenames: List[str] = []
self._error_count = 0
def handle(self, error: Violation) -> None: # noqa: WPS110
"""Processes each :term:`violation` to print it and all related."""
if error.filename not in self._proccessed_filenames:
self._print_header(error.filename)
self._proccessed_filenames.append(error.filename)
super().handle(error)
self._error_count += 1
def format(self, error: Violation) -> str: # noqa: A003
"""Called to format each individual :term:`violation`."""
return '{newline} {row_col:<8} {code:<5} {text}'.format(
newline=self.newline if self._should_show_source(error) else '',
code=error.code,
text=error.text,
row_col='{0}:{1}'.format(error.line_number, error.column_number),
)
def show_source(self, error: Violation) -> str:
"""Called when ``--show-source`` option is provided."""
if not self._should_show_source(error):
return ''
formated_line = error.physical_line.lstrip()
adjust = len(error.physical_line) - len(formated_line)
code = _highlight(
formated_line,
self._lexer,
self._formatter,
)
return ' {code} {pointer}^'.format(
code=code,
pointer=' ' * (error.column_number - 1 - adjust),
)
def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210
"""Called when ``--statistic`` option is passed."""
all_errors = 0
for error_code in statistics.error_codes():
stats_for_error_code = statistics.statistics_for(error_code)
statistic = next(stats_for_error_code)
count = statistic.count
count += sum(stat.count for stat in stats_for_error_code)
all_errors += count
error_by_file = _count_per_filename(statistics, error_code)
self._print_violation_per_file(
statistic,
error_code,
count,
error_by_file,
)
self._write(self.newline)
self._write(_underline(_bold('All errors: {0}'.format(all_errors))))
def stop(self) -> None:
"""Runs once per app when the formatting ends."""
if self._error_count:
message = '{0}Full list of violations and explanations:{0}{1}'
self._write(message.format(self.newline, self._doc_url))
# Our own methods:
def _print_header(self, filename: str) -> None:
self._write(
'{newline}{filename}'.format(
filename=_underline(_bold(filename)),
newline=self.newline,
),
)
def _print_violation_per_file(
self,
statistic: Statistics,
error_code: str,
count: int,
error_by_file: DefaultDict[str, int],
):
self._write(
'{newline}{error_code}: {message}'.format(
newline=self.newline,
error_code=_bold(error_code),
message=statistic.message,
),
)
for filename, error_count in error_by_file.items():
self._write(
' {error_count:<5} {filename}'.format(
error_count=error_count,
filename=filename,
),
)
self._write(_underline('Total: {0}'.format(count)))
def _should_show_source(self, error: Violation) -> bool:
return self.options.show_source and error.physical_line is not None
# Formatting text:
def _bold(text: str) -> str:
r"""
Returns bold formatted text.
>>> _bold('Hello!')
'\x1b[1mHello!\x1b[0m'
"""
return '\033[1m{0}\033[0m'.format(text)
def _underline(text: str) -> str:
r"""
Returns underlined formatted text.
>>> _underline('Hello!')
'\x1b[4mHello!\x1b[0m'
"""
return '\033[4m{0}\033[0m'.format(text)
def _highlight(source: str, lexer, formatter) -> str:
"""
Highlights source code. Might fail.
See also:
https://github.com/wemake-services/wemake-python-styleguide/issues/794
"""
try:
return highlight(source, lexer, formatter)
except Exception: # pragma: no cover
# Might fail on some systems, when colors are set incorrectly,
# or not available at all. In this case code will be just text.
return source
# Helpers:
def _count_per_filename(
statistics: Statistics,
error_code: str,
) -> DefaultDict[str, int]:
filenames: DefaultDict[str, int] = defaultdict(int)
stats_for_error_code = statistics.statistics_for(error_code)
for stat in stats_for_error_code:
filenames[stat.filename] += stat.count
return filenames
| en | 0.710249 | Our very own ``flake8`` formatter for better error messages. That's how all ``flake8`` formatters work: .. mermaid:: :caption: ``flake8`` formatting API calls order. graph LR F2[start] --> F3[after_init] F3 --> F4[start] F4 --> F5[beggining] F5 --> F6[handle] F6 --> F7[format] F6 --> F8[show_source] F6 --> F9[show_statistic] F7 --> F10[finished] F8 --> F10[finished] F9 --> F10[finished] F10 -.-> F5 F10 --> F11[stop] .. autoclass:: WemakeFormatter :no-undoc-members: #: That url is generated and hosted by Sphinx. # noqa: WPS214 We need to format our style :term:`violations <violation>` beatifully. The default formatter does not allow us to do that. What things do we miss? 1. Spacing, everything is just mixed up and glued together 2. Colors and decoration, some information is easier to gather just with colors or underlined text 3. Grouping, we need explicit grouping by filename 4. Incomplete and non-informative statistics # API: Called after the original ``init`` is used to set extra fields. # Logic: # noqa: WPS110 Processes each :term:`violation` to print it and all related. # noqa: A003 Called to format each individual :term:`violation`. Called when ``--show-source`` option is provided. # noqa: WPS210 Called when ``--statistic`` option is passed. Runs once per app when the formatting ends. # Our own methods: # Formatting text: Returns bold formatted text. >>> _bold('Hello!') '\x1b[1mHello!\x1b[0m' Returns underlined formatted text. >>> _underline('Hello!') '\x1b[4mHello!\x1b[0m' Highlights source code. Might fail. See also: https://github.com/wemake-services/wemake-python-styleguide/issues/794 # pragma: no cover # Might fail on some systems, when colors are set incorrectly, # or not available at all. In this case code will be just text. # Helpers: | 2.067964 | 2 |
tsv2json.py | spookyahell/covid19-impfung-de | 0 | 6630826 | '''by spookyahell'''
import json
class TSV2JSONConverter(object):
def __init__(self, file_path):
self.file = open(file_path)
self.fpath = file_path
#~ mode allows for a list and dict(ionary) version of the data
def convert(self, mode = 'dict', needle = 2):
if mode == 'dict':
result = {}
data = self.file.read()
lines = data.split('\n')
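            # the first TSV row provides the column headers used as keys for every record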
data_titles = lines[0].split('\t')
for idx, item in enumerate(lines):
if item.strip() == '':
continue
indiv_res = {}
if idx>0:
datas = item.split('\t')
for idx_d, data in enumerate(datas):
if idx_d>0:
indiv_res[data_titles[idx_d]] = datas[idx_d]
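                    # records are keyed by their first column; repeated keys are merged into a list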
if not datas[0] in result:
result[datas[0]] = indiv_res
else:
if type(result[datas[0]]) is list:
result[datas[0]].append(indiv_res)
else:
result[datas[0]] = [result[datas[0]], indiv_res ]
return json.dumps(result, indent = 2) | '''by spookyahell'''
import json
class TSV2JSONConverter(object):
def __init__(self, file_path):
self.file = open(file_path)
self.fpath = file_path
#~ mode allows for a list and dict(ionary) version of the data
def convert(self, mode = 'dict', needle = 2):
if mode == 'dict':
result = {}
data = self.file.read()
lines = data.split('\n')
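            # the first TSV row provides the column headers used as keys for every record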
data_titles = lines[0].split('\t')
for idx, item in enumerate(lines):
if item.strip() == '':
continue
indiv_res = {}
if idx>0:
datas = item.split('\t')
for idx_d, data in enumerate(datas):
if idx_d>0:
indiv_res[data_titles[idx_d]] = datas[idx_d]
if not datas[0] in result:
result[datas[0]] = indiv_res
else:
if type(result[datas[0]]) is list:
result[datas[0]].append(indiv_res)
else:
result[datas[0]] = [result[datas[0]], indiv_res ]
return json.dumps(result, indent = 2) | en | 0.742189 | by spookyahell #~ mode allows for a list and dict(ionary) version of the data | 2.899704 | 3 |
tests/interactive/font/test_font.py | whitestone8214/pyglet-1.3.0-mod | 0 | 6630827 | <gh_stars>0
"""
Test font loading and rendering.
"""
import pytest
import pyglet
from pyglet import font
from tests.annotations import Platform
from .font_test_base import font_fixture
@pytest.mark.parametrize('question,color', [
('Default font should appear at 0.3 opacity (faint grey)', (0, 0, 0, 0.3)),
('Text should not be visible due to opacity 0.0', (0, 0, 0, 0)),
('Default font should appear at full opacity (black)', (0, 0, 0, 1)),
('Default font should appear blue', (0, 0, 1, 1)),
('Default font should appear red', (1, 0, 0, 1)),
])
def test_color(font_fixture, question, color):
"""Test that font colour is applied correctly."""
font_fixture.create_window()
font_fixture.create_label(
color=color
)
font_fixture.ask_question(question)
def test_default_font(font_fixture):
"""Test that a font with no name given still renders using some sort
of default system font.
"""
font_fixture.create_window()
font_fixture.load_font(
name=''
)
font_fixture.create_label()
font_fixture.ask_question(
'Font should be rendered using a default system font'
)
def test_system_font(font_fixture):
"""Test that a font likely to be installed on the computer can be
loaded and displayed correctly.
    One window will open; it should show "Quickly brown fox" at 24pt using:
* "Helvetica" on Mac OS X
* "Arial" on Windows
* "Arial" on Linux
"""
if pyglet.compat_platform in Platform.OSX:
font_name = 'Helvetica'
elif pyglet.compat_platform in Platform.WINDOWS:
font_name = 'Arial'
else:
font_name = 'Arial'
font_fixture.create_window()
font_fixture.load_font(
name=font_name
)
font_fixture.create_label()
font_fixture.ask_question(
'"Quickly brown fox" should be shown at 24pt using font ' + font_name
)
def test_bullet_glyphs(font_fixture):
"""Test that rendering of unicode glyphs works."""
font_fixture.create_window()
font_fixture.load_font(
size=60
)
font_fixture.create_label(
text=u'\u2022'*5,
)
font_fixture.ask_question(
'You should see 5 bullet glyphs.'
)
def test_large_font(font_fixture):
"Render a font using a very large size. Tests issue 684."
font_fixture.create_window(
width=1000,
height=400,
)
font_fixture.load_font(
name='Arial',
size=292,
)
font_fixture.create_label(
text='trawant',
)
font_fixture.ask_question(
'Is the word "trawant" rendered in a large font?'
)
@pytest.mark.parametrize('font_desc,font_file, font_options', [
('regular', 'action_man.ttf', {}),
('bold', 'action_man_bold.ttf', {'bold':True}),
('italic', 'action_man_italic.ttf', {'italic':True}),
('bold italic', 'action_man_bold_italic.ttf', {'bold':True, 'italic':True})
])
def test_add_font(font_fixture, test_data, font_desc, font_file, font_options):
"""Test that a font distributed with the application can be displayed.
Four lines of text should be displayed, each in a different variant
(bold/italic/regular) of Action Man at 24pt. The Action Man fonts are
included in the test data directory (tests/data/fonts) as action_man*.ttf.
"""
font.add_file(test_data.get_file('fonts', font_file))
font_fixture.create_window()
font_fixture.load_font(
name='Action Man',
**font_options
)
font_fixture.create_label()
font_fixture.ask_question(
"""You should see {} style Action Man at 24pt.""".format(font_desc)
)
@pytest.mark.parametrize('font_name,text', [
('Action man', 'Action Man'),
('Action man', 'Action Man longer test with more words'),
('Arial', 'Arial'),
('Arial', 'Arial longer test with more words'),
('Times New Roman', 'Times New Roman'),
('Times New Roman', 'Times New Roman longer test with more words'),
])
def test_horizontal_metrics(font_fixture, test_data, font_name, text):
"""Test that the horizontal font metrics are calculated correctly.
Some text in various fonts will be displayed. Green vertical lines mark
the left edge of the text. Blue vertical lines mark the right edge of the
text.
"""
font.add_file(test_data.get_file('fonts', 'action_man.ttf'))
question=("The green vertical lines should match the left edge of the text"
+ "and the blue vertical lines should match the right edge of the text.")
font_fixture.create_window(
width=600,
)
font_fixture.draw_metrics = True
font_fixture.load_font(
name=font_name,
size=16,
)
font_fixture.create_label(
text=text,
)
font_fixture.ask_question(
question,
)
def test_metrics_workaround(font_fixture, test_data):
"""Test workaround for font missing metrics.
Font should fit between top and bottom lines.
"""
font.add_file(test_data.get_file('fonts', 'courR12-ISO8859-1.pcf'))
font_fixture.create_window(
width=600,
)
font_fixture.draw_metrics = True
font_fixture.load_font(
name='Courier',
size=16,
)
font_fixture.create_label(
text='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
)
font_fixture.ask_question(
'The text should fit between the top and bottom lines',
)
@pytest.mark.parametrize('dpi,width,height', [
(120, 169, 23),
(100, 138, 19),
(160, 226, 30),
])
def test_dpi(font_fixture, test_data, dpi, width, height):
font.add_file(test_data.get_file('fonts', 'action_man.ttf'))
question=("The green vertical lines should match the left edge of the text"
+ "and the blue vertical lines should match the right edge of the text.")
font_fixture.create_window()
font_fixture.draw_custom_metrics = width, height
font_fixture.load_font(
name='Action Man',
size=16,
dpi=dpi,
)
font_fixture.create_label(
text='The DPI is {}'.format(dpi),
)
font_fixture.ask_question(
question,
)
| """
Test font loading and rendering.
"""
import pytest
import pyglet
from pyglet import font
from tests.annotations import Platform
from .font_test_base import font_fixture
@pytest.mark.parametrize('question,color', [
('Default font should appear at 0.3 opacity (faint grey)', (0, 0, 0, 0.3)),
('Text should not be visible due to opacity 0.0', (0, 0, 0, 0)),
('Default font should appear at full opacity (black)', (0, 0, 0, 1)),
('Default font should appear blue', (0, 0, 1, 1)),
('Default font should appear red', (1, 0, 0, 1)),
])
def test_color(font_fixture, question, color):
"""Test that font colour is applied correctly."""
font_fixture.create_window()
font_fixture.create_label(
color=color
)
font_fixture.ask_question(question)
def test_default_font(font_fixture):
"""Test that a font with no name given still renders using some sort
of default system font.
"""
font_fixture.create_window()
font_fixture.load_font(
name=''
)
font_fixture.create_label()
font_fixture.ask_question(
'Font should be rendered using a default system font'
)
def test_system_font(font_fixture):
"""Test that a font likely to be installed on the computer can be
loaded and displayed correctly.
One window will open, it should show "Quickly brown fox" at 24pt using:
* "Helvetica" on Mac OS X
* "Arial" on Windows
* "Arial" on Linux
"""
if pyglet.compat_platform in Platform.OSX:
font_name = 'Helvetica'
elif pyglet.compat_platform in Platform.WINDOWS:
font_name = 'Arial'
else:
font_name = 'Arial'
font_fixture.create_window()
font_fixture.load_font(
name=font_name
)
font_fixture.create_label()
font_fixture.ask_question(
'"Quickly brown fox" should be shown at 24pt using font ' + font_name
)
def test_bullet_glyphs(font_fixture):
"""Test that rendering of unicode glyphs works."""
font_fixture.create_window()
font_fixture.load_font(
size=60
)
font_fixture.create_label(
text=u'\u2022'*5,
)
font_fixture.ask_question(
'You should see 5 bullet glyphs.'
)
def test_large_font(font_fixture):
"Render a font using a very large size. Tests issue 684."
font_fixture.create_window(
width=1000,
height=400,
)
font_fixture.load_font(
name='Arial',
size=292,
)
font_fixture.create_label(
text='trawant',
)
font_fixture.ask_question(
'Is the word "trawant" rendered in a large font?'
)
@pytest.mark.parametrize('font_desc,font_file, font_options', [
('regular', 'action_man.ttf', {}),
('bold', 'action_man_bold.ttf', {'bold':True}),
('italic', 'action_man_italic.ttf', {'italic':True}),
('bold italic', 'action_man_bold_italic.ttf', {'bold':True, 'italic':True})
])
def test_add_font(font_fixture, test_data, font_desc, font_file, font_options):
"""Test that a font distributed with the application can be displayed.
Four lines of text should be displayed, each in a different variant
(bold/italic/regular) of Action Man at 24pt. The Action Man fonts are
included in the test data directory (tests/data/fonts) as action_man*.ttf.
"""
font.add_file(test_data.get_file('fonts', font_file))
font_fixture.create_window()
font_fixture.load_font(
name='Action Man',
**font_options
)
font_fixture.create_label()
font_fixture.ask_question(
"""You should see {} style Action Man at 24pt.""".format(font_desc)
)
@pytest.mark.parametrize('font_name,text', [
('Action man', 'Action Man'),
('Action man', 'Action Man longer test with more words'),
('Arial', 'Arial'),
('Arial', 'Arial longer test with more words'),
('Times New Roman', 'Times New Roman'),
('Times New Roman', 'Times New Roman longer test with more words'),
])
def test_horizontal_metrics(font_fixture, test_data, font_name, text):
"""Test that the horizontal font metrics are calculated correctly.
Some text in various fonts will be displayed. Green vertical lines mark
the left edge of the text. Blue vertical lines mark the right edge of the
text.
"""
font.add_file(test_data.get_file('fonts', 'action_man.ttf'))
question=("The green vertical lines should match the left edge of the text"
+ "and the blue vertical lines should match the right edge of the text.")
font_fixture.create_window(
width=600,
)
font_fixture.draw_metrics = True
font_fixture.load_font(
name=font_name,
size=16,
)
font_fixture.create_label(
text=text,
)
font_fixture.ask_question(
question,
)
def test_metrics_workaround(font_fixture, test_data):
"""Test workaround for font missing metrics.
Font should fit between top and bottom lines.
"""
font.add_file(test_data.get_file('fonts', 'courR12-ISO8859-1.pcf'))
font_fixture.create_window(
width=600,
)
font_fixture.draw_metrics = True
font_fixture.load_font(
name='Courier',
size=16,
)
font_fixture.create_label(
text='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
)
font_fixture.ask_question(
'The text should fit between the top and bottom lines',
)
@pytest.mark.parametrize('dpi,width,height', [
(120, 169, 23),
(100, 138, 19),
(160, 226, 30),
])
def test_dpi(font_fixture, test_data, dpi, width, height):
font.add_file(test_data.get_file('fonts', 'action_man.ttf'))
question=("The green vertical lines should match the left edge of the text"
+ "and the blue vertical lines should match the right edge of the text.")
font_fixture.create_window()
font_fixture.draw_custom_metrics = width, height
font_fixture.load_font(
name='Action Man',
size=16,
dpi=dpi,
)
font_fixture.create_label(
text='The DPI is {}'.format(dpi),
)
font_fixture.ask_question(
question,
) | en | 0.841238 | Test font loading and rendering. Test that font colour is applied correctly. Test that a font with no name given still renders using some sort of default system font. Test that a font likely to be installed on the computer can be loaded and displayed correctly. One window will open, it should show "Quickly brown fox" at 24pt using: * "Helvetica" on Mac OS X * "Arial" on Windows * "Arial" on Linux Test that rendering of unicode glyphs works. Test that a font distributed with the application can be displayed. Four lines of text should be displayed, each in a different variant (bold/italic/regular) of Action Man at 24pt. The Action Man fonts are included in the test data directory (tests/data/fonts) as action_man*.ttf. You should see {} style Action Man at 24pt. Test that the horizontal font metrics are calculated correctly. Some text in various fonts will be displayed. Green vertical lines mark the left edge of the text. Blue vertical lines mark the right edge of the text. Test workaround for font missing metrics. Font should fit between top and bottom lines. | 2.745565 | 3 |
superorm/session.py | lyoshur/superorm | 0 | 6630828 | <filename>superorm/session.py
import threading
from superorm.pool import ConnPool
class SessionManager:
# Database conn pool
_conn_pool = None
# conn Session cache
_cache = None
def __init__(self, conn_pool: ConnPool):
"""
Init the Thread cache for conn session
"""
self._conn_pool = conn_pool
self._cache = {}
def get_session(self):
"""
Get the cache session
"""
_ident = threading.currentThread().ident
if _ident in self._cache:
return self._cache[_ident]
_session = Session(self._conn_pool, self._conn_pool.obtain(), self)
self._cache[_ident] = _session
return self._cache[_ident]
def destroy_session(self):
"""
Destroy the cache
"""
_ident = threading.currentThread().ident
if _ident in self._cache:
del self._cache[_ident]
class Session:
# session pool
_conn_pool = None
# use index
_conn_index = -1
# session manager
_session_manager = None
# auto commit
auto_commit = None
def __init__(self, conn_pool: ConnPool, conn_index: int, session_manager: SessionManager):
"""
Init SQLSession
:param conn_pool: conn pool
:param conn_index: The database connection index being used
:param session_manager: session manager
"""
self._conn_pool = conn_pool
self._conn_index = conn_index
self._session_manager = session_manager
self.auto_commit = self.commit
def __enter__(self):
self.begin()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def conn(self):
"""
Get this session conn
:return: conn
"""
return self._conn_pool.get(self._conn_index)
def begin(self):
"""
Begin transaction
"""
def auto_commit():
pass
self.auto_commit = auto_commit
def commit(self):
"""
Commit this session
"""
self._conn_pool.commit(self._conn_index)
self._destroy()
def rollback(self):
"""
Rollback this session
"""
self._conn_pool.rollback(self._conn_index)
self._destroy()
def _destroy(self):
"""
Destroy this session
"""
self._session_manager.destroy_session()
self._conn_pool.give_back(self._conn_index)
self._conn_pool = None
self._conn_index = -1
self._session_manager = None
self.auto_commit = None
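# Illustrative usage sketch (not part of the library source): SessionManager
# hands out one Session per thread, and using the Session as a context manager
# switches it from auto-commit into an explicit transaction via begin(). The
# cursor()/execute() calls below assume a DB-API style connection object in the
# pool; nothing here is invoked by the library itself.
def _example_transaction(pool: ConnPool, sql: str):
    manager = SessionManager(pool)
    session = manager.get_session()
    with session:                          # __enter__ calls begin(), disabling auto_commit
        try:
            cursor = session.conn().cursor()
            cursor.execute(sql)
            session.commit()               # returns the conn to the pool and drops the cache entry
        except Exception:
            session.rollback()
            raise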
| <filename>superorm/session.py
import threading
from superorm.pool import ConnPool
class SessionManager:
# Database conn pool
_conn_pool = None
# conn Session cache
_cache = None
def __init__(self, conn_pool: ConnPool):
"""
Init the Thread cache for conn session
"""
self._conn_pool = conn_pool
self._cache = {}
def get_session(self):
"""
Get the cache session
"""
_ident = threading.currentThread().ident
if _ident in self._cache:
return self._cache[_ident]
_session = Session(self._conn_pool, self._conn_pool.obtain(), self)
self._cache[_ident] = _session
return self._cache[_ident]
def destroy_session(self):
"""
Destroy the cache
"""
_ident = threading.currentThread().ident
if _ident in self._cache:
del self._cache[_ident]
class Session:
# session pool
_conn_pool = None
# use index
_conn_index = -1
# session manager
_session_manager = None
# auto commit
auto_commit = None
def __init__(self, conn_pool: ConnPool, conn_index: int, session_manager: SessionManager):
"""
Init SQLSession
:param conn_pool: conn pool
:param conn_index: The database connection index being used
:param session_manager: session manager
"""
self._conn_pool = conn_pool
self._conn_index = conn_index
self._session_manager = session_manager
self.auto_commit = self.commit
def __enter__(self):
self.begin()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def conn(self):
"""
Get this session conn
:return: conn
"""
return self._conn_pool.get(self._conn_index)
def begin(self):
"""
Begin transaction
"""
def auto_commit():
pass
self.auto_commit = auto_commit
def commit(self):
"""
Commit this session
"""
self._conn_pool.commit(self._conn_index)
self._destroy()
def rollback(self):
"""
Rollback this session
"""
self._conn_pool.rollback(self._conn_index)
self._destroy()
def _destroy(self):
"""
Destroy this session
"""
self._session_manager.destroy_session()
self._conn_pool.give_back(self._conn_index)
self._conn_pool = None
self._conn_index = -1
self._session_manager = None
self.auto_commit = None
| en | 0.480736 | # Database conn pool # conn Session cache Init the Thread cache for conn session Get the cache session Destroy the cache # session pool # use index # session manager # auto commit Init SQLSession :param conn_pool: conn pool :param conn_index: The database connection index being used :param session_manager: session manager Get this session conn :return: conn Begin transaction Commit this session Rollback this session Destroy this session | 2.979377 | 3 |
gym/frozen2a_policyiteration.py | randhawp/rl-gridworlds | 0 | 6630829 | <gh_stars>0
import gym
import numpy as np
'''
In frozen2_policyiteration the result on convergence is not
+-----+-----+-----+-----+
| 0 | -14 | -20 | -22 |
+-----+-----+-----+-----+
| -14 | -18 | -20 | -20 |
+-----+-----+-----+-----+
| -20 | -20 | -18 | -14 |
+-----+-----+-----+-----+
| -22 | -20 | -14 | 0 |
+-----+-----+-----+-----+
as in <NAME> and <NAME> bible on RL
but it is
[[ 0. -13. -19. -21.]
[-13. -17. -19. -19.]
[-19. -19. -17. -13.]
[-21. -19. -13. 0.]]
This is because the terminal states are included in the evaluation.
Different state values do not matter as long as they are all proportional
and the policy still converges
In this example we exclude the terminal states to arrive at the result in the book
'''
# 4x4 grid - do not change size/shape as it is hardcoded in code below
# can change location of S
custom_map = [
'GSFF',
'FFFF',
'FFFF',
'FFFG'
]
# in above S can be anywhere there is a F. Only one S though
gamma=1.0 #discount factor
reward=0
cstate=[] # current state value in a sweep
fstate=[] # final state value
env = gym.make("FrozenLake-v0", desc=custom_map,is_slippery=False)
env.reset()
env.render()
'''
Starting at a grid cell, sweep through the entire state space (i.e. our policy).
For each state, calculate the utility value v(s) until done (i.e. a terminal state is reached).
Note there are 2 terminal states in this case.
At the end of a sweep, update the utility values with the newly calculated ones.
Continue until convergence is achieved.
'''
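# Added sketch for clarity (not called by this script): the sweep below is the
# Bellman expectation backup  v(s) <- sum_a pi(a|s) * (r + gamma * v(s'))  with a
# uniform policy (p = 0.25) and r = -1 per step, applied only to the 14
# non-terminal states. The helper shows the same update in one function; the
# name is illustrative.
def one_evaluation_sweep(env, v, p=0.25, reward=-1.0, gamma=1.0):
    new_v = np.copy(v)
    for s in range(1, env.observation_space.n - 1):   # skip the terminal corners 0 and 15
        total = 0.0
        for a in range(env.action_space.n):
            next_s = env.P[s][a][0][1]                # deterministic env: single transition
            total += p * (reward + gamma * v[next_s])
        new_v[s] = total
    return new_v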
i=j=0
v=np.zeros(16) # holds the actual values
vtemp=np.zeros(16) # holds values temporarily until sweep is finished
actionvalue=np.zeros(4) # array to store the value for a state due to actions in that state
convergencelimit = 0.0001
converged = False
reward = -1 # override the environment and change reward to -1 for each step
c=0
p=0.25 # override probability distribution and set every action to equal chance
while not converged:
i=1
while i < env.observation_space.n -1: #sweep across the state space
j=0
while j< env.action_space.n:
nextstate = env.P[i][j][0][1] #next state
done = env.P[i][j][0][3] #done
actionvalue[j] = p * (reward + gamma*v[nextstate]) # value of this state for this action
j=j+1
vtemp[i] = np.sum(actionvalue) # value is the sum of all action value
i=i+1
#check if converged
#calculate the diff between the two state spaces
diff = v - vtemp
diffav = abs(np.sum(diff))/(16)
v = np.copy(vtemp) #sweep is finished, update the entire state space with new values
if(diffav <= convergencelimit):
break
v=np.round(v)
print(v.reshape(4,4)) | import gym
import numpy as np
'''
In frozen2_policyiteration the result on convergence is not
+-----+-----+-----+-----+
| 0 | -14 | -20 | -22 |
+-----+-----+-----+-----+
| -14 | -18 | -20 | -20 |
+-----+-----+-----+-----+
| -20 | -20 | -18 | -14 |
+-----+-----+-----+-----+
| -22 | -20 | -14 | 0 |
+-----+-----+-----+-----+
as in <NAME> and <NAME> bible on RL
but it is
[[ 0. -13. -19. -21.]
[-13. -17. -19. -19.]
[-19. -19. -17. -13.]
[-21. -19. -13. 0.]]
This is because the terminal states are included in the evaluation.
Different state values do not matter as long as they are all proportional
and the policy still converges
In this example we exclude the terminal states to arrive at the result in the book
'''
# 4x4 grid - do not change size/shape as it is hardcoded in code below
# can change location of S
custom_map = [
'GSFF',
'FFFF',
'FFFF',
'FFFG'
]
# in above S can be anywhere there is a F. Only one S though
gamma=1.0 #discount factor
reward=0
cstate=[] # current state value in a sweep
fstate=[] # final state value
env = gym.make("FrozenLake-v0", desc=custom_map,is_slippery=False)
env.reset()
env.render()
'''
Starting at a grid cell, sweep through the entire state space (i.e. our policy).
For each state, calculate the utility value v(s) until done (i.e. a terminal state is reached).
Note there are 2 terminal states in this case.
At the end of a sweep, update the utility values with the newly calculated ones.
Continue until convergence is achieved.
'''
i=j=0
v=np.zeros(16) # holds the actual values
vtemp=np.zeros(16) # holds values temporarily until sweep is finished
actionvalue=np.zeros(4) # array to store the value for a state due to actions in that state
convergencelimit = 0.0001
converged = False
reward = -1 # override the environment and change reward to -1 for each step
c=0
p=0.25 # override probability distribution and set every action to equal chance
while not converged:
i=1
while i < env.observation_space.n -1: #sweep across the state space
j=0
while j< env.action_space.n:
nextstate = env.P[i][j][0][1] #next state
done = env.P[i][j][0][3] #done
actionvalue[j] = p * (reward + gamma*v[nextstate]) # value of this state for this action
j=j+1
vtemp[i] = np.sum(actionvalue) # value is the sum of all action value
i=i+1
#check if converged
#calculate the diff between the two state spaces
diff = v - vtemp
diffav = abs(np.sum(diff))/(16)
v = np.copy(vtemp) #sweep is finished, update the entire state space with new values
if(diffav <= convergencelimit):
break
v=np.round(v)
print(v.reshape(4,4)) | en | 0.836106 | In froze2_policyiteration the result on convergence is not +-----+-----+-----+-----+ | 0 | -14 | -20 | -22 | +-----+-----+-----+-----+ | -14 | -18 | -20 | -20 | +-----+-----+-----+-----+ | -20 | -20 | -18 | -14 | +-----+-----+-----+-----+ | -22 | -20 | -14 | 0 | +-----+-----+-----+-----+ as in <NAME> and <NAME> bible on RL but it is [[ 0. -13. -19. -21.] [-13. -17. -19. -19.] [-19. -19. -17. -13.] [-21. -19. -13. 0.]] This is because the terminal states are included in the evaluation. Different state values do not matter as long as they are all proportional and the policy still converges In this example we exclude the terminal states to arrive at the result in the book # 4x4 grid - do not change size/shape as it is hardcoded in code below # can change location of S # in above S can be anywhere there is a F. Only one S though #discount factor # current state value in a sweep # final state value Starting at a grid cell, sweep through the entire state space (i.e our policy) For each calcualte the utility value v(s) till done (i.e reach terminal state) Note 2 terminal states in this case. At the end of a sweep state update the utility values with the new ones calcuated. Continue till the time convergence is achieved. # holds the actual values # holds values temporarily until sweep is finished # array to store the value for a state due to actions in that state # override the environment and change reward to -1 for each step # override probability distribution and set every action to equal chance #sweep across the state space #next state #done # value of this state for this action # value is the sum of all action value #check if converged #calculate the diff between the two state spaces #sweep is finished, update the entire state space with new values | 2.940612 | 3 |
src/configlookup/utils.py | tgedr/configlookup | 0 | 6630830 | <filename>src/configlookup/utils.py
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
log = logging.getLogger(__name__)
class ConfigurationUtils:
@staticmethod
def merge_dict(
source: Dict[str, Any],
target: Dict[str, Any],
target_property: Optional[str] = None,
target_root: Optional[Dict[str, Any]] = None,
) -> None:
"""
        merges dictionary values recursively
Parameters
----------
source : Dict[str, Any]
the source dictionary to merge
target : Dict[str, Any]
the destination dictionary
target_property: Optional[str]
target dict equivalent property if not in the root of the target dict
target_root: Optional[Dict[str, Any]]
Raises
------
ValueError
if source or target are None
TypeError
when key types do not match across dictionaries
"""
log.debug(f"[merge_dict|in] ({source}, {target}, {target_property}, {target_root}")
if source is None:
raise ValueError("mandatory to provide at least the source dict")
if target is None:
raise ValueError("mandatory to provide at least the target dict")
if target_root is None:
target_root = target
for adding_key in source.keys():
# for every entry in the source dictionary
adding_value = source[adding_key]
adding_value_type = type(adding_value).__name__
if adding_value_type == "dict":
# if value we want to add is a 'dict' then define the entry in the target and drill down recursively
if adding_key not in target.keys():
target[adding_key] = {}
elif type(target[adding_key]).__name__ != "dict":
raise TypeError(f"key: {adding_key} type does not match")
adding_property = f"{'' if target_property is None else (target_property + '.')}{adding_key}"
ConfigurationUtils.merge_dict(adding_value, target[adding_key], adding_property, target_root)
elif adding_value_type == "list":
# if value we want to add is a 'list' then define the list entry and extend it with the new values
if adding_key not in target.keys():
target[adding_key] = []
elif type(target[adding_key]).__name__ != "list":
raise TypeError(f"key: {adding_key} type does not match")
existing_list = target[adding_key]
existing_list.extend(adding_value)
target[adding_key] = list(set(existing_list))
# set the equivalent variable
if target_root != target:
adding_property = f"{'' if target_property is None else (target_property + '.')}{adding_key}"
adding_var = adding_property.replace(".", "__").upper()
log.debug(f"[merge_dict] adding new entry {adding_var}: {target[adding_key]}")
target_root[adding_var] = target[adding_key]
else:
# if a scalar/text then just upsert the value
log.debug(f"[merge_dict] adding new entry {adding_key}: {adding_value}")
target[adding_key] = adding_value
# set the equivalent variable
if target_root == target:
adding_var = adding_key.upper()
target[adding_var] = adding_value
else:
adding_property = f"{'' if target_property is None else (target_property + '.')}{adding_key}"
adding_var = adding_property.replace(".", "__").upper()
log.debug(f"[merge_dict] adding new entry {adding_var}: {target[adding_key]}")
target_root[adding_var] = target[adding_key]
log.debug(f"[merge_dict|out] => {target}")
@staticmethod
def find_property(key: str, target: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""
finds a property/variable with key format example "a.b.c" in the target dictionary
Parameters
----------
key : str
the key to search for in the dictionary, in format "a.b.c"
target : Dict[str, Any]
the dictionary where to search for the key
Returns
-------
Optional[Dict[str, Any]]
a dict in the format {"pointer": target, "key": subkey} or None
Raises
------
ValueError
if key or target are None
"""
log.debug(f"[ConfigurationUtils.find_property|in] ({key}, {target})")
if key is None:
raise ValueError("no key name")
if target is None:
raise ValueError("no target")
result = None
# split the key in sub keys and start descending the dict structure from its root
components = key.split(sep=".")
for index, subkey in enumerate(components):
# at every level of the structure check if the current sub key is present
if subkey in target.keys():
if index + 1 == len(components):
# if in last iteration and subkey and is part of the structure then wrap up in this dict format
result = {"pointer": target, "key": subkey}
else:
# if not last iteration, then resume the search with the remaining subkeys in the child structure found
remaining_subkeys = ".".join(components[index + 1 :])
child_structure = target[subkey]
result = ConfigurationUtils.find_property(remaining_subkeys, child_structure)
if result:
# don't iterate further if we have a solution
break
log.debug(f"[ConfigurationUtils.find_property|out] => {result}")
return result
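    # Example (illustrative): with cfg = {"db": {"host": "x"}},
    #   ConfigurationUtils.find_property("db.host", cfg)
    # returns {"pointer": cfg["db"], "key": "host"}, so the value is read as
    #   found["pointer"][found["key"]]   ->   "x"
    # while a missing key such as "db.port" returns None.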
@staticmethod
def get_config_file_paths(config_dir: str, config_file_prefix: str, config_file_suffixes: List[str]):
"""Function that looks for the required config files in dir_path.
Parameters
----------
config_dir : str
The absolute path to a directory to look for files.
config_file_prefix: str
the file name prefix to look for, when appended with suffixes
config_file_suffixes: List[str]
the file name additional suffixes to use when searching for config files
Returns
-------
list
A list with the correct order of the config files.
Raises
------
FileNotFoundError
If no config files are present in directory.
"""
log.info(
f"[ConfigurationUtils.get_config_file_paths|in] ({config_dir}, {config_file_prefix}, "
f"{config_file_suffixes})"
)
file_paths = []
files_to_find = [f"{config_file_prefix}{x}.json" for x in config_file_suffixes]
log.info(f"[ConfigurationUtils.get_config_file_paths] files_to_find: {files_to_find}")
available_files = [x.name for x in Path(config_dir).iterdir() if x.is_file()]
for file_to_find in files_to_find:
if file_to_find in available_files:
file_paths.append(f"{config_dir}/{file_to_find}")
if not file_paths:
raise FileNotFoundError(
f"[ConfigurationUtils.get_config_file_paths] Cannot locate configuration files in specified directory: "
f"{config_dir}."
)
log.info(f"[ConfigurationUtils.get_config_file_paths|out] => {file_paths}")
return file_paths
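    # Example (illustrative): with app.json and app-dev.json present in /etc/conf,
    #   ConfigurationUtils.get_config_file_paths("/etc/conf", "app", ["", "-dev"])
    # returns ["/etc/conf/app.json", "/etc/conf/app-dev.json"], preserving the
    # suffix order; if none of the expected files exist, FileNotFoundError is raised.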
@staticmethod
def resolve_env_variable(variable: str, default: Optional[str] = None) -> str:
log.info(f"[ConfigurationUtils.resolve_env_variable|in] ({variable}, {default})")
_result = None
log.debug("[ConfigurationUtils.resolve_env_variable] trying to find it in env variables")
try:
_result = os.environ[variable]
except Exception as x:
log.debug(
f"[ConfigurationUtils.resolve_env_variable] not found: {variable}",
exc_info=x,
)
result = _result if _result is not None else default
log.info(f"[ConfigurationUtils.resolve_env_variable|out] => {result}")
return result
@staticmethod
def property_to_variable(prop: str) -> str:
return prop.upper().replace(".", "__")
@staticmethod
def variable_to_property(var: str) -> str:
return var.lower().replace("__", ".")
@staticmethod
def prop_and_var_from_key(key: str) -> Tuple[str, str]:
prop = None
var = None
if 0 == key.count("."):
# assume variable
var = key.upper()
prop = ConfigurationUtils.variable_to_property(var)
else:
prop = key.lower()
var = ConfigurationUtils.property_to_variable(prop)
return prop, var
| <filename>src/configlookup/utils.py
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
log = logging.getLogger(__name__)
class ConfigurationUtils:
@staticmethod
def merge_dict(
source: Dict[str, Any],
target: Dict[str, Any],
target_property: Optional[str] = None,
target_root: Optional[Dict[str, Any]] = None,
) -> None:
"""
        merges dictionary values recursively
Parameters
----------
source : Dict[str, Any]
the source dictionary to merge
target : Dict[str, Any]
the destination dictionary
target_property: Optional[str]
target dict equivalent property if not in the root of the target dict
target_root: Optional[Dict[str, Any]]
Raises
------
ValueError
if source or target are None
TypeError
when key types do not match across dictionaries
"""
log.debug(f"[merge_dict|in] ({source}, {target}, {target_property}, {target_root}")
if source is None:
raise ValueError("mandatory to provide at least the source dict")
if target is None:
raise ValueError("mandatory to provide at least the target dict")
if target_root is None:
target_root = target
for adding_key in source.keys():
# for every entry in the source dictionary
adding_value = source[adding_key]
adding_value_type = type(adding_value).__name__
if adding_value_type == "dict":
# if value we want to add is a 'dict' then define the entry in the target and drill down recursively
if adding_key not in target.keys():
target[adding_key] = {}
elif type(target[adding_key]).__name__ != "dict":
raise TypeError(f"key: {adding_key} type does not match")
adding_property = f"{'' if target_property is None else (target_property + '.')}{adding_key}"
ConfigurationUtils.merge_dict(adding_value, target[adding_key], adding_property, target_root)
elif adding_value_type == "list":
# if value we want to add is a 'list' then define the list entry and extend it with the new values
if adding_key not in target.keys():
target[adding_key] = []
elif type(target[adding_key]).__name__ != "list":
raise TypeError(f"key: {adding_key} type does not match")
existing_list = target[adding_key]
existing_list.extend(adding_value)
target[adding_key] = list(set(existing_list))
# set the equivalent variable
if target_root != target:
adding_property = f"{'' if target_property is None else (target_property + '.')}{adding_key}"
adding_var = adding_property.replace(".", "__").upper()
log.debug(f"[merge_dict] adding new entry {adding_var}: {target[adding_key]}")
target_root[adding_var] = target[adding_key]
else:
# if a scalar/text then just upsert the value
log.debug(f"[merge_dict] adding new entry {adding_key}: {adding_value}")
target[adding_key] = adding_value
# set the equivalent variable
if target_root == target:
adding_var = adding_key.upper()
target[adding_var] = adding_value
else:
adding_property = f"{'' if target_property is None else (target_property + '.')}{adding_key}"
adding_var = adding_property.replace(".", "__").upper()
log.debug(f"[merge_dict] adding new entry {adding_var}: {target[adding_key]}")
target_root[adding_var] = target[adding_key]
log.debug(f"[merge_dict|out] => {target}")
@staticmethod
def find_property(key: str, target: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""
finds a property/variable with key format example "a.b.c" in the target dictionary
Parameters
----------
key : str
the key to search for in the dictionary, in format "a.b.c"
target : Dict[str, Any]
the dictionary where to search for the key
Returns
-------
Optional[Dict[str, Any]]
a dict in the format {"pointer": target, "key": subkey} or None
Raises
------
ValueError
if key or target are None
"""
log.debug(f"[ConfigurationUtils.find_property|in] ({key}, {target})")
if key is None:
raise ValueError("no key name")
if target is None:
raise ValueError("no target")
result = None
# split the key in sub keys and start descending the dict structure from its root
components = key.split(sep=".")
for index, subkey in enumerate(components):
# at every level of the structure check if the current sub key is present
if subkey in target.keys():
if index + 1 == len(components):
# if in last iteration and subkey and is part of the structure then wrap up in this dict format
result = {"pointer": target, "key": subkey}
else:
# if not last iteration, then resume the search with the remaining subkeys in the child structure found
remaining_subkeys = ".".join(components[index + 1 :])
child_structure = target[subkey]
result = ConfigurationUtils.find_property(remaining_subkeys, child_structure)
if result:
# don't iterate further if we have a solution
break
log.debug(f"[ConfigurationUtils.find_property|out] => {result}")
return result
@staticmethod
def get_config_file_paths(config_dir: str, config_file_prefix: str, config_file_suffixes: List[str]):
"""Function that looks for the required config files in dir_path.
Parameters
----------
config_dir : str
The absolute path to a directory to look for files.
config_file_prefix: str
the file name prefix to look for, when appended with suffixes
config_file_suffixes: List[str]
the file name additional suffixes to use when searching for config files
Returns
-------
list
A list with the correct order of the config files.
Raises
------
FileNotFoundError
If no config files are present in directory.
"""
log.info(
f"[ConfigurationUtils.get_config_file_paths|in] ({config_dir}, {config_file_prefix}, "
f"{config_file_suffixes})"
)
file_paths = []
files_to_find = [f"{config_file_prefix}{x}.json" for x in config_file_suffixes]
log.info(f"[ConfigurationUtils.get_config_file_paths] files_to_find: {files_to_find}")
available_files = [x.name for x in Path(config_dir).iterdir() if x.is_file()]
for file_to_find in files_to_find:
if file_to_find in available_files:
file_paths.append(f"{config_dir}/{file_to_find}")
if not file_paths:
raise FileNotFoundError(
f"[ConfigurationUtils.get_config_file_paths] Cannot locate configuration files in specified directory: "
f"{config_dir}."
)
log.info(f"[ConfigurationUtils.get_config_file_paths|out] => {file_paths}")
return file_paths
@staticmethod
def resolve_env_variable(variable: str, default: Optional[str] = None) -> str:
log.info(f"[ConfigurationUtils.resolve_env_variable|in] ({variable}, {default})")
_result = None
log.debug("[ConfigurationUtils.resolve_env_variable] trying to find it in env variables")
try:
_result = os.environ[variable]
except Exception as x:
log.debug(
f"[ConfigurationUtils.resolve_env_variable] not found: {variable}",
exc_info=x,
)
result = _result if _result is not None else default
log.info(f"[ConfigurationUtils.resolve_env_variable|out] => {result}")
return result
@staticmethod
def property_to_variable(prop: str) -> str:
return prop.upper().replace(".", "__")
@staticmethod
def variable_to_property(var: str) -> str:
return var.lower().replace("__", ".")
@staticmethod
def prop_and_var_from_key(key: str) -> Tuple[str, str]:
prop = None
var = None
if 0 == key.count("."):
# assume variable
var = key.upper()
prop = ConfigurationUtils.variable_to_property(var)
else:
prop = key.lower()
var = ConfigurationUtils.property_to_variable(prop)
return prop, var
| en | 0.674183 | merges dictionary values recurrently Parameters ---------- source : Dict[str, Any] the source dictionary to merge target : Dict[str, Any] the destination dictionary target_property: Optional[str] target dict equivalent property if not in the root of the target dict target_root: Optional[Dict[str, Any]] Raises ------ ValueError if source or target are None TypeError when key types do not match across dictionaries # for every entry in the source dictionary # if value we want to add is a 'dict' then define the entry in the target and drill down recursively # if value we want to add is a 'list' then define the list entry and extend it with the new values # set the equivalent variable # if a scalar/text then just upsert the value # set the equivalent variable finds a property/variable with key format example "a.b.c" in the target dictionary Parameters ---------- key : str the key to search for in the dictionary, in format "a.b.c" target : Dict[str, Any] the dictionary where to search for the key Returns ------- Optional[Dict[str, Any]] a dict in the format {"pointer": target, "key": subkey} or None Raises ------ ValueError if key or target are None # split the key in sub keys and start descending the dict structure from its root # at every level of the structure check if the current sub key is present # if in last iteration and subkey and is part of the structure then wrap up in this dict format # if not last iteration, then resume the search with the remaining subkeys in the child structure found # don't iterate further if we have a solution Function that looks for the required config files in dir_path. Parameters ---------- config_dir : str The absolute path to a directory to look for files. config_file_prefix: str the file name prefix to look for, when appended with suffixes config_file_suffixes: List[str] the file name additional suffixes to use when searching for config files Returns ------- list A list with the correct order of the config files. Raises ------ FileNotFoundError If no config files are present in directory. # assume variable | 2.585934 | 3 |
moai/monads/distribution/prior/spatial_softmax.py | tzole1155/moai | 10 | 6630831 | <filename>moai/monads/distribution/prior/spatial_softmax.py
from moai.monads.utils import flatten_spatial_dims
import torch
__all__ = ["SpatialSoftmax"]
#NOTE: see "FlowCap: 2D Human Pose from Optical Flow" for sharpening
#NOTE: see https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
class SpatialSoftmax(torch.nn.Module):
def __init__(self,
temperature: float=1.0, # smoothen out the output by premultiplying input
alpha: float=1.0, # sharpen output
normalize: bool=False, # normalize output
):
super(SpatialSoftmax, self).__init__()
self.temp = temperature
self.alpha = alpha
self.normalize = normalize
#TODO: add inplace version / flag
def forward(self, tensor: torch.Tensor) -> torch.Tensor:
reduced = flatten_spatial_dims(tensor)
if self.temp != 1.0:
reduced = reduced * self.temp
if self.alpha != 1.0:
reduced = reduced ** self.alpha
if self.normalize:
            reduced = reduced / reduced.sum(dim=-1, keepdim=True)  # keepdim keeps the flattened spatial axis so the division broadcasts
softmaxed = torch.nn.functional.softmax(reduced, dim=-1)
return softmaxed.view_as(tensor) | <filename>moai/monads/distribution/prior/spatial_softmax.py
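# Illustrative usage sketch (not part of the module): applied to a batch of
# heatmaps of shape (B, C, H, W), the monad returns a tensor of the same shape
# whose values sum to 1 over the H*W locations of each channel, assuming
# flatten_spatial_dims collapses the spatial axes into one. The tensor below is
# random and only for demonstration:
#
#   logits = torch.randn(2, 17, 64, 64)                  # e.g. 17 joint heatmaps
#   probs = SpatialSoftmax(temperature=10.0)(logits)
#   # probs.shape == logits.shape
#   # probs.flatten(2).sum(-1) is all ones (one distribution per channel)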
from moai.monads.utils import flatten_spatial_dims
import torch
__all__ = ["SpatialSoftmax"]
#NOTE: see "FlowCap: 2D Human Pose from Optical Flow" for sharpening
#NOTE: see https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
class SpatialSoftmax(torch.nn.Module):
def __init__(self,
temperature: float=1.0, # smoothen out the output by premultiplying input
alpha: float=1.0, # sharpen output
normalize: bool=False, # normalize output
):
super(SpatialSoftmax, self).__init__()
self.temp = temperature
self.alpha = alpha
self.normalize = normalize
#TODO: add inplace version / flag
def forward(self, tensor: torch.Tensor) -> torch.Tensor:
reduced = flatten_spatial_dims(tensor)
if self.temp != 1.0:
reduced = reduced * self.temp
if self.alpha != 1.0:
reduced = reduced ** self.alpha
if self.normalize:
            reduced = reduced / reduced.sum(dim=-1, keepdim=True)  # keepdim keeps the flattened spatial axis so the division broadcasts
softmaxed = torch.nn.functional.softmax(reduced, dim=-1)
return softmaxed.view_as(tensor) | en | 0.56343 | #NOTE: see "FlowCap: 2D Human Pose from Optical Flow" for sharpening #NOTE: see https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/ # smoothen out the output by premultiplying input # sharpen output # normalize output #TODO: add inplace version / flag | 2.222785 | 2 |
grid/single.py | hposborn/isoclassify | 0 | 6630832 | #%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from .classify_grid import *
import os, ebf
from astropy.io import ascii
import time
#import mwdust
if __name__ == '__main__':
homedir=os.path.expanduser('~/')
model=ebf.read(homedir+'science/models/MIST/mesa.ebf')
model['rho']=np.log10(model['rho'])
# do this to turn off scaling relation corrections
model['fdnu'][:]=1.
model['avs']=np.zeros(len(model['teff']))
model['dis']=np.zeros(len(model['teff']))
#x.addcoords(338.3683920,-9.0227690)
#dustmodel = mwdust.Combined15()
x=obsdata()
x.addspec([5801.,-99.0,-0.07],[80.,0.0,0.1])
x.addseismo([1240.,63.5],[70.,1.5])
x.addjhk([10.369,10.07,10.025],[0.022,0.018,0.019])
x.addgriz([11.776,11.354,11.238,11.178],[0.02,0.02,0.02,0.02])
#x.addplx(2.71/1e3,0.08/1e3)
paras=classify(input=x,model=model,dustmodel=0.,doplot=1)
| #%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from .classify_grid import *
import os, ebf
from astropy.io import ascii
import time
#import mwdust
if __name__ == '__main__':
homedir=os.path.expanduser('~/')
model=ebf.read(homedir+'science/models/MIST/mesa.ebf')
model['rho']=np.log10(model['rho'])
# do this to turn off scaling relation corrections
model['fdnu'][:]=1.
model['avs']=np.zeros(len(model['teff']))
model['dis']=np.zeros(len(model['teff']))
#x.addcoords(338.3683920,-9.0227690)
#dustmodel = mwdust.Combined15()
x=obsdata()
x.addspec([5801.,-99.0,-0.07],[80.,0.0,0.1])
x.addseismo([1240.,63.5],[70.,1.5])
x.addjhk([10.369,10.07,10.025],[0.022,0.018,0.019])
x.addgriz([11.776,11.354,11.238,11.178],[0.02,0.02,0.02,0.02])
#x.addplx(2.71/1e3,0.08/1e3)
paras=classify(input=x,model=model,dustmodel=0.,doplot=1)
| en | 0.273158 | #%matplotlib inline #import mwdust # do this to turn off scaling relation corrections #x.addcoords(338.3683920,-9.0227690) #dustmodel = mwdust.Combined15() #x.addplx(2.71/1e3,0.08/1e3) | 1.930307 | 2 |
modulo 1/d018/angulo.py | rafa-evangelista/PYTHON | 0 | 6630833 | <reponame>rafa-evangelista/PYTHON
import math
ang=float(input('Digite um ângulo: '))
seno = math.sin(math.radians(ang))       # input is in degrees; math.sin/cos/tan expect radians
cosseno = math.cos(math.radians(ang))
tangente = math.tan(math.radians(ang))
print(' O seno é {}, o cosseno é {} e a tangente é {}'.format (seno, cosseno, tangente))
| import math
ang=float(input('Digite um ângulo: '))
seno = math.sin(math.radians(ang))       # input is in degrees; math.sin/cos/tan expect radians
cosseno = math.cos(math.radians(ang))
tangente = math.tan(math.radians(ang))
print(' O seno é {}, o cosseno é {} e a tangente é {}'.format (seno, cosseno, tangente)) | none | 1 | 3.705699 | 4 |
|
mwa_trigger/VCS_test.py | MWATelescope/mwa_trigger | 3 | 6630834 | <reponame>MWATelescope/mwa_trigger
#! python
"""
Library containing one or more functions to process incoming VOEvent XML strings. This library will
be imported by a long running process, so you can load large data files, etc, at import time, rather than
inside the processevent() function, to save time.
This library only handles SWIFT VOEvents; other types of event would be handled in a separate library.
"""
__version__ = "0.3"
__author__ = ["<NAME>", "<NAME>", "<NAME>"]
import logging
import sys
import astropy
from astropy.coordinates import Angle
from astropy.time import Time
import voeventparse
from . import handlers
from . import triggerservice
log = logging.getLogger('voevent.handlers.VCS_test') # Inherit the logging setup from handlers.py
# Settings
FERMI_POBABILITY_THRESHOLD = 50 # Trigger on Fermi events that have most-likely-prob > this number
LONG_SHORT_LIMIT = 2.05 # seconds
PROJECT_ID = 'D0009'
SECURE_KEY = handlers.get_secure_key(PROJECT_ID)
NOTIFY_LIST = ["<EMAIL>", "<EMAIL>"]
EMAIL_TEMPLATE = """
The VCS_test handler triggered an MWA observation for a
Swift GRB at %(trigtime)s UTC.
Details are:
Trigger ID: %(triggerid)s
RA: %(ra)s hours
Dec: %(dec)s deg
Error Rad: %(err)7.3f deg
"""
EMAIL_SUBJECT_TEMPLATE = "VCS_Test Swift handler trigger for %s"
# state storage
xml_cache = {}
class GRB(handlers.TriggerEvent):
"""
Subclass the TriggerEvent class to add a parameter 'short', relevant only for GRB type events.
"""
def __init__(self, event=None):
handlers.TriggerEvent.__init__(self, event=event)
self.short = False # True if short
# Override or add GRB specific methods here if desired.
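# Illustrative sketch (not part of the handler): a GRB instance is built from the
# first VOEvent packet and then accumulates follow-ups and positions, e.g.
#
#   grb = GRB(event=v)                      # v is a parsed voeventparse object
#   grb.trigger_id = 'SWIFT_00123456'       # the id string here is an assumption
#   grb.add_event(v_followup)               # later packets for the same trigger
#   grb.add_pos((ra, dec, err))             # degrees, as used in handle_grb() below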
def processevent(event='', pretend=True):
"""
Called externally by the voevent_handler script when a new VOEvent is received. Return True if
the event was parsed by this handler, False if it was another type of event that should be
examined by a different handler.
    :param event: A string containing the XML string in VOEvent format
:param pretend: Boolean, True if we don't want to actually schedule the observations.
:return: Boolean, True if this handler processed this event, False to pass it to another handler function.
"""
if sys.version_info.major == 2:
# event arrives as a unicode string but loads requires a non-unicode string.
v = voeventparse.loads(str(event))
else:
v = voeventparse.loads(event.encode('latin-1'))
log.info("Working on: %s" % v.attrib['ivorn'])
isgrb = is_grb(v)
log.debug("GRB? {0}".format(isgrb))
if isgrb:
handle_grb(v, pretend=pretend)
log.info("Finished.")
return isgrb # True if we're handling this event, False if we're rejecting it
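# Illustrative sketch (not part of the handler): the voevent_handler daemon is what
# normally calls processevent(); invoked by hand it looks like the snippet below.
# The file name is an assumption, and pretend=True prevents any real scheduling.
#
#   with open('swift_bat_grb_pos.xml') as f:
#       handled = processevent(event=f.read(), pretend=True)
#   # handled is True only for SWIFT BAT GRB position packets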
def is_grb(v):
"""
Tests to see if this XML packet is a Gamma Ray Burst event (SWIFT or Fermi alert).
:param v: string in VOEvent XML format
:return: Boolean, True if this event is a GRB.
"""
ivorn = v.attrib['ivorn']
trig_list = ["ivo://nasa.gsfc.gcn/SWIFT#BAT_GRB_Pos", ]
swift_fermi = False
for t in trig_list:
if ivorn.find(t) == 0:
swift_fermi = True
break
if not swift_fermi:
return False
else:
grbid = v.find(".//Param[@name='GRB_Identified']").attrib['value']
if grbid != 'true':
return False
return True
def handle_grb(v, pretend=False):
"""
Handles the actual VOEvent parsing, generating observations if appropriate.
:param v: string in VOEvent XML format
:param pretend: Boolean, True if we don't want to actually schedule the observations.
:return: None
"""
log.debug("processing GRB {0}".format(v.attrib['ivorn']))
# trigger = False
if 'SWIFT' in v.attrib['ivorn']:
grbid = v.find(".//Param[@name='GRB_Identified']").attrib['value']
if grbid != 'true':
log.debug("SWIFT alert but not a GRB")
return
log.debug("SWIFT GRB trigger detected")
this_trig_type = "SWIFT"
# cache the event using the trigger id
trig_id = "SWIFT_" + v.attrib['ivorn'].split('_')[-1].split('-')[0]
if trig_id not in xml_cache:
grb = GRB(event=v)
grb.trigger_id = trig_id
# set trigger mode to vcs for now
grb.vcsmode = True
grb.buffered = True
grb.exptime = 12*60
grb.avoidsun = False
xml_cache[trig_id] = grb
else:
grb = xml_cache[trig_id]
grb.add_event(v)
trig_time = float(v.find(".//Param[@name='Integ_Time']").attrib['value'])
if trig_time < LONG_SHORT_LIMIT:
grb.debug("Probably a short GRB: t={0} < 2".format(trig_time))
grb.short = True
trigger = True
else:
grb.debug("Probably a long GRB: t={0} > 2".format(trig_time))
grb.short = False
trigger = True
else:
log.debug("Not a SWIFT GRB.")
log.debug("Not Triggering")
return
if not trigger:
grb.debug("Not Triggering")
return
# get current position
ra, dec, err = handlers.get_position_info(v)
# add it to the list of positions
grb.add_pos((ra, dec, err))
grb.debug("RA {0}, Dec {1}, err {2}".format(ra, dec, err))
req_time_min = 30
# look at the schedule
obslist = triggerservice.obslist(obstime=1800)
if obslist is not None and len(obslist) > 0:
grb.debug("Currently observing:")
grb.debug(str(obslist))
# are we currently observing *this* GRB?
obs = str(obslist[0][1]) # in case the obslist is returning unicode strings
grb.debug("obs {0}, trig {1}".format(obs, trig_id))
# Same GRB trigger from same telescope
if obs == trig_id:
if "SWIFT" in trig_id:
if obs in xml_cache:
prev_short = xml_cache[obs].short
else:
prev_short = False # best bet if we don't know
grb.info("Curently observing a SWIFT trigger")
if grb.short and not prev_short:
grb.info("Interrupting with a short SWIFT GRB")
else:
grb.info("Not interrupting previous obs")
return
else:
grb.info("Not interrupting previous obs")
return
else:
grb.info("Not currently observing any GRBs")
else:
grb.debug("Current schedule empty")
emaildict = {'triggerid': grb.trigger_id,
'trigtime': Time.now().iso,
'ra': Angle(grb.ra[-1], unit=astropy.units.deg).to_string(unit=astropy.units.hour, sep=':'),
'dec': Angle(grb.dec[-1], unit=astropy.units.deg).to_string(unit=astropy.units.deg, sep=':'),
'err': grb.err[-1]}
email_text = EMAIL_TEMPLATE % emaildict
email_subject = EMAIL_SUBJECT_TEMPLATE % grb.trigger_id
# Do the trigger
grb.trigger_observation(ttype=this_trig_type,
obsname=trig_id+"_test", # add test to file name so we don't archive these obs.
time_min=12,
pretend=pretend,
project_id=PROJECT_ID,
secure_key=SECURE_KEY,
email_tolist=NOTIFY_LIST,
email_text=email_text,
email_subject=email_subject)
| #! python
"""
Library containing one or more functions to process incoming VOEvent XML strings. This library will
be imported by a long running process, so you can load large data files, etc, at import time, rather than
inside the processevent() function, to save time.
This library only handles SWIFT VOEvents; other types of event would be handled in a separate library.
"""
__version__ = "0.3"
__author__ = ["<NAME>", "<NAME>", "<NAME>"]
import logging
import sys
import astropy
from astropy.coordinates import Angle
from astropy.time import Time
import voeventparse
from . import handlers
from . import triggerservice
log = logging.getLogger('voevent.handlers.VCS_test') # Inherit the logging setup from handlers.py
# Settings
FERMI_POBABILITY_THRESHOLD = 50 # Trigger on Fermi events that have most-likely-prob > this number
LONG_SHORT_LIMIT = 2.05 # seconds
PROJECT_ID = 'D0009'
SECURE_KEY = handlers.get_secure_key(PROJECT_ID)
NOTIFY_LIST = ["<EMAIL>", "<EMAIL>"]
EMAIL_TEMPLATE = """
The VCS_test handler triggered an MWA observation for a
Swift GRB at %(trigtime)s UTC.
Details are:
Trigger ID: %(triggerid)s
RA: %(ra)s hours
Dec: %(dec)s deg
Error Rad: %(err)7.3f deg
"""
EMAIL_SUBJECT_TEMPLATE = "VCS_Test Swift handler trigger for %s"
# state storage
xml_cache = {}
class GRB(handlers.TriggerEvent):
"""
Subclass the TriggerEvent class to add a parameter 'short', relevant only for GRB type events.
"""
def __init__(self, event=None):
handlers.TriggerEvent.__init__(self, event=event)
self.short = False # True if short
# Override or add GRB specific methods here if desired.
def processevent(event='', pretend=True):
"""
Called externally by the voevent_handler script when a new VOEvent is received. Return True if
the event was parsed by this handler, False if it was another type of event that should be
examined by a different handler.
    :param event: A string containing the XML string in VOEvent format
:param pretend: Boolean, True if we don't want to actually schedule the observations.
:return: Boolean, True if this handler processed this event, False to pass it to another handler function.
"""
if sys.version_info.major == 2:
# event arrives as a unicode string but loads requires a non-unicode string.
v = voeventparse.loads(str(event))
else:
v = voeventparse.loads(event.encode('latin-1'))
log.info("Working on: %s" % v.attrib['ivorn'])
isgrb = is_grb(v)
log.debug("GRB? {0}".format(isgrb))
if isgrb:
handle_grb(v, pretend=pretend)
log.info("Finished.")
return isgrb # True if we're handling this event, False if we're rejecting it
def is_grb(v):
"""
Tests to see if this XML packet is a Gamma Ray Burst event (SWIFT or Fermi alert).
:param v: string in VOEvent XML format
:return: Boolean, True if this event is a GRB.
"""
ivorn = v.attrib['ivorn']
trig_list = ["ivo://nasa.gsfc.gcn/SWIFT#BAT_GRB_Pos", ]
swift_fermi = False
for t in trig_list:
if ivorn.find(t) == 0:
swift_fermi = True
break
if not swift_fermi:
return False
else:
grbid = v.find(".//Param[@name='GRB_Identified']").attrib['value']
if grbid != 'true':
return False
return True
def handle_grb(v, pretend=False):
"""
Handles the actual VOEvent parsing, generating observations if appropriate.
:param v: string in VOEvent XML format
:param pretend: Boolean, True if we don't want to actually schedule the observations.
:return: None
"""
log.debug("processing GRB {0}".format(v.attrib['ivorn']))
# trigger = False
if 'SWIFT' in v.attrib['ivorn']:
grbid = v.find(".//Param[@name='GRB_Identified']").attrib['value']
if grbid != 'true':
log.debug("SWIFT alert but not a GRB")
return
log.debug("SWIFT GRB trigger detected")
this_trig_type = "SWIFT"
# cache the event using the trigger id
trig_id = "SWIFT_" + v.attrib['ivorn'].split('_')[-1].split('-')[0]
if trig_id not in xml_cache:
grb = GRB(event=v)
grb.trigger_id = trig_id
# set trigger mode to vcs for now
grb.vcsmode = True
grb.buffered = True
grb.exptime = 12*60
grb.avoidsun = False
xml_cache[trig_id] = grb
else:
grb = xml_cache[trig_id]
grb.add_event(v)
trig_time = float(v.find(".//Param[@name='Integ_Time']").attrib['value'])
if trig_time < LONG_SHORT_LIMIT:
grb.debug("Probably a short GRB: t={0} < 2".format(trig_time))
grb.short = True
trigger = True
else:
grb.debug("Probably a long GRB: t={0} > 2".format(trig_time))
grb.short = False
trigger = True
else:
log.debug("Not a SWIFT GRB.")
log.debug("Not Triggering")
return
if not trigger:
grb.debug("Not Triggering")
return
# get current position
ra, dec, err = handlers.get_position_info(v)
# add it to the list of positions
grb.add_pos((ra, dec, err))
grb.debug("RA {0}, Dec {1}, err {2}".format(ra, dec, err))
req_time_min = 30
# look at the schedule
obslist = triggerservice.obslist(obstime=1800)
if obslist is not None and len(obslist) > 0:
grb.debug("Currently observing:")
grb.debug(str(obslist))
# are we currently observing *this* GRB?
obs = str(obslist[0][1]) # in case the obslist is returning unicode strings
grb.debug("obs {0}, trig {1}".format(obs, trig_id))
# Same GRB trigger from same telescope
if obs == trig_id:
if "SWIFT" in trig_id:
if obs in xml_cache:
prev_short = xml_cache[obs].short
else:
prev_short = False # best bet if we don't know
grb.info("Curently observing a SWIFT trigger")
if grb.short and not prev_short:
grb.info("Interrupting with a short SWIFT GRB")
else:
grb.info("Not interrupting previous obs")
return
else:
grb.info("Not interrupting previous obs")
return
else:
grb.info("Not currently observing any GRBs")
else:
grb.debug("Current schedule empty")
emaildict = {'triggerid': grb.trigger_id,
'trigtime': Time.now().iso,
'ra': Angle(grb.ra[-1], unit=astropy.units.deg).to_string(unit=astropy.units.hour, sep=':'),
'dec': Angle(grb.dec[-1], unit=astropy.units.deg).to_string(unit=astropy.units.deg, sep=':'),
'err': grb.err[-1]}
email_text = EMAIL_TEMPLATE % emaildict
email_subject = EMAIL_SUBJECT_TEMPLATE % grb.trigger_id
# Do the trigger
grb.trigger_observation(ttype=this_trig_type,
obsname=trig_id+"_test", # add test to file name so we don't archive these obs.
time_min=12,
pretend=pretend,
project_id=PROJECT_ID,
secure_key=SECURE_KEY,
email_tolist=NOTIFY_LIST,
email_text=email_text,
email_subject=email_subject) | en | 0.813603 | #! python Library containing one or more functions to process incoming VOEvent XML strings. This library will be imported by a long running process, so you can load large data files, etc, at import time, rather than inside the processevent() function, to save time. This library only handles SWIFT VOEvents, other types of event would be handled in a seperate library. # Inherit the logging setup from handlers.py # Settings # Trigger on Fermi events that have most-likely-prob > this number # seconds The VCS_test handler triggered an MWA observation for a Swift GRB at %(trigtime)s UTC. Details are: Trigger ID: %(triggerid)s RA: %(ra)s hours Dec: %(dec)s deg Error Rad: %(err)7.3f deg # state storage Subclass the TriggerEvent class to add a parameter 'short', relevant only for GRB type events. # True if short # Override or add GRB specific methods here if desired. Called externally by the voevent_handler script when a new VOEvent is received. Return True if the event was parsed by this handler, False if it was another type of event that should be examined by a different handler. :param event: A string containg the XML string in VOEvent format :param pretend: Boolean, True if we don't want to actually schedule the observations. :return: Boolean, True if this handler processed this event, False to pass it to another handler function. # event arrives as a unicode string but loads requires a non-unicode string. # True if we're handling this event, False if we're rejecting it Tests to see if this XML packet is a Gamma Ray Burst event (SWIFT or Fermi alert). :param v: string in VOEvent XML format :return: Boolean, True if this event is a GRB. #BAT_GRB_Pos", ] Handles the actual VOEvent parsing, generating observations if appropriate. :param v: string in VOEvent XML format :param pretend: Boolean, True if we don't want to actually schedule the observations. :return: None # trigger = False # cache the event using the trigger id # set trigger mode to vcs for now # get current position # add it to the list of positions # look at the schedule # are we currently observing *this* GRB? # in case the obslist is returning unicode strings # Same GRB trigger from same telescope # best bet if we don't know # Do the trigger # add test to file name so we don't archive these obs. | 2.38329 | 2 |
tests/pipelines/test_unflatten.py | bidhive/kingfisher-collect | 0 | 6630835 | import pytest
from flattentool.input import BadXLSXZipFile
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from kingfisher_scrapy.items import File
from kingfisher_scrapy.pipelines import Unflatten
from tests import spider_with_crawler
def test_process_item_csv():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'test.csv',
'data': b'data',
'data_type': 'release_package',
'url': 'http://test.com/test.csv',
})
assert pipeline.process_item(item, spider) == item
def test_process_item_xlsx():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'test.xlsx',
'data': save_virtual_workbook(Workbook()),
'data_type': 'release_package',
'url': 'http://test.com/test.xlsx',
})
assert pipeline.process_item(item, spider) == item
def test_process_item_extension_error():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'file',
'data': b'data',
'data_type': 'release_package',
'url': 'http://test.com/file',
})
with pytest.raises(NotImplementedError):
pipeline.process_item(item, spider)
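# Summary of the behaviour these tests pin down (descriptive note, not the
# pipeline's real source): Unflatten.process_item() dispatches on the file
# extension of item['file_name'] -- .csv and .xlsx items pass through, any other
# extension raises NotImplementedError (above), and an .xlsx whose bytes are not
# a valid zip archive surfaces flattentool's BadXLSXZipFile (test below).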
def test_process_item_xlsx_error():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'test.xlsx',
'data': b'data',
'data_type': 'release_package',
'url': 'http://test.com/test.xlsx',
})
with pytest.raises(BadXLSXZipFile):
pipeline.process_item(item, spider)
| import pytest
from flattentool.input import BadXLSXZipFile
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from kingfisher_scrapy.items import File
from kingfisher_scrapy.pipelines import Unflatten
from tests import spider_with_crawler
def test_process_item_csv():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'test.csv',
'data': b'data',
'data_type': 'release_package',
'url': 'http://test.com/test.csv',
})
assert pipeline.process_item(item, spider) == item
def test_process_item_xlsx():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'test.xlsx',
'data': save_virtual_workbook(Workbook()),
'data_type': 'release_package',
'url': 'http://test.com/test.xlsx',
})
assert pipeline.process_item(item, spider) == item
def test_process_item_extension_error():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'file',
'data': b'data',
'data_type': 'release_package',
'url': 'http://test.com/file',
})
with pytest.raises(NotImplementedError):
pipeline.process_item(item, spider)
def test_process_item_xlsx_error():
spider = spider_with_crawler(unflatten=True)
pipeline = Unflatten()
item = File({
'file_name': 'test.xlsx',
'data': b'data',
'data_type': 'release_package',
'url': 'http://test.com/test.xlsx',
})
with pytest.raises(BadXLSXZipFile):
pipeline.process_item(item, spider)
| none | 1 | 2.270844 | 2 |
|
Pyrado/scripts/simulation/sim_policy_live_rand_env.py | jacarvalho/SimuRLacra | 0 | 6630836 | <filename>Pyrado/scripts/simulation/sim_policy_live_rand_env.py
"""
Simulate (with animation) a rollout in a live perturbed environment.
"""
import pyrado
from pyrado.domain_randomization.domain_parameter import UniformDomainParam
from pyrado.domain_randomization.utils import print_domain_params, get_default_randomizer
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive
from pyrado.logger.experiment import ask_for_experiment
from pyrado.sampling.rollout import rollout, after_rollout_query
from pyrado.utils.data_types import RenderMode
from pyrado.utils.experiments import load_experiment
from pyrado.utils.input_output import print_cbt
from pyrado.utils.argparser import get_argparser
if __name__ == '__main__':
# Parse command line arguments
args = get_argparser().parse_args()
# Get the experiment's directory to load from
ex_dir = ask_for_experiment()
# Get the simulation environment
env, policy, kwout = load_experiment(ex_dir)
# Override the time step size if specified
if args.dt is not None:
env.dt = args.dt
if not isinstance(env, DomainRandWrapperLive):
# Add default domain randomization wrapper with action delay
randomizer = get_default_randomizer(env)
env = ActDelayWrapper(env)
randomizer.add_domain_params(
UniformDomainParam(name='act_delay', mean=5, halfspan=5, clip_lo=0, roundint=True))
env = DomainRandWrapperLive(env, randomizer)
print_cbt('Using default randomizer with additional action delay.', 'c')
else:
print_cbt('Using loaded randomizer.', 'c')
# Simulate
done, state, param = False, None, None
while not done:
ro = rollout(env, policy, render_mode=RenderMode(text=args.verbose, video=True), eval=True,
reset_kwargs=dict(domain_param=param, init_state=state)) # calls env.reset()
print_domain_params(env.domain_param)
print_cbt(f'Return: {ro.undiscounted_return()}', 'g', bright=True)
done, state, param = after_rollout_query(env, policy, ro)
pyrado.close_vpython()
| <filename>Pyrado/scripts/simulation/sim_policy_live_rand_env.py
"""
Simulate (with animation) a rollout in a live perturbed environment.
"""
import pyrado
from pyrado.domain_randomization.domain_parameter import UniformDomainParam
from pyrado.domain_randomization.utils import print_domain_params, get_default_randomizer
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive
from pyrado.logger.experiment import ask_for_experiment
from pyrado.sampling.rollout import rollout, after_rollout_query
from pyrado.utils.data_types import RenderMode
from pyrado.utils.experiments import load_experiment
from pyrado.utils.input_output import print_cbt
from pyrado.utils.argparser import get_argparser
if __name__ == '__main__':
# Parse command line arguments
args = get_argparser().parse_args()
# Get the experiment's directory to load from
ex_dir = ask_for_experiment()
# Get the simulation environment
env, policy, kwout = load_experiment(ex_dir)
# Override the time step size if specified
if args.dt is not None:
env.dt = args.dt
if not isinstance(env, DomainRandWrapperLive):
# Add default domain randomization wrapper with action delay
randomizer = get_default_randomizer(env)
env = ActDelayWrapper(env)
randomizer.add_domain_params(
UniformDomainParam(name='act_delay', mean=5, halfspan=5, clip_lo=0, roundint=True))
env = DomainRandWrapperLive(env, randomizer)
print_cbt('Using default randomizer with additional action delay.', 'c')
else:
print_cbt('Using loaded randomizer.', 'c')
# Simulate
done, state, param = False, None, None
while not done:
ro = rollout(env, policy, render_mode=RenderMode(text=args.verbose, video=True), eval=True,
reset_kwargs=dict(domain_param=param, init_state=state)) # calls env.reset()
print_domain_params(env.domain_param)
print_cbt(f'Return: {ro.undiscounted_return()}', 'g', bright=True)
done, state, param = after_rollout_query(env, policy, ro)
pyrado.close_vpython()
| en | 0.602717 | Simulate (with animation) a rollout in a live perturbed environment. # Parse command line arguments # Get the experiment's directory to load from # Get the simulation environment # Override the time step size if specified # Add default domain randomization wrapper with action delay # Simulate # calls env.reset() | 2.179733 | 2 |
blaseball_mike/models/statsheet.py | rgallo/blaseball-mike | 14 | 6630837 | <filename>blaseball_mike/models/statsheet.py
from collections import OrderedDict
from .base import Base
from .. import database
class PlayerStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "e80b9497-c604-456d-9bee-c860d4759b14"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_player_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
class TeamStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "07b2b5bf-9eeb-4eff-9be9-d0f66c687f76"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_team_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
@Base.lazy_load("_player_stat_ids", cache_name="_player_stats", default_value=list())
def player_stats(self):
return list(PlayerStatsheet.load(self._player_stat_ids).values())
class GameStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "f852abec-b80e-40e2-b213-f0368d4e7f57"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_game_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
@classmethod
def load_by_day(cls, season, day):
from .game import Game
games = Game.load_by_day(season, day)
return {k: g.statsheet for k, g in games.items()}
def team_stats(self):
if getattr(self, '_team_stats', None):
return self._team_stats
self._team_stats = TeamStatsheet.load([
self._home_team_stats_id,
self._away_team_stats_id,
])
return self._team_stats
@property
def away_team_stats(self):
return self.team_stats()[self._away_team_stats_id]
@away_team_stats.setter
def away_team_stats(self, value):
self._away_team_stats_id = value
self._team_stats = None
self.key_transform_lookup["away_team_stats"] = "_away_team_stats_id"
@property
def home_team_stats(self):
return self.team_stats()[self._home_team_stats_id]
@home_team_stats.setter
def home_team_stats(self, value):
self._home_team_stats_id = value
self._team_stats = None
self.key_transform_lookup["home_team_stats"] = "_home_team_stats_id"
class SeasonStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "64392ad5-e14c-42c0-825c-c85da29addaa"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_season_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
@classmethod
def load_by_season(cls, season):
"""Season is 1 indexed."""
from .season import Season
season = Season.load(season)
return season.stats
@Base.lazy_load("_team_stat_ids", cache_name="_team_stats", default_value=list())
def team_stats(self):
return list(TeamStatsheet.load(self._team_stat_ids).values())
| <filename>blaseball_mike/models/statsheet.py
from collections import OrderedDict
from .base import Base
from .. import database
class PlayerStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "e80b9497-c604-456d-9bee-c860d4759b14"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_player_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
class TeamStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "07b2b5bf-9eeb-4eff-9be9-d0f66c687f76"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_team_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
@Base.lazy_load("_player_stat_ids", cache_name="_player_stats", default_value=list())
def player_stats(self):
return list(PlayerStatsheet.load(self._player_stat_ids).values())
class GameStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "f852abec-b80e-40e2-b213-f0368d4e7f57"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_game_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
@classmethod
def load_by_day(cls, season, day):
from .game import Game
games = Game.load_by_day(season, day)
return {k: g.statsheet for k, g in games.items()}
def team_stats(self):
if getattr(self, '_team_stats', None):
return self._team_stats
self._team_stats = TeamStatsheet.load([
self._home_team_stats_id,
self._away_team_stats_id,
])
return self._team_stats
@property
def away_team_stats(self):
return self.team_stats()[self._away_team_stats_id]
@away_team_stats.setter
def away_team_stats(self, value):
self._away_team_stats_id = value
self._team_stats = None
self.key_transform_lookup["away_team_stats"] = "_away_team_stats_id"
@property
def home_team_stats(self):
return self.team_stats()[self._home_team_stats_id]
@home_team_stats.setter
def home_team_stats(self, value):
self._home_team_stats_id = value
self._team_stats = None
self.key_transform_lookup["home_team_stats"] = "_home_team_stats_id"
class SeasonStatsheet(Base):
@classmethod
def _get_fields(cls):
id_ = "64392ad5-e14c-42c0-825c-c85da29addaa"
p = cls.load(id_).get(id_)
return [cls._from_api_conversion(x) for x in p.fields]
@classmethod
def load(cls, ids):
stats = database.get_season_statsheets(ids)
stats_dict = OrderedDict()
for k, v in stats.items():
stats_dict[k] = cls(v)
return stats_dict
@classmethod
def load_by_season(cls, season):
"""Season is 1 indexed."""
from .season import Season
season = Season.load(season)
return season.stats
@Base.lazy_load("_team_stat_ids", cache_name="_team_stats", default_value=list())
def team_stats(self):
return list(TeamStatsheet.load(self._team_stat_ids).values())
| en | 0.990269 | Season is 1 indexed. | 2.49964 | 2 |
poky/meta/lib/oeqa/controllers/masterimage.py | buildlinux/unityos | 0 | 6630838 | # Copyright (C) 2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This module adds support to testimage.bbclass to deploy images and run
# tests using a "master image" - this is a "known good" image that is
# installed onto the device as part of initial setup and will be booted into
# with no interaction; we can then use it to deploy the image to be tested
# to a second partition before running the tests.
#
# For an example master image, see core-image-testmaster
# (meta/recipes-extended/images/core-image-testmaster.bb)
import os
import bb
import traceback
import time
import subprocess
import oeqa.targetcontrol
import oeqa.utils.sshcontrol as sshcontrol
import oeqa.utils.commands as commands
from oeqa.utils import CommandError
from abc import ABCMeta, abstractmethod
class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta):
supported_image_fstypes = ['tar.gz', 'tar.bz2']
def __init__(self, d):
super(MasterImageHardwareTarget, self).__init__(d)
# target ip
addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
self.ip = addr.split(":")[0]
try:
self.port = addr.split(":")[1]
except IndexError:
self.port = None
bb.note("Target IP: %s" % self.ip)
self.server_ip = d.getVar("TEST_SERVER_IP")
if not self.server_ip:
try:
self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1]
except Exception as e:
bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
bb.note("Server IP: %s" % self.server_ip)
# test rootfs + kernel
self.image_fstype = self.get_image_fstype(d)
self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
if not os.path.isfile(self.rootfs):
# we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
# the same as the config with which the image was build, ie
# you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
# and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
bb.fatal("No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \
\nExpected path: %s" % self.rootfs)
if not os.path.isfile(self.kernel):
bb.fatal("No kernel found. Expected path: %s" % self.kernel)
# master ssh connection
self.master = None
# if the user knows what they are doing, then by all means...
self.user_cmds = d.getVar("TEST_DEPLOY_CMDS")
self.deploy_cmds = None
# this is the name of the command that controls the power for a board
# e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
# the command should take as the last argument "off" and "on" and "cycle" (off, on)
self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD") or None
self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS", False) or ""
self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD") or None
self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS", False) or ""
self.origenv = os.environ
if self.powercontrol_cmd or self.serialcontrol_cmd:
# the external script for controlling power might use ssh
# ssh + keys means we need the original user env
bborigenv = d.getVar("BB_ORIGENV", False) or {}
for key in bborigenv:
val = bborigenv.getVar(key)
if val is not None:
self.origenv[key] = str(val)
if self.powercontrol_cmd:
if self.powercontrol_args:
self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
if self.serialcontrol_cmd:
if self.serialcontrol_args:
self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args)
def power_ctl(self, msg):
if self.powercontrol_cmd:
cmd = "%s %s" % (self.powercontrol_cmd, msg)
try:
commands.runCmd(cmd, assert_error=False, preexec_fn=os.setsid, env=self.origenv)
except CommandError as e:
bb.fatal(str(e))
def power_cycle(self, conn):
if self.powercontrol_cmd:
# be nice, don't just cut power
conn.run("shutdown -h now")
time.sleep(10)
self.power_ctl("cycle")
else:
status, output = conn.run("reboot")
if status != 0:
bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)
def _wait_until_booted(self):
''' Waits until the target device has booted (if we have just power cycled it) '''
# Subclasses with better methods of determining boot can override this
time.sleep(120)
def deploy(self):
# base class just sets the ssh log file for us
super(MasterImageHardwareTarget, self).deploy()
self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
status, output = self.master.run("cat /etc/masterimage")
if status != 0:
# We're not booted into the master image, so try rebooting
bb.plain("%s - booting into the master image" % self.pn)
self.power_ctl("cycle")
self._wait_until_booted()
bb.plain("%s - deploying image on target" % self.pn)
status, output = self.master.run("cat /etc/masterimage")
if status != 0:
bb.fatal("No ssh connectivity or target isn't running a master image.\n%s" % output)
if self.user_cmds:
self.deploy_cmds = self.user_cmds.split("\n")
try:
self._deploy()
except Exception as e:
bb.fatal("Failed deploying test image: %s" % e)
@abstractmethod
def _deploy(self):
pass
def start(self, params=None):
bb.plain("%s - boot test image on target" % self.pn)
self._start()
# set the ssh object for the target/test image
self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
bb.plain("%s - start running tests" % self.pn)
@abstractmethod
def _start(self):
pass
def stop(self):
bb.plain("%s - reboot/powercycle target" % self.pn)
self.power_cycle(self.connection)
class SystemdbootTarget(MasterImageHardwareTarget):
def __init__(self, d):
super(SystemdbootTarget, self).__init__(d)
# this the value we need to set in the LoaderEntryOneShot EFI variable
# so the system boots the 'test' bootloader label and not the default
# The first four bytes are EFI bits, and the rest is an utf-16le string
# (EFI vars values need to be utf-16)
# $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
# 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
self.deploy_cmds = [
'mount -L boot /boot',
'mkdir -p /mnt/testrootfs',
'mount -L testrootfs /mnt/testrootfs',
'modprobe efivarfs',
'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
'cp ~/test-kernel /boot',
'rm -rf /mnt/testrootfs/*',
'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
]
def _deploy(self):
# make sure these aren't mounted
self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
# from now on, every deploy cmd should return 0
# else an exception will be thrown by sshcontrol
self.master.ignore_status = False
self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
self.master.copy_to(self.kernel, "~/test-kernel")
for cmd in self.deploy_cmds:
self.master.run(cmd)
def _start(self, params=None):
self.power_cycle(self.master)
# there are better ways than a timeout but this should work for now
time.sleep(120)
class SystemdbootTarget(MasterImageHardwareTarget):
def __init__(self, d):
super(SystemdbootTarget, self).__init__(d)
# this the value we need to set in the LoaderEntryOneShot EFI variable
# so the system boots the 'test' bootloader label and not the default
# The first four bytes are EFI bits, and the rest is an utf-16le string
# (EFI vars values need to be utf-16)
# $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
# 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
self.deploy_cmds = [
'mount -L boot /boot',
'mkdir -p /mnt/testrootfs',
'mount -L testrootfs /mnt/testrootfs',
'modprobe efivarfs',
'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
'cp ~/test-kernel /boot',
'rm -rf /mnt/testrootfs/*',
'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
]
def _deploy(self):
# make sure these aren't mounted
self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
# from now on, every deploy cmd should return 0
# else an exception will be thrown by sshcontrol
self.master.ignore_status = False
self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
self.master.copy_to(self.kernel, "~/test-kernel")
for cmd in self.deploy_cmds:
self.master.run(cmd)
def _start(self, params=None):
self.power_cycle(self.master)
# there are better ways than a timeout but this should work for now
time.sleep(120)
| # Copyright (C) 2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# This module adds support to testimage.bbclass to deploy images and run
# tests using a "master image" - this is a "known good" image that is
# installed onto the device as part of initial setup and will be booted into
# with no interaction; we can then use it to deploy the image to be tested
# to a second partition before running the tests.
#
# For an example master image, see core-image-testmaster
# (meta/recipes-extended/images/core-image-testmaster.bb)
import os
import bb
import traceback
import time
import subprocess
import oeqa.targetcontrol
import oeqa.utils.sshcontrol as sshcontrol
import oeqa.utils.commands as commands
from oeqa.utils import CommandError
from abc import ABCMeta, abstractmethod
class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta):
supported_image_fstypes = ['tar.gz', 'tar.bz2']
def __init__(self, d):
super(MasterImageHardwareTarget, self).__init__(d)
# target ip
addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
self.ip = addr.split(":")[0]
try:
self.port = addr.split(":")[1]
except IndexError:
self.port = None
bb.note("Target IP: %s" % self.ip)
self.server_ip = d.getVar("TEST_SERVER_IP")
if not self.server_ip:
try:
self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1]
except Exception as e:
bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
bb.note("Server IP: %s" % self.server_ip)
# test rootfs + kernel
self.image_fstype = self.get_image_fstype(d)
self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
if not os.path.isfile(self.rootfs):
# we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
# the same as the config with which the image was build, ie
# you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
# and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
bb.fatal("No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \
\nExpected path: %s" % self.rootfs)
if not os.path.isfile(self.kernel):
bb.fatal("No kernel found. Expected path: %s" % self.kernel)
# master ssh connection
self.master = None
# if the user knows what they are doing, then by all means...
self.user_cmds = d.getVar("TEST_DEPLOY_CMDS")
self.deploy_cmds = None
# this is the name of the command that controls the power for a board
# e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
# the command should take as the last argument "off" and "on" and "cycle" (off, on)
self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD") or None
self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS", False) or ""
self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD") or None
self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS", False) or ""
self.origenv = os.environ
if self.powercontrol_cmd or self.serialcontrol_cmd:
# the external script for controlling power might use ssh
# ssh + keys means we need the original user env
bborigenv = d.getVar("BB_ORIGENV", False) or {}
for key in bborigenv:
val = bborigenv.getVar(key)
if val is not None:
self.origenv[key] = str(val)
if self.powercontrol_cmd:
if self.powercontrol_args:
self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
if self.serialcontrol_cmd:
if self.serialcontrol_args:
self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args)
def power_ctl(self, msg):
if self.powercontrol_cmd:
cmd = "%s %s" % (self.powercontrol_cmd, msg)
try:
commands.runCmd(cmd, assert_error=False, preexec_fn=os.setsid, env=self.origenv)
except CommandError as e:
bb.fatal(str(e))
def power_cycle(self, conn):
if self.powercontrol_cmd:
# be nice, don't just cut power
conn.run("shutdown -h now")
time.sleep(10)
self.power_ctl("cycle")
else:
status, output = conn.run("reboot")
if status != 0:
bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)
def _wait_until_booted(self):
''' Waits until the target device has booted (if we have just power cycled it) '''
# Subclasses with better methods of determining boot can override this
time.sleep(120)
def deploy(self):
# base class just sets the ssh log file for us
super(MasterImageHardwareTarget, self).deploy()
self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
status, output = self.master.run("cat /etc/masterimage")
if status != 0:
# We're not booted into the master image, so try rebooting
bb.plain("%s - booting into the master image" % self.pn)
self.power_ctl("cycle")
self._wait_until_booted()
bb.plain("%s - deploying image on target" % self.pn)
status, output = self.master.run("cat /etc/masterimage")
if status != 0:
bb.fatal("No ssh connectivity or target isn't running a master image.\n%s" % output)
if self.user_cmds:
self.deploy_cmds = self.user_cmds.split("\n")
try:
self._deploy()
except Exception as e:
bb.fatal("Failed deploying test image: %s" % e)
@abstractmethod
def _deploy(self):
pass
def start(self, params=None):
bb.plain("%s - boot test image on target" % self.pn)
self._start()
# set the ssh object for the target/test image
self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
bb.plain("%s - start running tests" % self.pn)
@abstractmethod
def _start(self):
pass
def stop(self):
bb.plain("%s - reboot/powercycle target" % self.pn)
self.power_cycle(self.connection)
class SystemdbootTarget(MasterImageHardwareTarget):
def __init__(self, d):
super(SystemdbootTarget, self).__init__(d)
# this the value we need to set in the LoaderEntryOneShot EFI variable
# so the system boots the 'test' bootloader label and not the default
# The first four bytes are EFI bits, and the rest is an utf-16le string
# (EFI vars values need to be utf-16)
# $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
# 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
self.deploy_cmds = [
'mount -L boot /boot',
'mkdir -p /mnt/testrootfs',
'mount -L testrootfs /mnt/testrootfs',
'modprobe efivarfs',
'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
'cp ~/test-kernel /boot',
'rm -rf /mnt/testrootfs/*',
'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
]
def _deploy(self):
# make sure these aren't mounted
self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
# from now on, every deploy cmd should return 0
# else an exception will be thrown by sshcontrol
self.master.ignore_status = False
self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
self.master.copy_to(self.kernel, "~/test-kernel")
for cmd in self.deploy_cmds:
self.master.run(cmd)
def _start(self, params=None):
self.power_cycle(self.master)
# there are better ways than a timeout but this should work for now
time.sleep(120)
class SystemdbootTarget(MasterImageHardwareTarget):
def __init__(self, d):
super(SystemdbootTarget, self).__init__(d)
# this the value we need to set in the LoaderEntryOneShot EFI variable
# so the system boots the 'test' bootloader label and not the default
# The first four bytes are EFI bits, and the rest is an utf-16le string
# (EFI vars values need to be utf-16)
# $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
# 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
self.deploy_cmds = [
'mount -L boot /boot',
'mkdir -p /mnt/testrootfs',
'mount -L testrootfs /mnt/testrootfs',
'modprobe efivarfs',
'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
'cp ~/test-kernel /boot',
'rm -rf /mnt/testrootfs/*',
'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
]
def _deploy(self):
# make sure these aren't mounted
self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
# from now on, every deploy cmd should return 0
# else an exception will be thrown by sshcontrol
self.master.ignore_status = False
self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
self.master.copy_to(self.kernel, "~/test-kernel")
for cmd in self.deploy_cmds:
self.master.run(cmd)
def _start(self, params=None):
self.power_cycle(self.master)
# there are better ways than a timeout but this should work for now
time.sleep(120)
| en | 0.865165 | # Copyright (C) 2014 Intel Corporation # # Released under the MIT license (see COPYING.MIT) # This module adds support to testimage.bbclass to deploy images and run # tests using a "master image" - this is a "known good" image that is # installed onto the device as part of initial setup and will be booted into # with no interaction; we can then use it to deploy the image to be tested # to a second partition before running the tests. # # For an example master image, see core-image-testmaster # (meta/recipes-extended/images/core-image-testmaster.bb) # target ip # test rootfs + kernel # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be # the same as the config with which the image was build, ie # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz" # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage # master ssh connection # if the user knows what they are doing, then by all means... # this is the name of the command that controls the power for a board # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants" # the command should take as the last argument "off" and "on" and "cycle" (off, on) # the external script for controlling power might use ssh # ssh + keys means we need the original user env # be nice, don't just cut power Waits until the target device has booted (if we have just power cycled it) # Subclasses with better methods of determining boot can override this # base class just sets the ssh log file for us # We're not booted into the master image, so try rebooting # set the ssh object for the target/test image # this the value we need to set in the LoaderEntryOneShot EFI variable # so the system boots the 'test' bootloader label and not the default # The first four bytes are EFI bits, and the rest is an utf-16le string # (EFI vars values need to be utf-16) # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...| # make sure these aren't mounted # from now on, every deploy cmd should return 0 # else an exception will be thrown by sshcontrol # there are better ways than a timeout but this should work for now # this the value we need to set in the LoaderEntryOneShot EFI variable # so the system boots the 'test' bootloader label and not the default # The first four bytes are EFI bits, and the rest is an utf-16le string # (EFI vars values need to be utf-16) # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...| # make sure these aren't mounted # from now on, every deploy cmd should return 0 # else an exception will be thrown by sshcontrol # there are better ways than a timeout but this should work for now | 2.062491 | 2 |
appionlib/apCtf/genctf.py | vossman/ctfeval | 6 | 6630839 | <gh_stars>1-10
#!/usr/bin/env python
import time
import math
import numpy
from appionlib import apDisplay
from appionlib.apCtf import ctftools
debug = False
#===================
def generateCTF1d(radii=None, focus=1.0e-6, cs=2e-3, volts=120000, ampconst=0.07,
failParams=False, overfocus=False):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
if debug is True:
print "generateCTF1dFromRadii()"
if radii is None:
radii = generateRadii1d(numpoints=256, pixelsize=1e-10)
if debug is True:
apDisplay.printColor("generateCTF radii: 1/%.2fA --> 1/%.2fA"%(1/radii[1]*1e10, 1/radii[-1]*1e10), "cyan")
t0 = time.time()
checkParams(focus1=focus, focus2=focus, cs=cs, volts=volts, ampconst=ampconst, failParams=failParams)
lamb = ctftools.getTEMLambda(volts)
s = radii
pi = math.pi
if overfocus is True:
focus = -1.0*focus
gamma = -0.5*pi*cs*(lamb**3)*(s**4) + pi*focus*lamb*(s**2)
if overfocus is True:
gamma = -1.0*gamma
A = ampconst
B = math.sqrt(1.0 - ampconst**2)
prectf = A*numpy.cos(gamma) + B*numpy.sin(gamma)
ctf = prectf**2
if debug is True:
print "generate 1D ctf complete in %.9f sec"%(time.time()-t0)
return ctf
#===================
def getDiffResForOverfocus(radii=None, cs=2e-3, volts=120000):
"""
given Cs and kV, determine the initial resolution where the difference between
overfocus and underfocus is clearly visible.
value returned in Angstroms, but radii must be in meters
"""
if debug is True:
print "getDiffResForOverfocus()"
if debug is True:
apDisplay.printColor("getDiffRes radii: 1/%.2fA --> 1/%.2fA"%(1/radii[1]*1e10, 1/radii[-1]*1e10), "cyan")
t0 = time.time()
checkParams(focus1=1.0e-6, focus2=1.0e-6, cs=cs, volts=volts, ampconst=0.0, failParams=False)
lamb = ctftools.getTEMLambda(volts)
s = radii
pi = math.pi
csgamma = 2*pi*0.25*cs*(lamb**3)*(s**4)
#over/under-focus difference is visible when Cs component is greater than 0.05
index = numpy.searchsorted(csgamma, 0.03)
diffres = 1.0/radii[index-1]*1e10
apDisplay.printColor("Overfocus/Underfocus difference resolution is: 1/%.2fA"%(diffres), "cyan")
if debug is True:
print "difference resolution complete in %.9f sec"%(time.time()-t0)
return diffres
#===================
def generateCTF1dACE2(radii=None, focus=1.0e-6, cs=2e-3, volts=120000, ampconst=0.07, failParams=False):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
if debug is True:
print "generateCTF1dFromRadii()"
t0 = time.time()
checkParams(focus1=focus, focus2=focus, cs=cs, volts=volts, ampconst=ampconst, failParams=failParams)
minres = 1e10/radii.min()
maxres = 1e10/radii.max()
if debug is True:
print "** CTF limits %.1f A -->> %.1fA"%(minres, maxres)
if maxres < 2.0 or maxres > 50.0:
apDisplay.printError("CTF limits are incorrect %.1f A -->> %.1fA"%(minres, maxres))
wavelength = ctftools.getTEMLambda(volts)
x4 = math.pi/2.0 * wavelength**3 * cs
x2 = math.pi * wavelength
x0 = 1.0*math.asin(ampconst) #CORRECT
if debug is True:
print "x0 shift %.1f degrees"%(math.degrees(x0))
radiisq = radii**2
gamma = (x4 * radiisq**2) + (-focus * x2 * radiisq) + (x0)
#ctf = -1.0*numpy.cos(gamma) #WRONG
#ctf = -1.0*numpy.sin(gamma) #CORRECT
ctf = 1.0*numpy.sin(gamma) #MAYBE CORRECT
if debug is True:
print "generate 1D ctf complete in %.9f sec"%(time.time()-t0)
return ctf**2
#===================
def generateCTF1dMakePoints(numpoints=256, focus=1.0e-6,
pixelsize=1.5e-10, cs=2e-3, volts=120000, ampconst=0.07):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
if debug is True:
print "generateCTF1d()"
checkParams(focus1=focus, focus2=focus, pixelsize=pixelsize, cs=cs,
volts=volts, ampconst=ampconst)
radii = generateRadii1d(numpoints, pixelsize)
ctf = generateCTF1dFromRadii(radii, focus, cs, volts, ampconst)
return ctf
#===================
def generateRadii1d(numpoints=256, pixelsize=1e-10):
radfreq = 1.0/( numpoints*pixelsize )
radii = numpy.arange(numpoints) * radfreq
return radii
#===================
def generateCTF2d(focus1=-1.0e-6, focus2=-1.0e-6, theta=0.0,
shape=(256,256), pixelsize=1.0e-10, cs=2e-3, volts=120000, ampconst=0.000):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
t0 = time.time()
wavelength = getTEMLambda(volts)
xfreq = 1.0/( (shape[1]-1)*2.*pixelsize )
yfreq = 1.0/( (shape[0]-1)*2.*pixelsize )
ctf = numpy.zeros(shape, dtype=numpy.float64)
meanfocus = (focus1 + focus2) / 2.
focusdiff = (focus1 - focus2) / 2.
t1 = math.pi * wavelength
t2 = wavelength**2 * cs / 2.0
t3 = -1.0*math.asin(ampconst)
radiisq = circle.generateRadial1d(shape, xfreq, yfreq)
angles = -circle.generateAngular2d(shape, xfreq, yfreq)
localfocus = meanfocus + focusdiff * numpy.cos(2.0*(angles-theta))
gamma = t1*radiisq * (-localfocus + t2*radiisq) + t3
ctf = numpy.sin(gamma)
gauss = circle.generateGaussion2d(shape)
imagefile.arrayToJpeg(gauss, "gauss2.jpg")
if debug is True:
print "generate ctf 2d complete in %.4f sec"%(time.time()-t0)
return ctf*gauss
#===================
def generateAngular2d(shape, xfreq, yfreq):
"""
this method is about 2x faster than method 1
"""
t0 = time.time()
if shape[0] % 2 != 0 or shape[1] % 2 != 0:
apDisplay.printError("array shape for radial function must be even")
halfshape = numpy.array(shape)/2.0
a = Angular(halfshape, xfreq, yfreq, center=False, flip=False)
angular1 = a.angular
b = Angular(halfshape, xfreq, yfreq, center=False, flip=True)
angular2 = numpy.fliplr(b.angular)
circular = numpy.vstack(
(numpy.hstack(
(numpy.flipud(angular2), -numpy.flipud(angular1))
),numpy.hstack(
(-angular2, angular1),
)))
### raw radius from center
#print numpy.around(circular*180/math.pi,1)
print "angular 2 complete in %.4f sec"%(time.time()-t0)
return circular
#===================
def generateGaussion2d(shape, sigma=None):
"""
this method is about 4x faster than method 1
"""
t0 = time.time()
if sigma is None:
sigma = numpy.mean(shape)/4.0
circular = generateRadial2(shape)
circular = numpy.exp(-circular/sigma**2)
print "gaussian 2 complete in %.4f sec"%(time.time()-t0)
return circular
#===================
class Radial(object):
def __init__(self, shape, xfreq=1.0, yfreq=1.0, center=True):
# setup
if center is True:
### distance from center
self.center = numpy.array(shape, dtype=numpy.float64)/2.0 - 0.5
else:
### the upper-left edge
self.center = (-0.5, -0.5)
self.xfreqsq = xfreq**2
self.yfreqsq = yfreq**2
# function
self.radial = numpy.fromfunction(self.distance, shape, dtype=numpy.float64)
def distance(self, y, x):
distance = (
(x - self.center[1])**2 * self.xfreqsq
+ (y - self.center[0])**2 * self.yfreqsq
)
return distance
#===================
def generateRadial2d(shape, xfreq, yfreq):
"""
this method is about 4x faster than method 1
"""
t0 = time.time()
if shape[0] % 2 != 0 or shape[1] % 2 != 0:
apDisplay.printError("array shape for radial function must be even")
halfshape = numpy.array(shape)/2.0
#radial = numpy.fromfunction(radiusfunc, halfshape)
r = Radial(halfshape, xfreq, yfreq, center=False)
radial = r.radial
circular = numpy.vstack(
(numpy.hstack(
(numpy.fliplr(numpy.flipud(radial)), numpy.flipud(radial))
),numpy.hstack(
(numpy.fliplr(radial), radial),
)))
### raw radius from center
#print circular
print "radial 2 complete in %.4f sec"%(time.time()-t0)
return circular
#===================
def checkParams(focus1=-1.0e-6, focus2=-1.0e-6, pixelsize=1.5e-10,
cs=2e-3, volts=120000, ampconst=0.07, failParams=False):
if debug is True:
print " Defocus1 %.2f microns (underfocus is positive)"%(focus1*1e6)
if focus1 != focus2:
print " Defocus2 %.2f microns (underfocus is positive)"%(focus2*1e6)
print " Pixelsize %.3f Angstroms"%(pixelsize*1e10)
print " C_s %.1f mm"%(cs*1e3)
print " High tension %.1f kV"%(volts*1e-3)
print (" Amp Contrast %.3f (shift %.1f degrees)"
%(ampconst, math.degrees(-math.asin(ampconst))))
if focus1*1e6 > 15.0 or focus1*1e6 < 0.1:
msg = "atypical defocus #1 value %.1f microns (underfocus is positve)"%(focus1*1e6)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if focus2*1e6 > 15.0 or focus2*1e6 < 0.1:
msg = "atypical defocus #2 value %.1f microns (underfocus is positve)"%(focus2*1e6)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if cs*1e3 > 7.0 or cs*1e3 < 0.4:
msg = "atypical C_s value %.1f mm"%(cs*1e3)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if pixelsize*1e10 > 20.0 or pixelsize*1e10 < 0.1:
msg = "atypical pixel size value %.1f Angstroms"%(pixelsize*1e10)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if volts*1e-3 > 400.0 or volts*1e-3 < 60:
msg = "atypical high tension value %.1f kiloVolts"%(volts*1e-3)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if ampconst < 0.0 or ampconst > 0.5:
msg = "atypical amplitude contrast value %.3f"%(ampconst)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
return
#===================
#===================
#===================
if __name__ == "__main__":
r = generateRadial2d((8,8), 0.1, 0.1)
radii = generateRadii1d()
ctf = generateCTF1d(radii)
from matplotlib import pyplot
pyplot.plot(radii, ctf, 'r-', )
pyplot.subplots_adjust(wspace=0.05, hspace=0.05,
bottom=0.05, left=0.05, top=0.95, right=0.95, )
pyplot.show()
| #!/usr/bin/env python
import time
import math
import numpy
from appionlib import apDisplay
from appionlib.apCtf import ctftools
debug = False
#===================
def generateCTF1d(radii=None, focus=1.0e-6, cs=2e-3, volts=120000, ampconst=0.07,
failParams=False, overfocus=False):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
if debug is True:
print "generateCTF1dFromRadii()"
if radii is None:
radii = generateRadii1d(numpoints=256, pixelsize=1e-10)
if debug is True:
apDisplay.printColor("generateCTF radii: 1/%.2fA --> 1/%.2fA"%(1/radii[1]*1e10, 1/radii[-1]*1e10), "cyan")
t0 = time.time()
checkParams(focus1=focus, focus2=focus, cs=cs, volts=volts, ampconst=ampconst, failParams=failParams)
lamb = ctftools.getTEMLambda(volts)
s = radii
pi = math.pi
if overfocus is True:
focus = -1.0*focus
gamma = -0.5*pi*cs*(lamb**3)*(s**4) + pi*focus*lamb*(s**2)
if overfocus is True:
gamma = -1.0*gamma
A = ampconst
B = math.sqrt(1.0 - ampconst**2)
prectf = A*numpy.cos(gamma) + B*numpy.sin(gamma)
ctf = prectf**2
if debug is True:
print "generate 1D ctf complete in %.9f sec"%(time.time()-t0)
return ctf
#===================
def getDiffResForOverfocus(radii=None, cs=2e-3, volts=120000):
"""
given Cs and kV, determine the initial resolution where the difference between
overfocus and underfocus is clearly visible.
value returned in Angstroms, but radii must be in meters
"""
if debug is True:
print "getDiffResForOverfocus()"
if debug is True:
apDisplay.printColor("getDiffRes radii: 1/%.2fA --> 1/%.2fA"%(1/radii[1]*1e10, 1/radii[-1]*1e10), "cyan")
t0 = time.time()
checkParams(focus1=1.0e-6, focus2=1.0e-6, cs=cs, volts=volts, ampconst=0.0, failParams=False)
lamb = ctftools.getTEMLambda(volts)
s = radii
pi = math.pi
csgamma = 2*pi*0.25*cs*(lamb**3)*(s**4)
#over/under-focus difference is visible when Cs component is greater than 0.05
index = numpy.searchsorted(csgamma, 0.03)
diffres = 1.0/radii[index-1]*1e10
apDisplay.printColor("Overfocus/Underfocus difference resolution is: 1/%.2fA"%(diffres), "cyan")
if debug is True:
print "difference resolution complete in %.9f sec"%(time.time()-t0)
return diffres
#===================
def generateCTF1dACE2(radii=None, focus=1.0e-6, cs=2e-3, volts=120000, ampconst=0.07, failParams=False):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
if debug is True:
print "generateCTF1dFromRadii()"
t0 = time.time()
checkParams(focus1=focus, focus2=focus, cs=cs, volts=volts, ampconst=ampconst, failParams=failParams)
minres = 1e10/radii.min()
maxres = 1e10/radii.max()
if debug is True:
print "** CTF limits %.1f A -->> %.1fA"%(minres, maxres)
if maxres < 2.0 or maxres > 50.0:
apDisplay.printError("CTF limits are incorrect %.1f A -->> %.1fA"%(minres, maxres))
wavelength = ctftools.getTEMLambda(volts)
x4 = math.pi/2.0 * wavelength**3 * cs
x2 = math.pi * wavelength
x0 = 1.0*math.asin(ampconst) #CORRECT
if debug is True:
print "x0 shift %.1f degrees"%(math.degrees(x0))
radiisq = radii**2
gamma = (x4 * radiisq**2) + (-focus * x2 * radiisq) + (x0)
#ctf = -1.0*numpy.cos(gamma) #WRONG
#ctf = -1.0*numpy.sin(gamma) #CORRECT
ctf = 1.0*numpy.sin(gamma) #MAYBE CORRECT
if debug is True:
print "generate 1D ctf complete in %.9f sec"%(time.time()-t0)
return ctf**2
#===================
def generateCTF1dMakePoints(numpoints=256, focus=1.0e-6,
pixelsize=1.5e-10, cs=2e-3, volts=120000, ampconst=0.07):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
if debug is True:
print "generateCTF1d()"
checkParams(focus1=focus, focus2=focus, pixelsize=pixelsize, cs=cs,
volts=volts, ampconst=ampconst)
radii = generateRadii1d(numpoints, pixelsize)
ctf = generateCTF1dFromRadii(radii, focus, cs, volts, ampconst)
return ctf
#===================
def generateRadii1d(numpoints=256, pixelsize=1e-10):
radfreq = 1.0/( numpoints*pixelsize )
radii = numpy.arange(numpoints) * radfreq
return radii
#===================
def generateCTF2d(focus1=-1.0e-6, focus2=-1.0e-6, theta=0.0,
shape=(256,256), pixelsize=1.0e-10, cs=2e-3, volts=120000, ampconst=0.000):
"""
calculates a CTF function based on the input details
Use SI units: meters, radians, volts
Underfocus is postive (defocused)
"""
t0 = time.time()
wavelength = getTEMLambda(volts)
xfreq = 1.0/( (shape[1]-1)*2.*pixelsize )
yfreq = 1.0/( (shape[0]-1)*2.*pixelsize )
ctf = numpy.zeros(shape, dtype=numpy.float64)
meanfocus = (focus1 + focus2) / 2.
focusdiff = (focus1 - focus2) / 2.
t1 = math.pi * wavelength
t2 = wavelength**2 * cs / 2.0
t3 = -1.0*math.asin(ampconst)
radiisq = circle.generateRadial1d(shape, xfreq, yfreq)
angles = -circle.generateAngular2d(shape, xfreq, yfreq)
localfocus = meanfocus + focusdiff * numpy.cos(2.0*(angles-theta))
gamma = t1*radiisq * (-localfocus + t2*radiisq) + t3
ctf = numpy.sin(gamma)
gauss = circle.generateGaussion2d(shape)
imagefile.arrayToJpeg(gauss, "gauss2.jpg")
if debug is True:
print "generate ctf 2d complete in %.4f sec"%(time.time()-t0)
return ctf*gauss
#===================
def generateAngular2d(shape, xfreq, yfreq):
"""
this method is about 2x faster than method 1
"""
t0 = time.time()
if shape[0] % 2 != 0 or shape[1] % 2 != 0:
apDisplay.printError("array shape for radial function must be even")
halfshape = numpy.array(shape)/2.0
a = Angular(halfshape, xfreq, yfreq, center=False, flip=False)
angular1 = a.angular
b = Angular(halfshape, xfreq, yfreq, center=False, flip=True)
angular2 = numpy.fliplr(b.angular)
circular = numpy.vstack(
(numpy.hstack(
(numpy.flipud(angular2), -numpy.flipud(angular1))
),numpy.hstack(
(-angular2, angular1),
)))
### raw radius from center
#print numpy.around(circular*180/math.pi,1)
print "angular 2 complete in %.4f sec"%(time.time()-t0)
return circular
#===================
def generateGaussion2d(shape, sigma=None):
"""
this method is about 4x faster than method 1
"""
t0 = time.time()
if sigma is None:
sigma = numpy.mean(shape)/4.0
circular = generateRadial2(shape)
circular = numpy.exp(-circular/sigma**2)
print "gaussian 2 complete in %.4f sec"%(time.time()-t0)
return circular
#===================
class Radial(object):
def __init__(self, shape, xfreq=1.0, yfreq=1.0, center=True):
# setup
if center is True:
### distance from center
self.center = numpy.array(shape, dtype=numpy.float64)/2.0 - 0.5
else:
### the upper-left edge
self.center = (-0.5, -0.5)
self.xfreqsq = xfreq**2
self.yfreqsq = yfreq**2
# function
self.radial = numpy.fromfunction(self.distance, shape, dtype=numpy.float64)
def distance(self, y, x):
distance = (
(x - self.center[1])**2 * self.xfreqsq
+ (y - self.center[0])**2 * self.yfreqsq
)
return distance
#===================
def generateRadial2d(shape, xfreq, yfreq):
"""
this method is about 4x faster than method 1
"""
t0 = time.time()
if shape[0] % 2 != 0 or shape[1] % 2 != 0:
apDisplay.printError("array shape for radial function must be even")
halfshape = numpy.array(shape)/2.0
#radial = numpy.fromfunction(radiusfunc, halfshape)
r = Radial(halfshape, xfreq, yfreq, center=False)
radial = r.radial
circular = numpy.vstack(
(numpy.hstack(
(numpy.fliplr(numpy.flipud(radial)), numpy.flipud(radial))
),numpy.hstack(
(numpy.fliplr(radial), radial),
)))
### raw radius from center
#print circular
print "radial 2 complete in %.4f sec"%(time.time()-t0)
return circular
#===================
def checkParams(focus1=-1.0e-6, focus2=-1.0e-6, pixelsize=1.5e-10,
cs=2e-3, volts=120000, ampconst=0.07, failParams=False):
if debug is True:
print " Defocus1 %.2f microns (underfocus is positive)"%(focus1*1e6)
if focus1 != focus2:
print " Defocus2 %.2f microns (underfocus is positive)"%(focus2*1e6)
print " Pixelsize %.3f Angstroms"%(pixelsize*1e10)
print " C_s %.1f mm"%(cs*1e3)
print " High tension %.1f kV"%(volts*1e-3)
print (" Amp Contrast %.3f (shift %.1f degrees)"
%(ampconst, math.degrees(-math.asin(ampconst))))
if focus1*1e6 > 15.0 or focus1*1e6 < 0.1:
msg = "atypical defocus #1 value %.1f microns (underfocus is positve)"%(focus1*1e6)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if focus2*1e6 > 15.0 or focus2*1e6 < 0.1:
msg = "atypical defocus #2 value %.1f microns (underfocus is positve)"%(focus2*1e6)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if cs*1e3 > 7.0 or cs*1e3 < 0.4:
msg = "atypical C_s value %.1f mm"%(cs*1e3)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if pixelsize*1e10 > 20.0 or pixelsize*1e10 < 0.1:
msg = "atypical pixel size value %.1f Angstroms"%(pixelsize*1e10)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if volts*1e-3 > 400.0 or volts*1e-3 < 60:
msg = "atypical high tension value %.1f kiloVolts"%(volts*1e-3)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
if ampconst < 0.0 or ampconst > 0.5:
msg = "atypical amplitude contrast value %.3f"%(ampconst)
if failParams is False:
apDisplay.printWarning(msg)
else:
apDisplay.printError(msg)
return
#===================
#===================
#===================
if __name__ == "__main__":
r = generateRadial2d((8,8), 0.1, 0.1)
radii = generateRadii1d()
ctf = generateCTF1d(radii)
from matplotlib import pyplot
pyplot.plot(radii, ctf, 'r-', )
pyplot.subplots_adjust(wspace=0.05, hspace=0.05,
bottom=0.05, left=0.05, top=0.95, right=0.95, )
pyplot.show() | en | 0.690402 | #!/usr/bin/env python #=================== calculates a CTF function based on the input details Use SI units: meters, radians, volts Underfocus is postive (defocused) #=================== given Cs and kV, determine the initial resolution where the difference between overfocus and underfocus is clearly visible. value returned in Angstroms, but radii must be in meters #over/under-focus difference is visible when Cs component is greater than 0.05 #=================== calculates a CTF function based on the input details Use SI units: meters, radians, volts Underfocus is postive (defocused) #CORRECT #ctf = -1.0*numpy.cos(gamma) #WRONG #ctf = -1.0*numpy.sin(gamma) #CORRECT #MAYBE CORRECT #=================== calculates a CTF function based on the input details Use SI units: meters, radians, volts Underfocus is postive (defocused) #=================== #=================== calculates a CTF function based on the input details Use SI units: meters, radians, volts Underfocus is postive (defocused) #=================== this method is about 2x faster than method 1 ### raw radius from center #print numpy.around(circular*180/math.pi,1) #=================== this method is about 4x faster than method 1 #=================== # setup ### distance from center ### the upper-left edge # function #=================== this method is about 4x faster than method 1 #radial = numpy.fromfunction(radiusfunc, halfshape) ### raw radius from center #print circular #=================== #1 value %.1f microns (underfocus is positve)"%(focus1*1e6) #2 value %.1f microns (underfocus is positve)"%(focus2*1e6) #=================== #=================== #=================== | 2.560795 | 3 |
app/helpers/json_encoder_helper.py | DataViva/dataviva-api | 14 | 6630840 | from decimal import Decimal
from flask.json import JSONEncoder
class ApiJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, Decimal):
return str(obj)
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
| from decimal import Decimal
from flask.json import JSONEncoder
class ApiJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, Decimal):
return str(obj)
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
| none | 1 | 2.793395 | 3 |
|
amnesia/helpers/content.py | silenius/amnesia | 4 | 6630841 | # -*- coding: utf-8 -*-
from sqlalchemy import orm
from saexts import Serializer
from amnesia.modules.content import Content
def dump_obj(obj, format, **kwargs):
return getattr(Serializer(obj), format)(**kwargs)
def polymorphic_hierarchy(cls=Content):
return list(orm.class_mapper(cls).base_mapper.polymorphic_iterator())
| # -*- coding: utf-8 -*-
from sqlalchemy import orm
from saexts import Serializer
from amnesia.modules.content import Content
def dump_obj(obj, format, **kwargs):
return getattr(Serializer(obj), format)(**kwargs)
def polymorphic_hierarchy(cls=Content):
return list(orm.class_mapper(cls).base_mapper.polymorphic_iterator())
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.170848 | 2 |
tests/database_manager_tests.py | SakuraOneLove/LPM | 0 | 6630842 | """Test for 'database_manager' functions."""
import sqlite3
import unittest
from itertools import islice
from src import database_manager
DB_NAME = "tests/testdb.sqlite3"
VAULT_TABLE_SQL = "create table if not exists vault (\
account_id integer primary key,\
name text not null,\
login text not null,\
password text not null,\
note text);"
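# Note: this schema is assumed to mirror the 'vault' table that
# database_manager.create_table_if_not_exists is expected to build.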
SHOP_TABLE_SQL = "create table if not exists shop (\
shop_id integer primary key,\
name text not null,\
address text not null);"
DROP_TABLE_VAULT = "drop table if exists vault;"
DROP_TABLE_SHOP = "drop table if exists shop;"
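# The DROP statements are used by drop_tables() to clean up the test database.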
SELECT_SQL = "select * from vault where login= '<EMAIL>';"
SELECT_SQL_WITHOUT_NOTE = "select * from vault where\
account_id=2;"
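# account_id=2 refers to the second row inserted in test_for_insert_into_table,
# the one created without a 'note' value.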
class TestExtraMethods(unittest.TestCase):
"""Testing 'database_manager' functions."""
def setUp(self):
"""Creating new database with tables."""
self.database_name = DB_NAME
self.table_name = "vault"
self.table_name2 = "shop"
self.right_insert_dict = {'name': 'mail.ru', 'login': '<EMAIL>',\
'password': '<PASSWORD>', 'note': 'My note'}
self.bad_insert_dict = {'name': 'mail.com', 'person': '<NAME>'}
self.insert_without_note = {'name': 'mail.ru', 'login': '<EMAIL>',\
'password': '<PASSWORD>'}
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(VAULT_TABLE_SQL)
cursor.execute(SHOP_TABLE_SQL)
connection.commit()
connection.close()
def drop_tables(self):
"""Drop tables after tests.
        This method should be called last."""
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(DROP_TABLE_VAULT)
cursor.execute(DROP_TABLE_SHOP)
connection.commit()
connection.close()
def test_for_database_existing(self):
"""Checking the existence of a database file."""
self.assertTrue(database_manager.is_database_exists(self.database_name))
self.assertFalse(database_manager.is_database_exists("tests/somedb.db"))
def test_for_table_existing(self):
"""Checking the existence of a table in database.
Tables 'vault' and 'shop' were created in 'setUp.
'"""
self.assertTrue(database_manager.is_table_exists(self.database_name,\
self.table_name))
self.assertTrue(database_manager.is_table_exists(self.database_name,\
self.table_name2))
self.assertFalse(database_manager.is_table_exists(self.database_name, "big_table"))
self.assertFalse(database_manager.is_table_exists("tests/somedb.db", "cool_table"))
def test_table_creating(self):
"""Checking creating of table 'vault'."""
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(DROP_TABLE_VAULT)
connection.commit()
connection.close()
self.assertFalse(database_manager.is_table_exists(self.database_name,\
self.table_name))
database_manager.create_table_if_not_exists(self.database_name)
self.assertTrue(database_manager.is_table_exists(self.database_name,\
self.table_name))
def test_for_insert_into_table(self):
"""Checking that insert into table query completed successfully."""
# Make a right query
database_manager.insert_into_table(self.database_name, **self.right_insert_dict)
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(SELECT_SQL)
right_query_result = cursor.fetchone()
# Prepare initial value to tuple for compare
initial_tuple = (1, ) + tuple(self.right_insert_dict.values())
# Test it
self.assertEqual(initial_tuple, right_query_result)
# Make right query without note
database_manager.insert_into_table(self.database_name, **self.insert_without_note)
cursor.execute(SELECT_SQL_WITHOUT_NOTE)
query_without_note = cursor.fetchone()
initial_tuple2 = (2, ) + tuple(self.insert_without_note.values()) + (None, )
# Test it
self.assertEqual(initial_tuple2, query_without_note)
# Make a bad query with KeyError Exception
bad_query_result = database_manager.insert_into_table(self.database_name,\
**self.bad_insert_dict)
self.assertEqual(-1, bad_query_result)
connection.close()
def test_selecting_rows_by_name(self):
"""Cheking selecting rows by given name."""
# Make query
selected_rows = database_manager.select_row_by_name(self.database_name,\
self.right_insert_dict['name'])
        # Dict from 'right_insert_dict' without account_id
first_dict = dict(islice(selected_rows[0].items(), 1, 5))
second_dict = dict(islice(selected_rows[1].items(), 1, 4))
# Check it
self.assertGreater(len(selected_rows), 0)
self.assertEqual(self.right_insert_dict, first_dict)
self.assertEqual(self.insert_without_note, second_dict)
# Try make select without results
selected_rows = database_manager.select_row_by_name(self.database_name, "vodka")
self.assertListEqual(selected_rows, [])
def test_selecting_rows_by_login(self):
"""Checking selecting rows by given login."""
# Make query
selected_rows = database_manager.select_row_by_login(self.database_name,\
self.right_insert_dict['login'])
result_dict = dict(islice(selected_rows[0].items(), 1, 5))
# Check it
self.assertGreater(len(selected_rows), 0)
self.assertEqual(result_dict, self.right_insert_dict)
# Try make select without results
selected_rows = database_manager.select_row_by_login(self.database_name, "dummy")
self.assertListEqual(selected_rows, [])
def drop_tables():
"""Drop tables after tests.
    This method should be called last."""
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
cursor.execute(DROP_TABLE_VAULT)
cursor.execute(DROP_TABLE_SHOP)
connection.commit()
connection.close()
if __name__ == "__main__":
unittest.main(exit=False)
drop_tables()
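# Presumably run from the project root with something like
#   python -m unittest tests.database_manager_tests
# (module path inferred from the file location); the module-level drop_tables()
# afterwards removes the temporary tables from tests/testdb.sqlite3.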
| """Test for 'database_manager' functions."""
import sqlite3
import unittest
from itertools import islice
from src import database_manager
DB_NAME = "tests/testdb.sqlite3"
VAULT_TABLE_SQL = "create table if not exists vault (\
account_id integer primary key,\
name text not null,\
login text not null,\
password text not null,\
note text);"
SHOP_TABLE_SQL = "create table if not exists shop (\
shop_id integer primary key,\
name text not null,\
address text not null);"
DROP_TABLE_VAULT = "drop table if exists vault;"
DROP_TABLE_SHOP = "drop table if exists shop;"
SELECT_SQL = "select * from vault where login= '<EMAIL>';"
SELECT_SQL_WITHOUT_NOTE = "select * from vault where\
account_id=2;"
class TestExtraMethods(unittest.TestCase):
"""Testing 'database_manager' functions."""
def setUp(self):
"""Creating new database with tables."""
self.database_name = DB_NAME
self.table_name = "vault"
self.table_name2 = "shop"
self.right_insert_dict = {'name': 'mail.ru', 'login': '<EMAIL>',\
'password': '<PASSWORD>', 'note': 'My note'}
self.bad_insert_dict = {'name': 'mail.com', 'person': '<NAME>'}
self.insert_without_note = {'name': 'mail.ru', 'login': '<EMAIL>',\
'password': '<PASSWORD>'}
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(VAULT_TABLE_SQL)
cursor.execute(SHOP_TABLE_SQL)
connection.commit()
connection.close()
def drop_tables(self):
"""Drop tables after tests.
        This method should be called last."""
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(DROP_TABLE_VAULT)
cursor.execute(DROP_TABLE_SHOP)
connection.commit()
connection.close()
def test_for_database_existing(self):
"""Checking the existence of a database file."""
self.assertTrue(database_manager.is_database_exists(self.database_name))
self.assertFalse(database_manager.is_database_exists("tests/somedb.db"))
def test_for_table_existing(self):
"""Checking the existence of a table in database.
Tables 'vault' and 'shop' were created in 'setUp.
'"""
self.assertTrue(database_manager.is_table_exists(self.database_name,\
self.table_name))
self.assertTrue(database_manager.is_table_exists(self.database_name,\
self.table_name2))
self.assertFalse(database_manager.is_table_exists(self.database_name, "big_table"))
self.assertFalse(database_manager.is_table_exists("tests/somedb.db", "cool_table"))
def test_table_creating(self):
"""Checking creating of table 'vault'."""
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(DROP_TABLE_VAULT)
connection.commit()
connection.close()
self.assertFalse(database_manager.is_table_exists(self.database_name,\
self.table_name))
database_manager.create_table_if_not_exists(self.database_name)
self.assertTrue(database_manager.is_table_exists(self.database_name,\
self.table_name))
def test_for_insert_into_table(self):
"""Checking that insert into table query completed successfully."""
# Make a right query
database_manager.insert_into_table(self.database_name, **self.right_insert_dict)
connection = sqlite3.connect(self.database_name)
cursor = connection.cursor()
cursor.execute(SELECT_SQL)
right_query_result = cursor.fetchone()
# Prepare initial value to tuple for compare
initial_tuple = (1, ) + tuple(self.right_insert_dict.values())
# Test it
self.assertEqual(initial_tuple, right_query_result)
# Make right query without note
database_manager.insert_into_table(self.database_name, **self.insert_without_note)
cursor.execute(SELECT_SQL_WITHOUT_NOTE)
query_without_note = cursor.fetchone()
initial_tuple2 = (2, ) + tuple(self.insert_without_note.values()) + (None, )
# Test it
self.assertEqual(initial_tuple2, query_without_note)
# Make a bad query with KeyError Exception
bad_query_result = database_manager.insert_into_table(self.database_name,\
**self.bad_insert_dict)
self.assertEqual(-1, bad_query_result)
connection.close()
def test_selecting_rows_by_name(self):
"""Cheking selecting rows by given name."""
# Make query
selected_rows = database_manager.select_row_by_name(self.database_name,\
self.right_insert_dict['name'])
        # Dict from 'right_insert_dict' without account_id
first_dict = dict(islice(selected_rows[0].items(), 1, 5))
second_dict = dict(islice(selected_rows[1].items(), 1, 4))
# Check it
self.assertGreater(len(selected_rows), 0)
self.assertEqual(self.right_insert_dict, first_dict)
self.assertEqual(self.insert_without_note, second_dict)
# Try make select without results
selected_rows = database_manager.select_row_by_name(self.database_name, "vodka")
self.assertListEqual(selected_rows, [])
def test_selecting_rows_by_login(self):
"""Checking selecting rows by given login."""
# Make query
selected_rows = database_manager.select_row_by_login(self.database_name,\
self.right_insert_dict['login'])
result_dict = dict(islice(selected_rows[0].items(), 1, 5))
# Check it
self.assertGreater(len(selected_rows), 0)
self.assertEqual(result_dict, self.right_insert_dict)
# Try make select without results
selected_rows = database_manager.select_row_by_login(self.database_name, "dummy")
self.assertListEqual(selected_rows, [])
def drop_tables():
"""Drop tables after tests.
    This method should be called last."""
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
cursor.execute(DROP_TABLE_VAULT)
cursor.execute(DROP_TABLE_SHOP)
connection.commit()
connection.close()
if __name__ == "__main__":
unittest.main(exit=False)
drop_tables()
| en | 0.801283 | Test for 'database_manager' functions. Testing 'database_manager' functions. Creating new database with tables. Drop tables after tests. It method should be called in the last method. Checking the existence of a database file. Checking the existence of a table in database. Tables 'vault' and 'shop' were created in 'setUp. ' Checking creating of table 'vault'. Checking that insert into table query completed successfully. # Make a right query # Prepare initial value to tuple for compare # Test it # Make right query without note # Test it # Make a bad query with KeyError Exception Cheking selecting rows by given name. # Make query # Dict from 'right_insert_dict' witout account_id # Check it # Try make select without results Checking selecting rows by given login. # Make query # Check it # Try make select without results Drop tables after tests. It method should be called in the last method. | 3.274818 | 3 |
test/test_thread.py | m-takeuchi/ilislife_wxp | 0 | 6630843 | <reponame>m-takeuchi/ilislife_wxp
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import threading
import wx
# Button ID definitions
ID_START = wx.NewId()
ID_STOP = wx.NewId()
# Event ID definition
EVT_UPDATE_ID = wx.NewId()
class UpdateEvent(wx.PyEvent):
    """Update event."""
    def __init__(self, data):
        """Initialize."""
wx.PyEvent.__init__(self)
self.SetEventType(EVT_UPDATE_ID)
self.data = data
class CounterThread(threading.Thread):
"""カウンタスレッド"""
def __init__(self, notify_window):
"""コンストラクタ"""
threading.Thread.__init__(self)
self._notify_window = notify_window
self._cancel = False
self.lock = threading.Lock()
        self.start() # start running immediately
    def run(self):
        """Run the thread."""
second = 0
while(True):
with self.lock:
if not self._cancel:
wx.PostEvent(self._notify_window, UpdateEvent(second))
time.sleep(1)
second += 1
else:
wx.PostEvent(self._notify_window, UpdateEvent(None))
return
def cancel(self):
"""キャンセル"""
with self.lock:
self._cancel = True
class MainFrame(wx.Frame):
"""Class MainFrame."""
def __init__(self, parent, id):
"""Create the MainFrame."""
wx.Frame.__init__(self, parent, id, u'wxPythonサンプル')
wx.Button(self, ID_START, u'スタート', pos=(10,10))
wx.Button(self, ID_STOP, u'ストップ', pos=(10,60))
self.counter = wx.StaticText(self, -1, '', pos=(120,13))
self.status = wx.StaticText(self, -1, '', pos=(120,63))
self.worker = None
        # set up the callbacks
self.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=ID_STOP)
self.Connect(-1, -1, EVT_UPDATE_ID, self.OnUpdate)
def OnStart(self, event):
"""開始"""
if not self.worker:
self.status.SetForegroundColour('black')
self.status.SetLabel(u'カウント中')
self.worker = CounterThread(self)
def OnCancel(self, event):
"""キャンセル"""
if self.worker:
self.status.SetForegroundColour('green')
self.status.SetLabel(u'キャンセル中')
self.worker.cancel()
def OnUpdate(self, event):
"""更新"""
if event.data is None:
self.counter.SetLabel('')
self.status.SetForegroundColour('red')
self.status.SetLabel(u'終了')
self.worker = None
else:
self.counter.SetLabel(u'時間(秒): %s' % event.data)
def main_test():
app = wx.App(False)
frame = MainFrame(None, -1)
app.SetTopWindow(frame)
frame.Show(True)
app.MainLoop()
if __name__ == '__main__':
main_test()
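# Pattern note: the worker thread never touches widgets directly; it only calls
# wx.PostEvent() with UpdateEvent instances, and the GUI thread reacts in
# MainFrame.OnUpdate, which is the thread-safe way to update wxPython UIs.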
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import threading
import wx
# Button ID definitions
ID_START = wx.NewId()
ID_STOP = wx.NewId()
# Event ID definition
EVT_UPDATE_ID = wx.NewId()
class UpdateEvent(wx.PyEvent):
    """Update event."""
    def __init__(self, data):
        """Initialize."""
wx.PyEvent.__init__(self)
self.SetEventType(EVT_UPDATE_ID)
self.data = data
class CounterThread(threading.Thread):
"""カウンタスレッド"""
def __init__(self, notify_window):
"""コンストラクタ"""
threading.Thread.__init__(self)
self._notify_window = notify_window
self._cancel = False
self.lock = threading.Lock()
        self.start() # start running immediately
    def run(self):
        """Run the thread."""
second = 0
while(True):
with self.lock:
if not self._cancel:
wx.PostEvent(self._notify_window, UpdateEvent(second))
time.sleep(1)
second += 1
else:
wx.PostEvent(self._notify_window, UpdateEvent(None))
return
def cancel(self):
"""キャンセル"""
with self.lock:
self._cancel = True
class MainFrame(wx.Frame):
"""Class MainFrame."""
def __init__(self, parent, id):
"""Create the MainFrame."""
wx.Frame.__init__(self, parent, id, u'wxPythonサンプル')
wx.Button(self, ID_START, u'スタート', pos=(10,10))
wx.Button(self, ID_STOP, u'ストップ', pos=(10,60))
self.counter = wx.StaticText(self, -1, '', pos=(120,13))
self.status = wx.StaticText(self, -1, '', pos=(120,63))
self.worker = None
        # set up the callbacks
self.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=ID_STOP)
self.Connect(-1, -1, EVT_UPDATE_ID, self.OnUpdate)
def OnStart(self, event):
"""開始"""
if not self.worker:
self.status.SetForegroundColour('black')
self.status.SetLabel(u'カウント中')
self.worker = CounterThread(self)
def OnCancel(self, event):
"""キャンセル"""
if self.worker:
self.status.SetForegroundColour('green')
self.status.SetLabel(u'キャンセル中')
self.worker.cancel()
def OnUpdate(self, event):
"""更新"""
if event.data is None:
self.counter.SetLabel('')
self.status.SetForegroundColour('red')
self.status.SetLabel(u'終了')
self.worker = None
else:
self.counter.SetLabel(u'時間(秒): %s' % event.data)
def main_test():
app = wx.App(False)
frame = MainFrame(None, -1)
app.SetTopWindow(frame)
frame.Show(True)
app.MainLoop()
if __name__ == '__main__':
main_test() | ja | 0.998079 | #!/usr/bin/env python # -*- coding: utf-8 -*- # ボタンIDの定義 # イベントIDの定義 更新イベント 初期化 カウンタスレッド コンストラクタ # 実行開始 スレッド実行 キャンセル Class MainFrame. Create the MainFrame. # コールバックの設定 開始 キャンセル 更新 | 2.970708 | 3 |
2020/utctf_2020/Cryptography/Random_ECB/server.py | r4k0nb4k0n/CTF-Writeups | 2 | 6630844 | from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from Crypto.Random import get_random_bytes
from Crypto.Random.random import getrandbits
from secret import flag
KEY = get_random_bytes(16)
def aes_ecb_encrypt(plaintext, key):
cipher = AES.new(key, AES.MODE_ECB)
return cipher.encrypt(plaintext)
def encryption_oracle(plaintext):
b = getrandbits(1)
plaintext = pad((b'A' * b) + plaintext + flag, 16)
return aes_ecb_encrypt(plaintext, KEY).hex()
if __name__ == '__main__':
while True:
print("Input a string to encrypt (input 'q' to quit):")
user_input = input()
if user_input == 'q':
break
output = encryption_oracle(user_input.encode())
print("Here is your encrypted string, have a nice day :)")
print(output)
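# Note: AES-ECB encrypts equal 16-byte plaintext blocks to equal ciphertext
# blocks, so even with the random one-byte 'A' prefix this oracle leaks enough
# structure for a byte-at-a-time recovery of the appended flag.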
| from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from Crypto.Random import get_random_bytes
from Crypto.Random.random import getrandbits
from secret import flag
KEY = get_random_bytes(16)
def aes_ecb_encrypt(plaintext, key):
cipher = AES.new(key, AES.MODE_ECB)
return cipher.encrypt(plaintext)
def encryption_oracle(plaintext):
b = getrandbits(1)
plaintext = pad((b'A' * b) + plaintext + flag, 16)
return aes_ecb_encrypt(plaintext, KEY).hex()
if __name__ == '__main__':
while True:
print("Input a string to encrypt (input 'q' to quit):")
user_input = input()
if user_input == 'q':
break
output = encryption_oracle(user_input.encode())
print("Here is your encrypted string, have a nice day :)")
print(output)
| none | 1 | 3.485563 | 3 |
|
main.py | thecivilizedgamer/ayu-discord-bot | 1 | 6630845 | from client import Client
from data_store import Data
from db import save_task
from events import alarm_task, timer_task
def main():
client = Client.get_client()
client.loop.create_task(timer_task())
client.loop.create_task(alarm_task())
client.loop.create_task(save_task())
client.run(Data.config.bot_token)
if __name__ == '__main__':
main()
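# The three background tasks are scheduled on the bot's event loop before
# run() starts it; run() then blocks until the client disconnects.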
| from client import Client
from data_store import Data
from db import save_task
from events import alarm_task, timer_task
def main():
client = Client.get_client()
client.loop.create_task(timer_task())
client.loop.create_task(alarm_task())
client.loop.create_task(save_task())
client.run(Data.config.bot_token)
if __name__ == '__main__':
main()
| none | 1 | 2.174227 | 2 |
|
ostap/core/config.py | TatianaOvsiannikova/ostap | 14 | 6630846 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/core/config.py
# The basic configuration of ostap.
# Ostap parses the following configuration files :
# - <code>'~/.ostaprc'</code>
# - <code>'~/.config/ostap/.ostaprc'</code>
# - <code>'.ostaprc'</code>
# - <code>$OSTAP_CONFIG</code>
# @author <NAME> <EMAIL>
# @date 2019-05-19
# =============================================================================
"""The basic configuration of ostap
Ostap parses the following configuration files :
- ~/.ostaprc
- ~/.config/ostap/.ostaprc
- .ostaprc
- $OSTAP_CONFIG
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "<NAME> <EMAIL>"
__date__ = "2019-05-19"
__all__ = (
'config' , ## the parsed configuration
)
# =============================================================================
import configparser, os, sys
import ostap.core.default_config as _config
# =============================================================================
## print for configparser
def _cp_str_ ( cp ) :
import io
with io.StringIO() as o :
config.write( o )
return o.getvalue()
config = configparser.ConfigParser()
type(config).__str__ = _cp_str_
type(config).__repr__ = _cp_str_
## Define the major sections
config [ 'General' ] = {
'Quiet' : str ( _config.quiet ) ,
'Verbose' : str ( _config.verbose ) ,
'Parallel' : 'PATHOS' ,
}
config [ 'Canvas' ] = { 'Width' : '1000' , 'Height' : '800' ,
'MarginTop' : '0.05' , 'MarginBottom' : '0.12' ,
'MarginRight' : '0.05' , 'MarginLeft' : '0.12' }
config [ 'Fit Draw' ] = {}
config [ 'Parallel' ] = {}
## the list of processed config files
files_read = config.read (
_config.config_files +
os.environ.get ( 'OSTAP_CONFIG', '' ).split( os.pathsep ) )
# =============================================================================
## sections
general = config [ 'General' ]
quiet = general.getboolean ( 'Quiet' , fallback = False )
verbose = general.getboolean ( 'Verbose', fallback = False )
# =============================================================================
## section with canvas configuration
canvas = config [ 'Canvas' ]
# =============================================================================
## section for fit drawing options
fit_draw = config [ 'Fit Draw' ]
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.core.config' )
else : logger = getLogger ( __name__ )
# =============================================================================
import logging
logging.disable ( ( logging.WARNING - 1 ) if quiet else
( logging.DEBUG - 5 ) if verbose else ( logging.INFO - 1 ) )
# =============================================================================
import atexit
@atexit.register
def config_goodby () :
import datetime
now = datetime.datetime.now()
if files_read :
logger.info ( 'The configuration of Ostap was read from %s' % files_read )
import io
with io.StringIO() as o :
config.write( o )
logger.verbose ( 'Ostap configuration:\n%s' % o.getvalue() )
try :
dump = '.ostap_config.txt'
if os.path.exists ( dump ) : os.remove ( dump )
with open ( dump , 'w' ) as ff :
ff.write('#' + 78*'*' + '\n')
ff.write('# Ostap configuration (read from %s)\n' % files_read )
ff.write('#' + 78*'*' + '\n')
config.write( ff )
ff.write('#' + 78*'*' + '\n')
ff.write('# Configuration saved at %s\n' % now.strftime('%c') )
ff.write('#' + 78*'*' + '\n')
if os.path.exists ( dump ) and os.path.isfile ( dump ) :
logger.info ( 'Ostap configuration saved: %s' % dump )
except :
pass
# =============================================================================
if '__main__' == __name__ :
def _cp_hash_ ( cp ) : return hash ( str ( cp ) )
type(config).__hash__ = _cp_hash_
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
cnf = '\n' + str(config)
cnf = cnf.replace ('\n','\n# ')
logger.info ( 'Ostap configuration is:%s' % cnf )
# =============================================================================
## The END
# =============================================================================
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/core/config.py
# The basic configuration of ostap.
# Ostap parses the following configuration files :
# - <code>'~/.ostaprc'</code>
# - <code>'~/.config/ostap/.ostaprc'</code>
# - <code>'.ostaprc'</code>
# - <code>$OSTAP_CONFIG</code>
# @author <NAME> <EMAIL>
# @date 2019-05-19
# =============================================================================
"""The basic configuration of ostap
Ostap parses the following configuration files :
- ~/.ostaprc
- ~/.config/ostap/.ostaprc
- .ostaprc
- $OSTAP_CONFIG
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "<NAME> <EMAIL>"
__date__ = "2019-05-19"
__all__ = (
'config' , ## the parsed configuration
)
# =============================================================================
import configparser, os, sys
import ostap.core.default_config as _config
# =============================================================================
## print for configparser
def _cp_str_ ( cp ) :
import io
with io.StringIO() as o :
config.write( o )
return o.getvalue()
config = configparser.ConfigParser()
type(config).__str__ = _cp_str_
type(config).__repr__ = _cp_str_
## Define the major sections
config [ 'General' ] = {
'Quiet' : str ( _config.quiet ) ,
'Verbose' : str ( _config.verbose ) ,
'Parallel' : 'PATHOS' ,
}
config [ 'Canvas' ] = { 'Width' : '1000' , 'Height' : '800' ,
'MarginTop' : '0.05' , 'MarginBottom' : '0.12' ,
'MarginRight' : '0.05' , 'MarginLeft' : '0.12' }
config [ 'Fit Draw' ] = {}
config [ 'Parallel' ] = {}
## the list of processed config files
files_read = config.read (
_config.config_files +
os.environ.get ( 'OSTAP_CONFIG', '' ).split( os.pathsep ) )
# =============================================================================
## sections
general = config [ 'General' ]
quiet = general.getboolean ( 'Quiet' , fallback = False )
verbose = general.getboolean ( 'Verbose', fallback = False )
# =============================================================================
## section with canvas configuration
canvas = config [ 'Canvas' ]
# =============================================================================
## section for fit drawing options
fit_draw = config [ 'Fit Draw' ]
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.core.config' )
else : logger = getLogger ( __name__ )
# =============================================================================
import logging
logging.disable ( ( logging.WARNING - 1 ) if quiet else
( logging.DEBUG - 5 ) if verbose else ( logging.INFO - 1 ) )
# =============================================================================
import atexit
@atexit.register
def config_goodby () :
import datetime
now = datetime.datetime.now()
if files_read :
logger.info ( 'The configuration of Ostap was read from %s' % files_read )
import io
with io.StringIO() as o :
config.write( o )
logger.verbose ( 'Ostap configuration:\n%s' % o.getvalue() )
try :
dump = '.ostap_config.txt'
if os.path.exists ( dump ) : os.remove ( dump )
with open ( dump , 'w' ) as ff :
ff.write('#' + 78*'*' + '\n')
ff.write('# Ostap configuration (read from %s)\n' % files_read )
ff.write('#' + 78*'*' + '\n')
config.write( ff )
ff.write('#' + 78*'*' + '\n')
ff.write('# Configuration saved at %s\n' % now.strftime('%c') )
ff.write('#' + 78*'*' + '\n')
if os.path.exists ( dump ) and os.path.isfile ( dump ) :
logger.info ( 'Ostap configuration saved: %s' % dump )
except :
pass
# =============================================================================
if '__main__' == __name__ :
def _cp_hash_ ( cp ) : return hash ( str ( cp ) )
type(config).__hash__ = _cp_hash_
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
cnf = '\n' + str(config)
cnf = cnf.replace ('\n','\n# ')
logger.info ( 'Ostap configuration is:%s' % cnf )
# =============================================================================
## The END
# ============================================================================= | en | 0.321798 | #!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================= ## @file ostap/core/config.py # The basic configuration of ostap. # Ostap parses the following configuration files : # - <code>'~/.ostaprc'</code> # - <code>'~/.config/ostap/.ostaprc'</code> # - <code>'.ostaprc'</code> # - <code>$OSTAP_CONFIG</code> # @author <NAME> <EMAIL> # @date 2019-05-19 # ============================================================================= The basic configuration of ostap Ostap parses the following configuration files : - ~/.ostaprc - ~/.config/ostap/.ostaprc - .ostaprc - $OSTAP_CONFIG # ============================================================================= ## the parsed configuration # ============================================================================= # ============================================================================= ## print for configparger ## Define the major sections ## the list of processes config files # ============================================================================= ## sections # ============================================================================= ## section with canvas configuration # ============================================================================= ## section for fit drawing options # ============================================================================= # logging # ============================================================================= # ============================================================================= # ============================================================================= # ============================================================================= # ') # ============================================================================= ## The END # ============================================================================= | 2.015672 | 2 |
api/namex/resources/name_requests/constants.py | riyazuddinsyed/namex | 0 | 6630847 | <reponame>riyazuddinsyed/namex
from namex.models import State
# Only allow editing if the request is in certain valid states
request_editable_states = [
State.DRAFT,
State.RESERVED,
State.COND_RESERVE
]
contact_editable_states = [
State.DRAFT,
State.APPROVED,
State.REJECTED,
State.CONDITIONAL
]
| from namex.models import State
# Only allow editing if the request is in certain valid states
request_editable_states = [
State.DRAFT,
State.RESERVED,
State.COND_RESERVE
]
contact_editable_states = [
State.DRAFT,
State.APPROVED,
State.REJECTED,
State.CONDITIONAL
] | en | 0.731132 | # Only allow editing if the request is in certain valid states | 1.386423 | 1 |
blueoil/cmd/init.py | Joeper214/blueoil | 0 | 6630848 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from collections import OrderedDict
import inspect
import re
import shutil
import inquirer
import blueoil.data_augmentor as augmentor
from blueoil.generate_lmnet_config import generate
from blueoil.data_processor import Processor
task_type_choices = [
'classification',
'object_detection',
'semantic_segmentation',
'keypoint_detection'
]
classification_network_definitions = [
{
'name': 'LmnetV1Quantize',
'desc': 'Quantized Lmnet version 1. Accuracy is better than LmnetV0Quantize.',
},
{
'name': 'ResNetQuantize',
'desc': 'Quantized ResNet 18. Accuracy is better than LmnetV1Quantize.',
},
]
object_detection_network_definitions = [
{
'name': 'LMFYoloQuantize',
'desc': 'YOLO-like object detection network.',
},
]
semantic_segmentation_network_definitions = [
{
'name': 'LmSegnetV1Quantize',
'desc': 'Quantized LeapMind original semantic segmentation network, version 1.',
},
]
keypoint_detection_network_definitions = [
{
'name': 'LmSinglePoseV1Quantize',
'desc': 'Quantized LeapMind original single-person pose estimation network, version 1.',
},
]
IMAGE_SIZE_VALIDATION = {
"LmnetV1Quantize": {
"max_size": 512,
"divider": 16,
},
"ResNetQuantize": {
"max_size": 512,
"divider": 16,
},
"LMFYoloQuantize": {
"max_size": 480,
"divider": 32,
},
"LmSegnetV1Quantize": {
"max_size": 512,
"divider": 8,
},
"LmSinglePoseV1Quantize": {
"max_size": 512,
"divider": 8,
},
}
classification_dataset_formats = [
{
'name': 'Caltech101',
        'desc': 'Caltech101 compatible',
},
{
'name': 'DeLTA-Mark for Classification',
'desc': 'Dataset for classification created by DeLTA-Mark',
},
]
object_detection_dataset_formats = [
{
'name': 'OpenImagesV4',
'desc': 'OpenImagesV4 compatible',
},
{
'name': 'DeLTA-Mark for Object Detection',
'desc': 'Dataset for object detection created by DeLTA-Mark',
},
]
semantic_segmentation_dataset_formats = [
{
'name': 'CamvidCustom',
        'desc': 'CamVid-based custom format',
},
]
keypoint_detection_dataset_formats = [
{
'name': 'Mscoco for Single-Person Pose Estimation',
'desc': 'Mscoco 2017 for Single-Person Pose Estimation',
},
]
learning_rate_schedule_map = OrderedDict([
("constant", "'constant' -> constant learning rate."),
("cosine", "'cosine' -> cosine learning rate."),
("2-step-decay", "'2-step-decay' -> learning rate decrease by 1/10 on {epochs}/2 and {epochs}-1."),
("3-step-decay", "'3-step-decay' -> learning rate decrease by 1/10 on {epochs}/3 and {epochs}*2/3 and {epochs}-1"),
(
"3-step-decay-with-warmup",
"'3-step-decay-with-warmup' -> "
"warmup learning rate 1/1000 in first epoch, then train the same way as '3-step-decay'"
),
])
def network_name_choices(task_type):
if task_type == 'classification':
return [definition['name'] for definition in classification_network_definitions]
elif task_type == 'object_detection':
return [definition['name'] for definition in object_detection_network_definitions]
elif task_type == 'semantic_segmentation':
return [definition['name'] for definition in semantic_segmentation_network_definitions]
elif task_type == 'keypoint_detection':
return [definition['name'] for definition in keypoint_detection_network_definitions]
def dataset_format_choices(task_type):
if task_type == 'classification':
return [definition['name'] for definition in classification_dataset_formats]
elif task_type == 'object_detection':
return [definition['name'] for definition in object_detection_dataset_formats]
elif task_type == 'semantic_segmentation':
return [definition['name'] for definition in semantic_segmentation_dataset_formats]
elif task_type == 'keypoint_detection':
return [definition['name'] for definition in keypoint_detection_dataset_formats]
def default_batch_size(task_type):
default_task_type_batch_sizes = {
'classification': '10',
'object_detection': '16',
'semantic_segmentation': '8',
'keypoint_detection': '4',
}
return default_task_type_batch_sizes[task_type]
def prompt(question):
"""Execute prompt answer
Args:
question (list): list of inquirer question
Returns: string of answer
"""
answers = inquirer.prompt(question)
return answers['value']
def generate_image_size_validate(network_name):
"""Generate image_size_validate depending on task_type.
Args:
network_name (string): network name.
Returns: validate function.
"""
max_size = IMAGE_SIZE_VALIDATION[network_name]["max_size"]
divider = IMAGE_SIZE_VALIDATION[network_name]["divider"]
def image_size_validate(answers, current):
# change to tuple (height, width).
image_size = image_size_filter(current)
image_size = (int(size) for size in image_size)
for size in image_size:
if not size % divider == 0:
raise inquirer.errors.ValidationError('',
reason="Image size should be multiple of {}, but image size is {}"
.format(divider, current))
if size > max_size:
raise inquirer.errors.ValidationError('',
reason="Image size should be lower than {} but image size is {}"
.format(max_size, current))
return True
return image_size_validate
def integer_validate(answers, current):
if not current.isdigit():
raise inquirer.errors.ValidationError('', reason='Input value should be integer')
return True
def image_size_filter(raw):
match = re.match(r"([0-9]+)[^0-9]+([0-9]+)", raw)
# raw: 128x128 -> ('128', '128')
image_size = match.groups()
return image_size
def save_config(blueoil_config, output=None):
if not output:
output = blueoil_config['model_name'] + ".py"
tmpfile = generate(blueoil_config)
shutil.copy(tmpfile, output)
return output
def ask_questions():
model_name_question = [
inquirer.Text(
name='value',
message='your model name ()')
]
model_name = prompt(model_name_question)
task_type_question = [
inquirer.List(name='value',
message='choose task type',
choices=task_type_choices)
]
task_type = prompt(task_type_question)
network_name_question = [
inquirer.List(name='value',
message='choose network',
choices=network_name_choices(task_type))
]
network_name = prompt(network_name_question)
dataset_format_question = [
inquirer.List(name='value',
message='choose dataset format',
choices=dataset_format_choices(task_type))
]
dataset_format = prompt(dataset_format_question)
enable_data_augmentation = [
inquirer.Confirm(name='value',
message='enable data augmentation?',
default=True)
]
train_dataset_path_question = [
inquirer.Text(name='value',
message='training dataset path')
]
train_path = prompt(train_dataset_path_question)
enable_test_dataset_path_question = [
inquirer.List(name='value',
message='set validation dataset?'
' (if answer no, the dataset will be separated for training and validation'
' by 9:1 ratio.)',
choices=['yes', 'no'])
]
enable_test_dataset_path = prompt(enable_test_dataset_path_question)
test_dataset_path_question = [
inquirer.Text(name='value',
message='validation dataset path')
]
if enable_test_dataset_path == 'yes':
test_path = prompt(test_dataset_path_question)
else:
test_path = ''
batch_size_question = [
inquirer.Text(name='value',
message='batch size (integer)',
default=default_batch_size(task_type),
validate=integer_validate)
]
batch_size = prompt(batch_size_question)
image_size_question = [
inquirer.Text(name='value',
message='image size (integer x integer)',
default='128x128',
validate=generate_image_size_validate(network_name))
]
image_size = image_size_filter(prompt(image_size_question))
training_epochs_question = [
inquirer.Text(name='value',
message='how many epochs do you run training (integer)',
default='100',
validate=integer_validate)
]
training_epochs = prompt(training_epochs_question)
training_optimizer_question = [
inquirer.List(name='value',
message='select optimizer',
choices=['Momentum', 'Adam'],
default='Momentum')
]
training_optimizer = prompt(training_optimizer_question)
initial_learning_rate_value_question = [
inquirer.Text(name='value',
message='initial learning rate',
default='0.001')
]
initial_learning_rate_value = prompt(initial_learning_rate_value_question)
# learning rate schedule
learning_rate_schedule_question = [
inquirer.List(name='value',
message='choose learning rate schedule'
' ({{epochs}} is the number of training epochs you entered before)',
choices=list(learning_rate_schedule_map.values()),
default=learning_rate_schedule_map["constant"])
]
_tmp_learning_rate_schedule = prompt(learning_rate_schedule_question)
for key, value in learning_rate_schedule_map.items():
if value == _tmp_learning_rate_schedule:
learning_rate_schedule = key
data_augmentation = {}
if prompt(enable_data_augmentation):
all_augmentor = {}
checkboxes = []
for name, obj in inspect.getmembers(augmentor):
if inspect.isclass(obj) and issubclass(obj, Processor):
argspec = inspect.getfullargspec(obj)
# ignore self
args = argspec.args[1:]
defaults = argspec.defaults
if len(args) == len(defaults):
default_val = [(arg, default) for arg, default in zip(args, defaults)]
default_str = " (default: {})".format(", ".join(["{}={}".format(a, d) for a, d in default_val]))
else:
defaults = ("# Please fill a value.",) * (len(args) - len(defaults)) + defaults
default_val = [(arg, default) for arg, default in zip(args, defaults)]
default_str = " (**caution**: No default value is provided, \
please modify manually after config exported.)"
all_augmentor[name + default_str] = {"name": name, "defaults": default_val}
checkboxes.append(name + default_str)
data_augmentation_question = [
inquirer.Checkbox(name='value',
message='Please choose augmentors',
choices=checkboxes)
]
data_augmentation_res = prompt(data_augmentation_question)
if data_augmentation_res:
for v in data_augmentation_res:
data_augmentation[all_augmentor[v]["name"]] = all_augmentor[v]["defaults"]
quantize_first_convolution_question = [
inquirer.Confirm(name='value',
message='apply quantization at the first layer?',
default=True)
]
quantize_first_convolution = prompt(quantize_first_convolution_question)
return {
'model_name': model_name,
'task_type': task_type,
'network_name': network_name,
'network': {
'quantize_first_convolution': quantize_first_convolution,
},
'dataset': {
'format': dataset_format,
'train_path': train_path,
'test_path': test_path,
},
'trainer': {
'batch_size': int(batch_size),
'epochs': int(training_epochs),
'optimizer': training_optimizer,
'learning_rate_schedule': learning_rate_schedule,
'initial_learning_rate': float(initial_learning_rate_value),
'save_checkpoint_steps': 1000,
'keep_checkpoint_max': 5,
},
'common': {
'image_size': [int(val) for val in image_size],
'pretrain_model': False,
'dataset_prefetch': True,
'data_augmentation': data_augmentation,
},
}
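# The dict returned by ask_questions() is what save_config() hands to
# blueoil.generate_lmnet_config.generate(); its keys appear to mirror the
# generated config's sections (network / dataset / trainer / common).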
| # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from collections import OrderedDict
import inspect
import re
import shutil
import inquirer
import blueoil.data_augmentor as augmentor
from blueoil.generate_lmnet_config import generate
from blueoil.data_processor import Processor
task_type_choices = [
'classification',
'object_detection',
'semantic_segmentation',
'keypoint_detection'
]
classification_network_definitions = [
{
'name': 'LmnetV1Quantize',
'desc': 'Quantized Lmnet version 1. Accuracy is better than LmnetV0Quantize.',
},
{
'name': 'ResNetQuantize',
'desc': 'Quantized ResNet 18. Accuracy is better than LmnetV1Quantize.',
},
]
object_detection_network_definitions = [
{
'name': 'LMFYoloQuantize',
'desc': 'YOLO-like object detection network.',
},
]
semantic_segmentation_network_definitions = [
{
'name': 'LmSegnetV1Quantize',
'desc': 'Quantized LeapMind original semantic segmentation network, version 1.',
},
]
keypoint_detection_network_definitions = [
{
'name': 'LmSinglePoseV1Quantize',
'desc': 'Quantized LeapMind original single-person pose estimation network, version 1.',
},
]
IMAGE_SIZE_VALIDATION = {
"LmnetV1Quantize": {
"max_size": 512,
"divider": 16,
},
"ResNetQuantize": {
"max_size": 512,
"divider": 16,
},
"LMFYoloQuantize": {
"max_size": 480,
"divider": 32,
},
"LmSegnetV1Quantize": {
"max_size": 512,
"divider": 8,
},
"LmSinglePoseV1Quantize": {
"max_size": 512,
"divider": 8,
},
}
classification_dataset_formats = [
{
'name': 'Caltech101',
        'desc': 'Caltech101 compatible',
},
{
'name': 'DeLTA-Mark for Classification',
'desc': 'Dataset for classification created by DeLTA-Mark',
},
]
object_detection_dataset_formats = [
{
'name': 'OpenImagesV4',
'desc': 'OpenImagesV4 compatible',
},
{
'name': 'DeLTA-Mark for Object Detection',
'desc': 'Dataset for object detection created by DeLTA-Mark',
},
]
semantic_segmentation_dataset_formats = [
{
'name': 'CamvidCustom',
        'desc': 'CamVid-based custom format',
},
]
keypoint_detection_dataset_formats = [
{
'name': 'Mscoco for Single-Person Pose Estimation',
'desc': 'Mscoco 2017 for Single-Person Pose Estimation',
},
]
learning_rate_schedule_map = OrderedDict([
("constant", "'constant' -> constant learning rate."),
("cosine", "'cosine' -> cosine learning rate."),
("2-step-decay", "'2-step-decay' -> learning rate decrease by 1/10 on {epochs}/2 and {epochs}-1."),
("3-step-decay", "'3-step-decay' -> learning rate decrease by 1/10 on {epochs}/3 and {epochs}*2/3 and {epochs}-1"),
(
"3-step-decay-with-warmup",
"'3-step-decay-with-warmup' -> "
"warmup learning rate 1/1000 in first epoch, then train the same way as '3-step-decay'"
),
])
def network_name_choices(task_type):
if task_type == 'classification':
return [definition['name'] for definition in classification_network_definitions]
elif task_type == 'object_detection':
return [definition['name'] for definition in object_detection_network_definitions]
elif task_type == 'semantic_segmentation':
return [definition['name'] for definition in semantic_segmentation_network_definitions]
elif task_type == 'keypoint_detection':
return [definition['name'] for definition in keypoint_detection_network_definitions]
def dataset_format_choices(task_type):
if task_type == 'classification':
return [definition['name'] for definition in classification_dataset_formats]
elif task_type == 'object_detection':
return [definition['name'] for definition in object_detection_dataset_formats]
elif task_type == 'semantic_segmentation':
return [definition['name'] for definition in semantic_segmentation_dataset_formats]
elif task_type == 'keypoint_detection':
return [definition['name'] for definition in keypoint_detection_dataset_formats]
def default_batch_size(task_type):
default_task_type_batch_sizes = {
'classification': '10',
'object_detection': '16',
'semantic_segmentation': '8',
'keypoint_detection': '4',
}
return default_task_type_batch_sizes[task_type]
def prompt(question):
"""Execute prompt answer
Args:
question (list): list of inquirer question
Returns: string of answer
"""
answers = inquirer.prompt(question)
return answers['value']
def generate_image_size_validate(network_name):
"""Generate image_size_validate depending on task_type.
Args:
network_name (string): network name.
Returns: validate function.
"""
max_size = IMAGE_SIZE_VALIDATION[network_name]["max_size"]
divider = IMAGE_SIZE_VALIDATION[network_name]["divider"]
def image_size_validate(answers, current):
# change to tuple (height, width).
image_size = image_size_filter(current)
image_size = (int(size) for size in image_size)
for size in image_size:
if not size % divider == 0:
raise inquirer.errors.ValidationError('',
reason="Image size should be multiple of {}, but image size is {}"
.format(divider, current))
if size > max_size:
raise inquirer.errors.ValidationError('',
reason="Image size should be lower than {} but image size is {}"
.format(max_size, current))
return True
return image_size_validate
def integer_validate(answers, current):
if not current.isdigit():
raise inquirer.errors.ValidationError('', reason='Input value should be integer')
return True
def image_size_filter(raw):
match = re.match(r"([0-9]+)[^0-9]+([0-9]+)", raw)
# raw: 128x128 -> ('128', '128')
image_size = match.groups()
return image_size
def save_config(blueoil_config, output=None):
if not output:
output = blueoil_config['model_name'] + ".py"
tmpfile = generate(blueoil_config)
shutil.copy(tmpfile, output)
return output
def ask_questions():
model_name_question = [
inquirer.Text(
name='value',
message='your model name ()')
]
model_name = prompt(model_name_question)
task_type_question = [
inquirer.List(name='value',
message='choose task type',
choices=task_type_choices)
]
task_type = prompt(task_type_question)
network_name_question = [
inquirer.List(name='value',
message='choose network',
choices=network_name_choices(task_type))
]
network_name = prompt(network_name_question)
dataset_format_question = [
inquirer.List(name='value',
message='choose dataset format',
choices=dataset_format_choices(task_type))
]
dataset_format = prompt(dataset_format_question)
enable_data_augmentation = [
inquirer.Confirm(name='value',
message='enable data augmentation?',
default=True)
]
train_dataset_path_question = [
inquirer.Text(name='value',
message='training dataset path')
]
train_path = prompt(train_dataset_path_question)
enable_test_dataset_path_question = [
inquirer.List(name='value',
message='set validation dataset?'
' (if answer no, the dataset will be separated for training and validation'
' by 9:1 ratio.)',
choices=['yes', 'no'])
]
enable_test_dataset_path = prompt(enable_test_dataset_path_question)
test_dataset_path_question = [
inquirer.Text(name='value',
message='validation dataset path')
]
if enable_test_dataset_path == 'yes':
test_path = prompt(test_dataset_path_question)
else:
test_path = ''
batch_size_question = [
inquirer.Text(name='value',
message='batch size (integer)',
default=default_batch_size(task_type),
validate=integer_validate)
]
batch_size = prompt(batch_size_question)
image_size_question = [
inquirer.Text(name='value',
message='image size (integer x integer)',
default='128x128',
validate=generate_image_size_validate(network_name))
]
image_size = image_size_filter(prompt(image_size_question))
training_epochs_question = [
inquirer.Text(name='value',
message='how many epochs do you run training (integer)',
default='100',
validate=integer_validate)
]
training_epochs = prompt(training_epochs_question)
training_optimizer_question = [
inquirer.List(name='value',
message='select optimizer',
choices=['Momentum', 'Adam'],
default='Momentum')
]
training_optimizer = prompt(training_optimizer_question)
initial_learning_rate_value_question = [
inquirer.Text(name='value',
message='initial learning rate',
default='0.001')
]
initial_learning_rate_value = prompt(initial_learning_rate_value_question)
# learning rate schedule
learning_rate_schedule_question = [
inquirer.List(name='value',
message='choose learning rate schedule'
' ({{epochs}} is the number of training epochs you entered before)',
choices=list(learning_rate_schedule_map.values()),
default=learning_rate_schedule_map["constant"])
]
_tmp_learning_rate_schedule = prompt(learning_rate_schedule_question)
for key, value in learning_rate_schedule_map.items():
if value == _tmp_learning_rate_schedule:
learning_rate_schedule = key
data_augmentation = {}
if prompt(enable_data_augmentation):
all_augmentor = {}
checkboxes = []
for name, obj in inspect.getmembers(augmentor):
if inspect.isclass(obj) and issubclass(obj, Processor):
argspec = inspect.getfullargspec(obj)
# ignore self
args = argspec.args[1:]
defaults = argspec.defaults
if len(args) == len(defaults):
default_val = [(arg, default) for arg, default in zip(args, defaults)]
default_str = " (default: {})".format(", ".join(["{}={}".format(a, d) for a, d in default_val]))
else:
defaults = ("# Please fill a value.",) * (len(args) - len(defaults)) + defaults
default_val = [(arg, default) for arg, default in zip(args, defaults)]
default_str = " (**caution**: No default value is provided, \
please modify manually after config exported.)"
all_augmentor[name + default_str] = {"name": name, "defaults": default_val}
checkboxes.append(name + default_str)
data_augmentation_question = [
inquirer.Checkbox(name='value',
message='Please choose augmentors',
choices=checkboxes)
]
data_augmentation_res = prompt(data_augmentation_question)
if data_augmentation_res:
for v in data_augmentation_res:
data_augmentation[all_augmentor[v]["name"]] = all_augmentor[v]["defaults"]
quantize_first_convolution_question = [
inquirer.Confirm(name='value',
message='apply quantization at the first layer?',
default=True)
]
quantize_first_convolution = prompt(quantize_first_convolution_question)
return {
'model_name': model_name,
'task_type': task_type,
'network_name': network_name,
'network': {
'quantize_first_convolution': quantize_first_convolution,
},
'dataset': {
'format': dataset_format,
'train_path': train_path,
'test_path': test_path,
},
'trainer': {
'batch_size': int(batch_size),
'epochs': int(training_epochs),
'optimizer': training_optimizer,
'learning_rate_schedule': learning_rate_schedule,
'initial_learning_rate': float(initial_learning_rate_value),
'save_checkpoint_steps': 1000,
'keep_checkpoint_max': 5,
},
'common': {
'image_size': [int(val) for val in image_size],
'pretrain_model': False,
'dataset_prefetch': True,
'data_augmentation': data_augmentation,
},
} | en | 0.75389 | # -*- coding: utf-8 -*- # Copyright 2018 The Blueoil Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= Execute prompt answer Args: question (list): list of inquirer question Returns: string of answer Generate image_size_validate depending on task_type. Args: network_name (string): network name. Returns: validate function. # change to tuple (height, width). # raw: 128x128 -> ('128', '128') # learning rate schedule # ignore self | 1.92606 | 2 |
keras/backend/common.py | lukovkin/keras-temp | 16 | 6630849 | import numpy as np
from collections import defaultdict
# the type of float to use throughout the session.
_FLOATX = 'float32'
_EPSILON = 10e-8
_UID_PREFIXES = defaultdict(int)
_IMAGE_DIM_ORDERING = 'th'
def epsilon():
'''Returns the value of the fuzz
factor used in numeric expressions.
'''
return _EPSILON
def set_epsilon(e):
'''Sets the value of the fuzz
factor used in numeric expressions.
'''
global _EPSILON
_EPSILON = e
def floatx():
'''Returns the default float type, as a string
(e.g. 'float16', 'float32', 'float64').
'''
return _FLOATX
def set_floatx(floatx):
global _FLOATX
if floatx not in {'float16', 'float32', 'float64'}:
raise Exception('Unknown floatx type: ' + str(floatx))
_FLOATX = str(floatx)
def cast_to_floatx(x):
'''Cast a Numpy array to floatx.
'''
return np.asarray(x, dtype=_FLOATX)
def image_dim_ordering():
'''Returns the image dimension ordering
convention ('th' or 'tf').
'''
return _IMAGE_DIM_ORDERING
def set_image_dim_ordering(dim_ordering):
'''Sets the value of the image dimension
ordering convention ('th' or 'tf').
'''
global _IMAGE_DIM_ORDERING
if dim_ordering not in {'tf', 'th'}:
raise Exception('Unknown dim_ordering:', dim_ordering)
_IMAGE_DIM_ORDERING = str(dim_ordering)
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def reset_uids():
global _UID_PREFIXES
_UID_PREFIXES = defaultdict(int)
| import numpy as np
from collections import defaultdict
# the type of float to use throughout the session.
_FLOATX = 'float32'
_EPSILON = 10e-8
_UID_PREFIXES = defaultdict(int)
_IMAGE_DIM_ORDERING = 'th'
def epsilon():
'''Returns the value of the fuzz
factor used in numeric expressions.
'''
return _EPSILON
def set_epsilon(e):
'''Sets the value of the fuzz
factor used in numeric expressions.
'''
global _EPSILON
_EPSILON = e
def floatx():
'''Returns the default float type, as a string
(e.g. 'float16', 'float32', 'float64').
'''
return _FLOATX
def set_floatx(floatx):
global _FLOATX
if floatx not in {'float16', 'float32', 'float64'}:
raise Exception('Unknown floatx type: ' + str(floatx))
_FLOATX = str(floatx)
def cast_to_floatx(x):
'''Cast a Numpy array to floatx.
'''
return np.asarray(x, dtype=_FLOATX)
def image_dim_ordering():
'''Returns the image dimension ordering
convention ('th' or 'tf').
'''
return _IMAGE_DIM_ORDERING
def set_image_dim_ordering(dim_ordering):
'''Sets the value of the image dimension
ordering convention ('th' or 'tf').
'''
global _IMAGE_DIM_ORDERING
if dim_ordering not in {'tf', 'th'}:
raise Exception('Unknown dim_ordering:', dim_ordering)
_IMAGE_DIM_ORDERING = str(dim_ordering)
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def reset_uids():
global _UID_PREFIXES
_UID_PREFIXES = defaultdict(int)
| en | 0.62889 | # the type of float to use throughout the session. Returns the value of the fuzz factor used in numeric expressions. Sets the value of the fuzz factor used in numeric expressions. Returns the default float type, as a string (e.g. 'float16', 'float32', 'float64'). Cast a Numpy array to floatx. Returns the image dimension ordering convention ('th' or 'tf'). Sets the value of the image dimension ordering convention ('th' or 'tf'). | 2.930434 | 3 |
src/sensing/temp_and_light/recv.py | sakthivigneshr/homeauto | 0 | 6630850 | <filename>src/sensing/temp_and_light/recv.py<gh_stars>0
import time
import socket
from datetime import datetime
import paho.mqtt.client as mqtt
host = ''
port = 5002
def bind_socket(s):
while 1:
try:
s.bind((host,port))
print 'Bind to port ' + repr(port) + ' success'
break
except:
print 'Failed to open socket, will retry.'
time.sleep(2)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(30)
bind_socket(s)
mqttc = mqtt.Client()
mqttc.connect("127.0.0.2",1883,60)
#
# Start action
#
while 1:
data = []
try:
s.listen(1)
print 'Listening...'
conn, addr = s.accept()
conn.settimeout(30)
print 'Connection address:', addr
data = conn.recv(256)
conn.close()
except:
if s:
s.close()
if conn:
conn.close()
print 'Socket issue in loop, recreating...'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(30)
bind_socket(s)
continue
if not data:
print 'Received no data...'
time.sleep(2)
continue
split = data.split()
if (split[0] != 'light'):
print "Received invalid data: " + data
time.sleep(2)
continue
mqttc.publish("sensing/room1/temp_n_bright", data)
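# Flow: a sensor node opens a TCP connection to port 5002, sends a payload whose
# first whitespace-separated token must be 'light', and the raw payload is then
# republished on the local MQTT topic sensing/room1/temp_n_bright.
# Note: if listen()/accept() raises on the very first iteration, 'conn' is not yet
# bound and the cleanup check 'if conn:' itself raises NameError; initializing
# conn = None before the try block would avoid that.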
| <filename>src/sensing/temp_and_light/recv.py<gh_stars>0
import time
import socket
from datetime import datetime
import paho.mqtt.client as mqtt
host = ''
port = 5002
def bind_socket(s):
while 1:
try:
s.bind((host,port))
print 'Bind to port ' + repr(port) + ' success'
break
except:
print 'Failed to open socket, will retry.'
time.sleep(2)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(30)
bind_socket(s)
mqttc = mqtt.Client()
mqttc.connect("127.0.0.2",1883,60)
#
# Start action
#
while 1:
data = []
try:
s.listen(1)
print 'Listening...'
conn, addr = s.accept()
conn.settimeout(30)
print 'Connection address:', addr
data = conn.recv(256)
conn.close()
except:
if s:
s.close()
if conn:
conn.close()
print 'Socket issue in loop, recreating...'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(30)
bind_socket(s)
continue
if not data:
print 'Received no data...'
time.sleep(2)
continue
split = data.split()
if (split[0] != 'light'):
print "Received invalid data: " + data
time.sleep(2)
continue
mqttc.publish("sensing/room1/temp_n_bright", data)
| en | 0.39231 | # # Start action # | 2.919327 | 3 |